Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: Avoid oops in DRM_IOCTL_RM_DRAW if a bad handle is supplied.
  drm: Add 32-bit compatibility for DRM_IOCTL_UPDATE_DRAW.
  drm/i915: use pipes, not planes to label vblank data
  drm/i915: hold dev->struct_mutex and DRM lock during vblank ring operations
  i915: Fix format string warnings on x86-64.
  i915: Don't dereference HWS in /proc debug files when it isn't initialized.
  i915: Enable IMR passthrough of vblank events before enabling it in pipestat.
  drm: Remove two leaks of vblank reference count in error paths.
  drm: fix leak of cliprects in drm_rmdraw()
  i915: Disable MSI on GM965 (errata says it doesn't work)
  drm: Set cliprects to NULL when changing drawable to having 0 cliprects.
  i915: Protect vblank IRQ reg access with spinlock
diff --git a/.mailmap b/.mailmap
index dfab12f..eba9bf9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -66,6 +66,7 @@
 Koushik <raghavendra.koushik@neterion.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
+Mark Brown <broonie@sirena.org.uk>
 Matthieu CASTET <castet.matthieu@free.fr>
 Michael Buesch <mb@bu3sch.de>
 Michael Buesch <mbuesch@freenet.de>
diff --git a/CREDITS b/CREDITS
index c62dcb3..b50db17 100644
--- a/CREDITS
+++ b/CREDITS
@@ -598,6 +598,11 @@
 S: Taiwan 251
 S: Republic of China
 
+N: Reinette Chatre
+E: reinette.chatre@intel.com
+D: WiMedia Link Protocol implementation
+D: UWB stack bits and pieces
+
 N: Michael Elizabeth Chastain
 E: mec@shout.net
 D: Configure, Menuconfig, xconfig
@@ -1653,14 +1658,14 @@
 S: USA
 
 N: Dave Jones
-E: davej@codemonkey.org.uk
+E: davej@redhat.com
 W: http://www.codemonkey.org.uk
-D: x86 errata/setup maintenance.
-D: AGPGART driver.
+D: Assorted VIA x86 support.
+D: 2.5 AGPGART overhaul.
 D: CPUFREQ maintenance.
-D: Backport/Forwardport merge monkey.
-D: Various Janitor work.
-S: United Kingdom
+D: Fedora kernel maintenance.
+D: Misc/Other.
+S: 314 Littleton Rd, Westford, MA 01886, USA
 
 N: Martin Josfsson
 E: gandalf@wlug.westbo.se
@@ -2695,6 +2700,12 @@
 S: Tula 300000
 S: Russia
 
+N: Inaky Perez-Gonzalez
+E: inaky.perez-gonzalez@intel.com
+D: UWB stack, HWA-RC driver and HWA-HC drivers
+D: Wireless USB additions to the USB stack
+D: WiMedia Link Protocol bits and pieces
+
 N: Gordon Peters
 E: GordPeters@smarttech.com
 D: Isochronous receive for IEEE 1394 driver (OHCI module).
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
new file mode 100644
index 0000000..9a75fb2
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -0,0 +1,62 @@
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/device_capabilities
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		These files show the various USB TMC capabilities as described
+		by the device itself.  The full description of the bitfields
+		can be found in the USB TMC documents from the USB-IF entitled
+		"Universal Serial Bus Test and Measurement Class Specification
+		(USBTMC) Revision 1.0" section 4.2.1.8.
+
+		The files are read only.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_device_capabilities
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		These files show the various USB TMC capabilities as described
+		by the device itself.  The full description of the bitfields
+		can be found in the USB TMC documents from the USB-IF entitled
+		"Universal Serial Bus Test and Measurement Class, Subclass
+		USB488 Specification (USBTMC-USB488) Revision 1.0" section
+		4.2.2.
+
+		The files are read only.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermChar
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		This file is the TermChar value to be sent to the USB TMC
+		device as described by the document, "Universal Serial Bus Test
+		and Measurement Class Specification
+		(USBTMC) Revision 1.0" as published by the USB-IF.
+
+		Note that the TermCharEnabled file determines if this value is
+		sent to the device or not.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermCharEnabled
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		This file determines if the TermChar is to be sent to the
+		device on every transaction or not.  For more details about
+		this, please see the document, "Universal Serial Bus Test and
+		Measurement Class Specification (USBTMC) Revision 1.0" as
+		published by the USB-IF.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		This file determines if the transaction of the USB TMC
+		device is to be automatically aborted if there is any error.
+		For more details about this, please see the document,
+		"Universal Serial Bus Test and Measurement Class Specification
+		(USBTMC) Revision 1.0" as published by the USB-IF.
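
A brief shell sketch of how these attributes might be exercised once a usbtmc
device has bound (the interface name "1-2:1.0" is purely illustrative, and the
boolean files are assumed to accept 0/1):

    $ cd /sys/bus/usb/drivers/usbtmc/devices/1-2:1.0
    $ cat interface_capabilities device_capabilities
    $ echo 1 > TermCharEnabled
    $ echo 1 > auto_abort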
diff --git a/Documentation/ABI/testing/sysfs-bus-umc b/Documentation/ABI/testing/sysfs-bus-umc
new file mode 100644
index 0000000..948fec4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-umc
@@ -0,0 +1,28 @@
+What:           /sys/bus/umc/
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The Wireless Host Controller Interface (WHCI)
+                specification describes a PCI-based device with
+                multiple capabilities; the UWB Multi-interface
+                Controller (UMC).
+
+                The umc bus presents each of the individual
+                capabilities as a device.
+
+What:           /sys/bus/umc/devices/.../capability_id
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The ID of this capability, with 0 being the radio
+                controller capability.
+
+What:           /sys/bus/umc/devices/.../version
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The specification version this capability's hardware
+                interface complies with.
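
For illustration, the capability IDs and interface versions of all UMC devices
on a WHCI card can be dumped from the shell (output depends on the hardware):

    $ grep . /sys/bus/umc/devices/*/capability_id
    $ grep . /sys/bus/umc/devices/*/version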
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 11a3c16..7772928 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -85,3 +85,62 @@
 Users:
 		PowerTOP <power@bughost.org>
 		http://www.lesswatts.org/projects/powertop/
+
+What:		/sys/bus/usb/device/<busnum>-<devnum>...:<config num>-<interface num>/supports_autosuspend
+Date:		January 2008
+KernelVersion:	2.6.27
+Contact:	Sarah Sharp <sarah.a.sharp@intel.com>
+Description:
+		When read, this file returns 1 if the interface driver
+		for this interface supports autosuspend.  It also
+		returns 1 if no driver has claimed this interface, as an
+		unclaimed interface will not stop the device from being
+		autosuspended if all other interface drivers are idle.
+		The file returns 0 if autosuspend support has not been
+		added to the driver.
+Users:
+		USB PM tool
+		git://git.moblin.org/users/sarah/usb-pm-tool/
+
+What:		/sys/bus/usb/device/.../authorized
+Date:		July 2008
+KernelVersion:	2.6.26
+Contact:	David Vrabel <david.vrabel@csr.com>
+Description:
+		Authorized devices are available for use by device
+		drivers, non-authorized ones are not.  By default, wired
+		USB devices are authorized.
+
+		Certified Wireless USB devices are not authorized
+		initially and should be (by writing 1) after the
+		device has been authenticated.
+
+What:		/sys/bus/usb/device/.../wusb_cdid
+Date:		July 2008
+KernelVersion:	2.6.27
+Contact:	David Vrabel <david.vrabel@csr.com>
+Description:
+		For Certified Wireless USB devices only.
+
+		A device's CDID, as 16 space-separated hex octets.
+
+What:		/sys/bus/usb/device/.../wusb_ck
+Date:		July 2008
+KernelVersion:	2.6.27
+Contact:	David Vrabel <david.vrabel@csr.com>
+Description:
+		For Certified Wireless USB devices only.
+
+		Write the device's connection key (CK) to start the
+		authentication of the device.  The CK is 16
+		space-separated hex octets.
+
+What:		/sys/bus/usb/device/.../wusb_disconnect
+Date:		July 2008
+KernelVersion:	2.6.27
+Contact:	David Vrabel <david.vrabel@csr.com>
+Description:
+		For Certified Wireless USB devices only.
+
+		Write a 1 to force the device to disconnect
+		(equivalent to unplugging a wired USB device).
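
A rough sketch of the Certified Wireless USB authorization flow from the
shell, assuming $DEV points at the device's sysfs directory and using an
obviously made-up connection key:

    $ cat $DEV/authorized                # 0 until the device is authenticated
    $ cat $DEV/wusb_cdid                 # 16 space-separated hex octets
    $ echo "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" > $DEV/wusb_ck
    $ echo 1 > $DEV/authorized
    $ echo 1 > $DEV/wusb_disconnect      # later: force a disconnect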
diff --git a/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg b/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
new file mode 100644
index 0000000..cb830df
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
@@ -0,0 +1,43 @@
+Where:		/sys/bus/usb/.../powered
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls whether the device's display will be powered.
+		A value of 0 is off and a non-zero value is on.
+
+Where:		/sys/bus/usb/.../mode_msb
+Where:		/sys/bus/usb/.../mode_lsb
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls the device's display mode.
+		For a 6 character display the values are
+			MSB 0x06; LSB 0x3F, and
+		for an 8 character display the values are
+			MSB 0x08; LSB 0xFF.
+
+Where:		/sys/bus/usb/.../textmode
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls the way the device interprets its text buffer.
+		raw:	each character controls its segment manually
+		hex:	each character is between 0-15
+		ascii:	each character is between '0'-'9' and 'A'-'F'.
+
+Where:		/sys/bus/usb/.../text
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	The text (or data) for the device to display
+
+Where:		/sys/bus/usb/.../decimals
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls the decimal places on the device.
+		To set the nth decimal place, give this field
+		the value of 10 ** n. If this field already has
+		the value k with one or more decimal places set,
+		then to set the mth place (where m is not already
+		set), change this field's value to k + 10 ** m.
\ No newline at end of file
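
For example, with the formula above, turning the display on and lighting the
2nd and then additionally the 4th decimal place would be (path abbreviated as
in the entries above):

	$ echo 1 > /sys/bus/usb/.../powered
	$ echo 100 > /sys/bus/usb/.../decimals      # 10 ** 2
	$ echo 10100 > /sys/bus/usb/.../decimals    # 10 ** 2 + 10 ** 4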
diff --git a/Documentation/ABI/testing/sysfs-class-usb_host b/Documentation/ABI/testing/sysfs-class-usb_host
new file mode 100644
index 0000000..46b66ad1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-usb_host
@@ -0,0 +1,25 @@
+What:           /sys/class/usb_host/usb_hostN/wusb_chid
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                Write the CHID (16 space-separated hex octets) for this host controller.
+                This starts the host controller, allowing it to accept connections from
+                WUSB devices.
+
+                Set an all zero CHID to stop the host controller.
+
+What:           /sys/class/usb_host/usb_hostN/wusb_trust_timeout
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                Devices that haven't sent a WUSB packet to the host
+                within 'wusb_trust_timeout' ms are considered to have
+                disconnected and are removed.  The default value of
+                4000 ms is the value required by the WUSB
+                specification.
+
+                Since this relates to security (specifically, the
+                lifetime of PTKs and GTKs) it should not be changed
+                from the default.
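
A sketch of starting and stopping such a host controller (controller number 0
and the CHID value below are placeholders):

    # echo "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" > \
          /sys/class/usb_host/usb_host0/wusb_chid     # start the host
    # echo "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" > \
          /sys/class/usb_host/usb_host0/wusb_chid     # all-zero CHID stops it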
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc
new file mode 100644
index 0000000..a0d18db
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc
@@ -0,0 +1,144 @@
+What:           /sys/class/uwb_rc
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                Interfaces for WiMedia Ultra Wideband Common Radio
+                Platform (UWB) radio controllers.
+
+                Familiarity with the ECMA-368 'High Rate Ultra
+                Wideband MAC and PHY Specification' is assumed.
+
+What:           /sys/class/uwb_rc/beacon_timeout_ms
+Date:           July 2008
+KernelVersion:  2.6.27
+Description:
+                If no beacons are received from a device for at least
+                this time, the device will be considered to have gone
+                away and will be removed.  The default is 3 superframes
+                (~197 ms) as required by the specification.
+
+What:           /sys/class/uwb_rc/uwbN/
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                An individual UWB radio controller.
+
+What:           /sys/class/uwb_rc/uwbN/beacon
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                Write:
+
+                <channel> [<bpst offset>]
+
+                to start beaconing on a specific channel, or stop
+                beaconing if <channel> is -1.  Valid channels depend
+                on the radio controller's supported band groups.
+
+                <bpst offset> may be used to try and join a specific
+                beacon group if more than one was found during a scan.
+
+What:           /sys/class/uwb_rc/uwbN/scan
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                Write:
+
+                <channel> <type> [<bpst offset>]
+
+                to start (or stop) scanning on a channel.  <type> is one of:
+                    0 - scan
+                    1 - scan outside BP
+                    2 - scan while inactive
+                    3 - scanning disabled
+                    4 - scan (with start time of <bpst offset>)
+
+What:           /sys/class/uwb_rc/uwbN/mac_address
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                The EUI-48, in colon-separated hex octets, for this
+                radio controller.  A write will change the radio
+                controller's EUI-48, but only while the device is
+                not beaconing or scanning.
+
+What:           /sys/class/uwb_rc/uwbN/wusbhc
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                A symlink to the device (if any) of the WUSB Host
+                Controller PAL using this radio controller.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                A neighbour UWB device that has either been detected
+                as part of a scan or is a member of the radio
+                controller's beacon group.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/BPST
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                The time (using the radio controller's internal 1 ms
+                interval superframe timer) at which the last beacon
+                from this device was received.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/DevAddr
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                The current DevAddr of this device in colon separated
+                hex octets.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/EUI_48
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                The EUI-48 of this device in colon separated hex
+                octets.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/IEs
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                The latest IEs included in this device's beacon, in
+                space separated hex octets with one IE per line.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/LQE
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                Link Quality Estimate - the Signal to Noise Ratio
+                (SNR) of all packets received from this device in dB.
+                This gives an estimate of a suitable PHY rate. Refer
+                to [ECMA-368] section 13.3 for more details.
+
+What:           /sys/class/uwb_rc/uwbN/<EUI-48>/RSSI
+Date:           July 2008
+KernelVersion:  2.6.27
+Contact:        linux-usb@vger.kernel.org
+Description:
+                Received Signal Strength Indication - the strength of
+                the received signal in dB.  LQE is a more useful
+                measure of the radio link quality.
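
To illustrate the beacon and scan interfaces described above (uwb0 and the
channel number are examples only; valid channels depend on the supported band
groups):

    $ cat /sys/class/uwb_rc/uwb0/mac_address
    # echo 15 > /sys/class/uwb_rc/uwb0/beacon        # start beaconing on channel 15
    # echo "15 0" > /sys/class/uwb_rc/uwb0/scan      # scan channel 15, type 0 (scan)
    # echo -1 > /sys/class/uwb_rc/uwb0/beacon        # stop beaconing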
diff --git a/Documentation/ABI/testing/sysfs-wusb_cbaf b/Documentation/ABI/testing/sysfs-wusb_cbaf
new file mode 100644
index 0000000..a99c5f8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-wusb_cbaf
@@ -0,0 +1,100 @@
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_*
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                Various files for managing Cable Based Association of
+                (wireless) USB devices.
+
+                The sequence of operations should be:
+
+                1. Device is plugged in.
+
+                2. The connection manager (CM) sees a device with CBA capability.
+                   (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
+
+                3. The CM writes the host name, supported band groups,
+                   and the CHID (host ID) into the wusb_host_name,
+                   wusb_host_band_groups and wusb_chid files. These
+                   get sent to the device and the CDID (if any) for
+                   this host is requested.
+
+                4. The CM can verify that the device's supported band
+                   groups (wusb_device_band_groups) are compatible
+                   with the host.
+
+                5. The CM reads the wusb_cdid file.
+
+                6. The CM looks it up in its database.
+
+                   - If it has a matching CHID,CDID entry, the device
+                     has been authorized before and nothing further
+                     needs to be done.
+
+                   - If the CDID is zero (or the CM doesn't find a
+                     matching CDID in its database), the device is
+                     assumed not to be known.  The CM may associate
+                     the host with the device by writing a randomly
+                     generated CDID to wusb_cdid and then a random CK
+                     to wusb_ck (this uploads the new CC to the
+                     device).
+
+                     The CM may choose to prompt the user before
+                     associating with a new device.
+
+                7. Device is unplugged.
+
+                References:
+                  [WUSB-AM] Association Models Supplement to the
+                            Certified Wireless Universal Serial Bus
+                            Specification, version 1.0.
+
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_chid
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The CHID of the host formatted as 16 space-separated
+                hex octets.
+
+                A write fetches the device's supported band groups and
+                the CDID for any existing association with this host.
+
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_name
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                A friendly name for the host as a UTF-8 encoded string.
+
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_band_groups
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The band groups supported by the host, in the format
+                defined in [WUSB-AM].
+
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_device_band_groups
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The band groups supported by the device, in the format
+                defined in [WUSB-AM].
+
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_cdid
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The device's CDID formatted as 16 space-separated hex
+                octets.
+
+What:           /sys/bus/usb/drivers/wusb_cbaf/.../wusb_ck
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                Write 16 space-separated random, hex octets to
+                associate with the device.
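
Putting the sequence above together, a cable-based association session might
look roughly like this from the CM's side (the interface path, band group
value, CDID and CK below are all placeholders):

    $ cd /sys/bus/usb/drivers/wusb_cbaf/4-1:1.0
    $ echo "My WUSB host" > wusb_host_name
    $ echo "0001" > wusb_host_band_groups
    $ echo "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" > wusb_chid
    $ cat wusb_device_band_groups        # check compatibility with the host
    $ cat wusb_cdid                      # zero if the device is not yet known
    $ echo "10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f" > wusb_cdid
    $ echo "20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f" > wusb_ck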
diff --git a/Documentation/DocBook/gadget.tmpl b/Documentation/DocBook/gadget.tmpl
index ea3bc95..6ef2f00 100644
--- a/Documentation/DocBook/gadget.tmpl
+++ b/Documentation/DocBook/gadget.tmpl
@@ -557,6 +557,9 @@
 </para>
 
 !Edrivers/usb/gadget/f_acm.c
+!Edrivers/usb/gadget/f_ecm.c
+!Edrivers/usb/gadget/f_subset.c
+!Edrivers/usb/gadget/f_obex.c
 !Edrivers/usb/gadget/f_serial.c
 
 </sect1>
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index 4c63e58..ae15d55 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -1105,7 +1105,7 @@
     </listitem>
     <listitem>
      <para>
-      Function names as strings (__FUNCTION__).
+      Function names as strings (__func__).
      </para>
     </listitem>
     <listitem>
diff --git a/Documentation/MSI-HOWTO.txt b/Documentation/MSI-HOWTO.txt
index a51f693..256defd7 100644
--- a/Documentation/MSI-HOWTO.txt
+++ b/Documentation/MSI-HOWTO.txt
@@ -236,10 +236,8 @@
 MSI-X structure. The implementation of MSI support requires the PCI
 subsystem, not a device driver, to maintain full control of the MSI-X
 table/MSI-X PBA (Pending Bit Array) and MMIO address space of the MSI-X
-table/MSI-X PBA.  A device driver is prohibited from requesting the MMIO
-address space of the MSI-X table/MSI-X PBA. Otherwise, the PCI subsystem
-will fail enabling MSI-X on its hardware device when it calls the function
-pci_enable_msix().
+table/MSI-X PBA.  A device driver should not access the MMIO address
+space of the MSI-X table/MSI-X PBA.
 
 5.3.2 API pci_enable_msix
 
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 8d4dc62..fd4907a 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -163,6 +163,10 @@
 	o class and classmask fields default to 0
 	o driver_data defaults to 0UL.
 
+Note that driver_data must match the value used by any of the pci_device_id
+entries defined in the driver. This makes the driver_data field mandatory
+if all the pci_device_id entries have a non-zero driver_data value.
+
 Once added, the driver probe routine will be invoked for any unclaimed
 PCI devices listed in its (newly updated) pci_ids list.
 
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index 16c2512..ddeb14b 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -203,22 +203,17 @@
 
 3.3 helper functions
 
-3.3.1 int pci_find_aer_capability(struct pci_dev *dev);
-pci_find_aer_capability locates the PCI Express AER capability
-in the device configuration space. If the device doesn't support
-PCI-Express AER, the function returns 0.
-
-3.3.2 int pci_enable_pcie_error_reporting(struct pci_dev *dev);
+3.3.1 int pci_enable_pcie_error_reporting(struct pci_dev *dev);
 pci_enable_pcie_error_reporting enables the device to send error
 messages to root port when an error is detected. Note that devices
 don't enable the error reporting by default, so device drivers need
 call this function to enable it.
 
-3.3.3 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
+3.3.2 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
 pci_disable_pcie_error_reporting disables the device to send error
 messages to root port when an error is detected.
 
-3.3.4 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
+3.3.3 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
 pci_cleanup_aer_uncorrect_error_status cleanups the uncorrectable
 error status register.
 
diff --git a/Documentation/cgroups.txt b/Documentation/cgroups/cgroups.txt
similarity index 100%
rename from Documentation/cgroups.txt
rename to Documentation/cgroups/cgroups.txt
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
new file mode 100644
index 0000000..c50ab58
--- /dev/null
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -0,0 +1,99 @@
+	The cgroup freezer is useful to batch job management systems which start
+and stop sets of tasks in order to schedule the resources of a machine
+according to the desires of a system administrator. This sort of program
+is often used on HPC clusters to schedule access to the cluster as a
+whole. The cgroup freezer uses cgroups to describe the set of tasks to
+be started/stopped by the batch job management system. It also provides
+a means to start and stop the tasks composing the job.
+
+	The cgroup freezer will also be useful for checkpointing running groups
+of tasks. The freezer allows the checkpoint code to obtain a consistent
+image of the tasks by attempting to force the tasks in a cgroup into a
+quiescent state. Once the tasks are quiescent another task can
+walk /proc or invoke a kernel interface to gather information about the
+quiesced tasks. Checkpointed tasks can be restarted later should a
+recoverable error occur. This also allows the checkpointed tasks to be
+migrated between nodes in a cluster by copying the gathered information
+to another node and restarting the tasks there.
+
+	Sequences of SIGSTOP and SIGCONT are not always sufficient for stopping
+and resuming tasks in userspace. Both of these signals are observable
+from within the tasks we wish to freeze. While SIGSTOP cannot be caught,
+blocked, or ignored, it can be seen by waiting or ptracing parent tasks.
+SIGCONT is especially unsuitable since it can be caught by the task. Any
+programs designed to watch for SIGSTOP and SIGCONT could be broken by
+attempting to use SIGSTOP and SIGCONT to stop and resume tasks. We can
+demonstrate this problem using nested bash shells:
+
+	$ echo $$
+	16644
+	$ bash
+	$ echo $$
+	16690
+
+	From a second, unrelated bash shell:
+	$ kill -SIGSTOP 16690
+	$ kill -SIGCONT 16690
+
+	<at this point 16690 exits and causes 16644 to exit too>
+
+	This happens because bash can observe both signals and choose how it
+responds to them.
+
+	Another example of a program which catches and responds to these
+signals is gdb. In fact any program designed to use ptrace is likely to
+have a problem with this method of stopping and resuming tasks.
+
+	 In contrast, the cgroup freezer uses the kernel freezer code to
+prevent the freeze/unfreeze cycle from becoming visible to the tasks
+being frozen. This allows the bash example above and gdb to run as
+expected.
+
+	The freezer subsystem in the container filesystem defines a file named
+freezer.state. Writing "FROZEN" to the state file will freeze all tasks in the
+cgroup. Subsequently writing "THAWED" will unfreeze the tasks in the cgroup.
+Reading will return the current state.
+
+* Examples of usage :
+
+   # mkdir /containers/freezer
+   # mount -t cgroup -ofreezer freezer  /containers
+   # mkdir /containers/0
+   # echo $some_pid > /containers/0/tasks
+
+to get status of the freezer subsystem :
+
+   # cat /containers/0/freezer.state
+   THAWED
+
+to freeze all tasks in the container :
+
+   # echo FROZEN > /containers/0/freezer.state
+   # cat /containers/0/freezer.state
+   FREEZING
+   # cat /containers/0/freezer.state
+   FROZEN
+
+to unfreeze all tasks in the container :
+
+   # echo THAWED > /containers/0/freezer.state
+   # cat /containers/0/freezer.state
+   THAWED
+
+This is the basic mechanism which should do the right thing for user space tasks
+in a simple scenario.
+
+It's important to note that freezing can be incomplete. In that case we return
+EBUSY. This means that some tasks in the cgroup are busy doing something that
+prevents us from completely freezing the cgroup at this time. After EBUSY,
+the cgroup will remain partially frozen -- reflected by freezer.state reporting
+"FREEZING" when read. The state will remain "FREEZING" until one of these
+things happens:
+
+	1) Userspace cancels the freezing operation by writing "THAWED" to
+		the freezer.state file
+	2) Userspace retries the freezing operation by writing "FROZEN" to
+		the freezer.state file (writing "FREEZING" is not legal
+		and returns EIO)
+	3) The tasks that blocked the cgroup from entering the "FROZEN"
+		state disappear from the cgroup's set of tasks.
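
For instance, a management script wanting a hard guarantee could retry the
freeze and fall back to cancelling it (paths as in the examples above):

   # echo FROZEN > /containers/0/freezer.state || echo "freeze incomplete"
   # cat /containers/0/freezer.state                 # may still report FREEZING
   # echo FROZEN > /containers/0/freezer.state       # retry, or ...
   # echo THAWED > /containers/0/freezer.state       # ... cancel the partial freeze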
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
index 9b53d58..1c07547 100644
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -112,14 +112,22 @@
 
 2.2.1 Accounting details
 
-All mapped pages (RSS) and unmapped user pages (Page Cache) are accounted.
-RSS pages are accounted at the time of page_add_*_rmap() unless they've already
-been accounted for earlier. A file page will be accounted for as Page Cache;
-it's mapped into the page tables of a process, duplicate accounting is carefully
-avoided. Page Cache pages are accounted at the time of add_to_page_cache().
-The corresponding routines that remove a page from the page tables or removes
-a page from Page Cache is used to decrement the accounting counters of the
-cgroup.
+All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
+(Some pages which can never be reclaimed and will not be on the global LRU
+ are not accounted. We only account pages under usual VM management.)
+
+RSS pages are accounted at page_fault unless they've already been accounted
+for earlier. A file page will be accounted for as Page Cache when it's
+inserted into inode (radix-tree). While it's mapped into the page tables of
+processes, duplicate accounting is carefully avoided.
+
+An RSS page is unaccounted when it's fully unmapped. A PageCache page is
+unaccounted when it's removed from radix-tree.
+
+At page migration, accounting information is kept.
+
+Note: we only account pages on the LRU because our purpose is to control the
+amount of used pages; pages not on the LRU tend to be out of control from the
+VM's point of view.
 
 2.3 Shared Page Accounting
 
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index 47e568a..5c86c25 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -48,7 +48,7 @@
 job placement on large systems.
 
 Cpusets use the generic cgroup subsystem described in
-Documentation/cgroup.txt.
+Documentation/cgroups/cgroups.txt.
 
 Requests by a task, using the sched_setaffinity(2) system call to
 include CPUs in its CPU affinity mask, and using the mbind(2) and
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 05c8064..2be0824 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2571,6 +2571,9 @@
 		160 = /dev/usb/legousbtower0	1st USB Legotower device
 		    ...
 		175 = /dev/usb/legousbtower15	16th USB Legotower device
+		176 = /dev/usb/usbtmc1	First USB TMC device
+		   ...
+		191 = /dev/usb/usbtmc16	16th USB TMC device
 		240 = /dev/usb/dabusb0	First daubusb device
 		    ...
 		243 = /dev/usb/dabusb3	Fourth dabusb device
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index f5f812d..05d71b4b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -359,3 +359,11 @@
       eliminates the need for ide-scsi. The new method is more
       efficient in every way.
 Who:  FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+---------------------------
+
+What:	i2c_attach_client(), i2c_detach_client(), i2c_driver->detach_client()
+When:	2.6.29 (ideally) or 2.6.30 (more likely)
+Why:	Deprecated by the new (standard) device driver binding model. Use
+	i2c_driver->probe() and ->remove() instead.
+Who:	Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 295f26c..9dd2a3bb 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -96,6 +96,11 @@
 errors=continue		Keep going on a filesystem error.
 errors=panic		Panic and halt the machine if an error occurs.
 
+data_err=ignore(*)	Just print an error message if an error occurs
+			in a file data buffer in ordered mode.
+data_err=abort		Abort the journal if an error occurs in a file
+			data buffer in ordered mode.
+
 grpid			Give objects the same group ID as their creator.
 bsdgroups
 
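
For example, the stricter behaviour can be requested at mount time (device and
mount point are placeholders):

	# mount -t ext3 -o data=ordered,data_err=abort /dev/sdb1 /mnt
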
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index eb154ef..174eaff 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -2,19 +2,24 @@
 Ext4 Filesystem
 ===============
 
-This is a development version of the ext4 filesystem, an advanced level
-of the ext3 filesystem which incorporates scalability and reliability
-enhancements for supporting large filesystems (64 bit) in keeping with
-increasing disk capacities and state-of-the-art feature requirements.
+Ext4 is an an advanced level of the ext3 filesystem which incorporates
+scalability and reliability enhancements for supporting large filesystems
+(64 bit) in keeping with increasing disk capacities and state-of-the-art
+feature requirements.
 
-Mailing list: linux-ext4@vger.kernel.org
+Mailing list:	linux-ext4@vger.kernel.org
+Web site:	http://ext4.wiki.kernel.org
 
 
 1. Quick usage instructions:
 ===========================
 
+Note: More extensive information for getting started with ext4 can be
+      found at the ext4 wiki site at the URL:
+      http://ext4.wiki.kernel.org/index.php/Ext4_Howto
+
   - Compile and install the latest version of e2fsprogs (as of this
-    writing version 1.41) from:
+    writing version 1.41.3) from:
 
     http://sourceforge.net/project/showfiles.php?group_id=2406
 	
@@ -36,11 +41,9 @@
 
     	# mke2fs -t ext4 /dev/hda1
 
-    Or configure an existing ext3 filesystem to support extents and set
-    the test_fs flag to indicate that it's ok for an in-development
-    filesystem to touch this filesystem:
+    Or to configure an existing ext3 filesystem to support extents: 
 
-	# tune2fs -O extents -E test_fs /dev/hda1
+	# tune2fs -O extents /dev/hda1
 
     If the filesystem was created with 128 byte inodes, it can be
     converted to use 256 byte for greater efficiency via:
@@ -104,8 +107,8 @@
 The big performance win will come with mballoc, delalloc and flex_bg
 grouping of bitmaps and inode tables.  Some test results available here:
 
- - http://www.bullopensource.org/ext4/20080530/ffsb-write-2.6.26-rc2.html
- - http://www.bullopensource.org/ext4/20080530/ffsb-readwrite-2.6.26-rc2.html
+ - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-write-2.6.27-rc1.html
+ - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-readwrite-2.6.27-rc1.html
 
 3. Options
 ==========
@@ -214,9 +217,6 @@
 bsddf		(*)	Make 'df' act like BSD.
 minixdf			Make 'df' act like Minix.
 
-check=none		Don't do extra checking of bitmaps on mount.
-nocheck
-
 debug			Extra debugging information is sent to syslog.
 
 errors=remount-ro(*)	Remount the filesystem read-only on an error.
@@ -253,8 +253,6 @@
 			"nobh" option tries to avoid associating buffer
 			heads (supported only for "writeback" mode).
 
-mballoc		(*)	Use the multiple block allocator for block allocation
-nomballoc		disabled multiple block allocator for block allocation.
 stripe=n		Number of filesystem blocks that mballoc will try
 			to use for allocation size and alignment. For RAID5/6
 			systems this should be the number of data
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index c032bf3..bcceb99 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1384,15 +1384,18 @@
 dirty_background_ratio
 ----------------------
 
-Contains, as a percentage of total system memory, the number of pages at which
-the pdflush background writeback daemon will start writing out dirty data.
+Contains, as a percentage of the dirtyable system memory (free pages + mapped
+pages + file cache, not including locked pages and HugePages), the number of
+pages at which the pdflush background writeback daemon will start writing out
+dirty data.
 
 dirty_ratio
 -----------------
 
-Contains, as a percentage of total system memory, the number of pages at which
-a process which is generating disk writes will itself start writing out dirty
-data.
+Contains, as a percentage of the dirtyable system memory (free pages + mapped
+pages + file cache, not including locked pages and HugePages), the number of
+pages at which a process which is generating disk writes will itself start
+writing out dirty data.
 
 dirty_writeback_centisecs
 -------------------------
@@ -2412,24 +2415,29 @@
 of memory types. If a bit of the bitmask is set, memory segments of the
 corresponding memory type are dumped, otherwise they are not dumped.
 
-The following 4 memory types are supported:
+The following 7 memory types are supported:
   - (bit 0) anonymous private memory
   - (bit 1) anonymous shared memory
   - (bit 2) file-backed private memory
   - (bit 3) file-backed shared memory
   - (bit 4) ELF header pages in file-backed private memory areas (it is
             effective only if the bit 2 is cleared)
+  - (bit 5) hugetlb private memory
+  - (bit 6) hugetlb shared memory
 
   Note that MMIO pages such as frame buffer are never dumped and vDSO pages
   are always dumped regardless of the bitmask status.
 
-Default value of coredump_filter is 0x3; this means all anonymous memory
-segments are dumped.
+  Note that bits 0-4 don't affect hugetlb memory; hugetlb memory is only
+  affected by bits 5-6.
+
+Default value of coredump_filter is 0x23; this means all anonymous memory
+segments and hugetlb private memory are dumped.
 
 If you don't want to dump all shared memory segments attached to pid 1234,
-write 1 to the process's proc file.
+write 0x21 to the process's proc file.
 
-  $ echo 0x1 > /proc/1234/coredump_filter
+  $ echo 0x21 > /proc/1234/coredump_filter
 
 When a new process is created, the process inherits the bitmask status from its
 parent. It is useful to set up coredump_filter before the program runs.
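
Following the same scheme, dumping hugetlb shared memory in addition to the
default 0x23 (bits 0, 1 and 5) means adding bit 6 (0x40), i.e. writing 0x63:

  $ echo 0x63 > /proc/1234/coredump_filter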
diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt
index 6a0d70a..dd84ea3 100644
--- a/Documentation/filesystems/ubifs.txt
+++ b/Documentation/filesystems/ubifs.txt
@@ -86,6 +86,15 @@
 fast_unmount		do not commit on unmount; this option makes
 			unmount faster, but the next mount slower
 			because of the need to replay the journal.
+bulk_read		read more in one go to take advantage of flash
+			media that read faster sequentially
+no_bulk_read (*)	do not bulk-read
+no_chk_data_crc		skip checking of CRCs on data nodes in order to
+			improve read performance. Use this option only
+			if the flash media is highly reliable. The effect
+			of this option is that corruption of the contents
+			of a file can go unnoticed.
+chk_data_crc (*)	do not skip checking CRCs on data nodes
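
For instance, to favour sequential read throughput on reliable flash, a volume
might be mounted with (the volume name is a placeholder):

	# mount -t ubifs -o bulk_read,no_chk_data_crc ubi0:rootfs /mnt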
 
 
 Quick usage instructions
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index c31e029..81c0c59 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -13,8 +13,9 @@
   * Intel 631xESB/632xESB (ESB2)
   * Intel 82801H (ICH8)
   * Intel 82801I (ICH9)
-  * Intel Tolapai
-  * Intel ICH10
+  * Intel EP80579 (Tolapai)
+  * Intel 82801JI (ICH10)
+  * Intel PCH
    Datasheets: Publicly available at the Intel website
 
 Authors: 
@@ -32,7 +33,7 @@
 -----------
 
 The ICH (properly known as the 82801AA), ICH0 (82801AB), ICH2 (82801BA),
-ICH3 (82801CA/CAM) and later devices are Intel chips that are a part of
+ICH3 (82801CA/CAM) and later devices (PCH) are Intel chips that are a part of
 Intel's '810' chipset for Celeron-based PCs, '810E' chipset for
 Pentium-based PCs, '815E' chipset, and others.
 
diff --git a/Documentation/i2c/porting-clients b/Documentation/i2c/porting-clients
deleted file mode 100644
index 7bf82c0..0000000
--- a/Documentation/i2c/porting-clients
+++ /dev/null
@@ -1,160 +0,0 @@
-Revision 7, 2007-04-19
-Jean Delvare <khali@linux-fr.org>
-Greg KH <greg@kroah.com>
-
-This is a guide on how to convert I2C chip drivers from Linux 2.4 to
-Linux 2.6. I have been using existing drivers (lm75, lm78) as examples.
-Then I converted a driver myself (lm83) and updated this document.
-Note that this guide is strongly oriented towards hardware monitoring
-drivers. Many points are still valid for other type of drivers, but
-others may be irrelevant.
-
-There are two sets of points below. The first set concerns technical
-changes. The second set concerns coding policy. Both are mandatory.
-
-Although reading this guide will help you porting drivers, I suggest
-you keep an eye on an already ported driver while porting your own
-driver. This will help you a lot understanding what this guide
-exactly means. Choose the chip driver that is the more similar to
-yours for best results.
-
-Technical changes:
-
-* [Driver type] Any driver that was relying on i2c-isa has to be
-  converted to a proper isa, platform or pci driver. This is not
-  covered by this guide.
-
-* [Includes] Get rid of "version.h" and <linux/i2c-proc.h>.
-  Includes typically look like that:
-  #include <linux/module.h>
-  #include <linux/init.h>
-  #include <linux/slab.h>
-  #include <linux/jiffies.h>
-  #include <linux/i2c.h>
-  #include <linux/hwmon.h>	/* for hardware monitoring drivers */
-  #include <linux/hwmon-sysfs.h>
-  #include <linux/hwmon-vid.h>	/* if you need VRM support */
-  #include <linux/err.h>	/* for class registration */
-  Please respect this inclusion order. Some extra headers may be
-  required for a given driver (e.g. "lm75.h").
-
-* [Addresses] SENSORS_I2C_END becomes I2C_CLIENT_END, ISA addresses
-  are no more handled by the i2c core. Address ranges are no more
-  supported either, define each individual address separately.
-  SENSORS_INSMOD_<n> becomes I2C_CLIENT_INSMOD_<n>.
-
-* [Client data] Get rid of sysctl_id. Try using standard names for
-  register values (for example, temp_os becomes temp_max). You're
-  still relatively free here, but you *have* to follow the standard
-  names for sysfs files (see the Sysctl section below).
-
-* [Function prototypes] The detect functions loses its flags
-  parameter. Sysctl (e.g. lm75_temp) and miscellaneous functions
-  are off the list of prototypes. This usually leaves five
-  prototypes:
-  static int lm75_attach_adapter(struct i2c_adapter *adapter);
-  static int lm75_detect(struct i2c_adapter *adapter, int address,
-      int kind);
-  static void lm75_init_client(struct i2c_client *client);
-  static int lm75_detach_client(struct i2c_client *client);
-  static struct lm75_data lm75_update_device(struct device *dev);
-
-* [Sysctl] All sysctl stuff is of course gone (defines, ctl_table
-  and functions). Instead, you have to define show and set functions for
-  each sysfs file. Only define set for writable values. Take a look at an
-  existing 2.6 driver for details (it87 for example). Don't forget
-  to define the attributes for each file (this is that step that
-  links callback functions). Use the file names specified in
-  Documentation/hwmon/sysfs-interface for the individual files. Also
-  convert the units these files read and write to the specified ones.
-  If you need to add a new type of file, please discuss it on the
-  sensors mailing list <lm-sensors@lm-sensors.org> by providing a
-  patch to the Documentation/hwmon/sysfs-interface file.
-
-* [Attach] The attach function should make sure that the adapter's
-  class has I2C_CLASS_HWMON (or whatever class is suitable for your
-  driver), using the following construct:
-  if (!(adapter->class & I2C_CLASS_HWMON))
-          return 0;
-  Call i2c_probe() instead of i2c_detect().
-
-* [Detect] As mentioned earlier, the flags parameter is gone.
-  The type_name and client_name strings are replaced by a single
-  name string, which will be filled with a lowercase, short string.
-  The labels used for error paths are reduced to the number needed.
-  It is advised that the labels are given descriptive names such as
-  exit and exit_free. Don't forget to properly set err before
-  jumping to error labels. By the way, labels should be left-aligned.
-  Use kzalloc instead of kmalloc.
-  Use i2c_set_clientdata to set the client data (as opposed to
-  a direct access to client->data).
-  Use strlcpy instead of strcpy or snprintf to copy the client name.
-  Replace the sysctl directory registration by calls to
-  device_create_file. Move the driver initialization before any
-  sysfs file creation.
-  Register the client with the hwmon class (using hwmon_device_register)
-  if applicable.
-  Drop client->id.
-  Drop any 24RF08 corruption prevention you find, as this is now done
-  at the i2c-core level, and doing it twice voids it.
-  Don't add I2C_CLIENT_ALLOW_USE to client->flags, it's the default now.
-
-* [Init] Limits must not be set by the driver (can be done later in
-  user-space). Chip should not be reset default (although a module
-  parameter may be used to force it), and initialization should be
-  limited to the strictly necessary steps.
-
-* [Detach] Remove the call to i2c_deregister_entry. Do not log an
-  error message if i2c_detach_client fails, as i2c-core will now do
-  it for you.
-  Unregister from the hwmon class if applicable.
-
-* [Update] The function prototype changed, it is now
-  passed a device structure, which you have to convert to a client
-  using to_i2c_client(dev). The update function should return a
-  pointer to the client data.
-  Don't access client->data directly, use i2c_get_clientdata(client)
-  instead.
-  Use time_after() instead of direct jiffies comparison.
-
-* [Interface] Make sure there is a MODULE_LICENSE() line, at the bottom
-  of the file (after MODULE_AUTHOR() and MODULE_DESCRIPTION(), in this
-  order).
-
-* [Driver] The flags field of the i2c_driver structure is gone.
-  I2C_DF_NOTIFY is now the default behavior.
-  The i2c_driver structure has a driver member, which is itself a
-  structure, those name member should be initialized to a driver name
-  string. i2c_driver itself has no name member anymore.
-
-* [Driver model] Instead of shutdown or reboot notifiers, provide a
-  shutdown() method in your driver.
-
-* [Power management] Use the driver model suspend() and resume()
-  callbacks instead of the obsolete pm_register() calls.
-
-Coding policy:
-
-* [Copyright] Use (C), not (c), for copyright.
-
-* [Debug/log] Get rid of #ifdef DEBUG/#endif constructs whenever you
-  can. Calls to printk for debugging purposes are replaced by calls to
-  dev_dbg where possible, else to pr_debug. Here is an example of how
-  to call it (taken from lm75_detect):
-  dev_dbg(&client->dev, "Starting lm75 update\n");
-  Replace other printk calls with the dev_info, dev_err or dev_warn
-  function, as appropriate.
-
-* [Constants] Constants defines (registers, conversions) should be
-  aligned. This greatly improves readability.
-  Alignments are achieved by the means of tabs, not spaces. Remember
-  that tabs are set to 8 in the Linux kernel code.
-
-* [Layout] Avoid extra empty lines between comments and what they
-  comment. Respect the coding style (see Documentation/CodingStyle),
-  in particular when it comes to placing curly braces.
-
-* [Comments] Make sure that no comment refers to a file that isn't
-  part of the Linux source tree (typically doc/chips/<chip name>),
-  and that remaining comments still match the code. Merging comment
-  lines when possible is encouraged.
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index d73ee11..6b9af7d 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -10,23 +10,21 @@
 ===============
 
 Try to keep the kernel namespace as clean as possible. The best way to
-do this is to use a unique prefix for all global symbols. This is 
+do this is to use a unique prefix for all global symbols. This is
 especially important for exported symbols, but it is a good idea to do
 it for non-exported symbols too. We will use the prefix `foo_' in this
-tutorial, and `FOO_' for preprocessor variables.
+tutorial.
 
 
 The driver structure
 ====================
 
 Usually, you will implement a single driver structure, and instantiate
-all clients from it. Remember, a driver structure contains general access 
+all clients from it. Remember, a driver structure contains general access
 routines, and should be zero-initialized except for fields with data you
 provide.  A client structure holds device-specific information like the
 driver model device node, and its I2C address.
 
-/* iff driver uses driver model ("new style") binding model: */
-
 static struct i2c_device_id foo_idtable[] = {
 	{ "foo", my_id_for_foo },
 	{ "bar", my_id_for_bar },
@@ -40,7 +38,6 @@
 		.name	= "foo",
 	},
 
-	/* iff driver uses driver model ("new style") binding model: */
 	.id_table	= foo_idtable,
 	.probe		= foo_probe,
 	.remove		= foo_remove,
@@ -49,24 +46,19 @@
 	.detect		= foo_detect,
 	.address_data	= &addr_data,
 
-	/* else, driver uses "legacy" binding model: */
-	.attach_adapter	= foo_attach_adapter,
-	.detach_client	= foo_detach_client,
-
-	/* these may be used regardless of the driver binding model */
 	.shutdown	= foo_shutdown,	/* optional */
 	.suspend	= foo_suspend,	/* optional */
 	.resume		= foo_resume,	/* optional */
-	.command	= foo_command,	/* optional */
+	.command	= foo_command,	/* optional, deprecated */
 }
- 
+
 The name field is the driver name, and must not contain spaces.  It
 should match the module name (if the driver can be compiled as a module),
 although you can use MODULE_ALIAS (passing "foo" in this example) to add
 another name for the module.  If the driver name doesn't match the module
 name, the module won't be automatically loaded (hotplug/coldplug).
 
-All other fields are for call-back functions which will be explained 
+All other fields are for call-back functions which will be explained
 below.
 
 
@@ -74,34 +66,13 @@
 =================
 
 Each client structure has a special `data' field that can point to any
-structure at all.  You should use this to keep device-specific data,
-especially in drivers that handle multiple I2C or SMBUS devices.  You
-do not always need this, but especially for `sensors' drivers, it can
-be very useful.
+structure at all.  You should use this to keep device-specific data.
 
 	/* store the value */
 	void i2c_set_clientdata(struct i2c_client *client, void *data);
 
 	/* retrieve the value */
-	void *i2c_get_clientdata(struct i2c_client *client);
-
-An example structure is below.
-
-  struct foo_data {
-    struct i2c_client client;
-    enum chips type;       /* To keep the chips type for `sensors' drivers. */
-   
-    /* Because the i2c bus is slow, it is often useful to cache the read
-       information of a chip for some time (for example, 1 or 2 seconds).
-       It depends of course on the device whether this is really worthwhile
-       or even sensible. */
-    struct mutex update_lock;     /* When we are reading lots of information,
-                                     another process should not update the
-                                     below information */
-    char valid;                   /* != 0 if the following fields are valid. */
-    unsigned long last_updated;   /* In jiffies */
-    /* Add the read information here too */
-  };
+	void *i2c_get_clientdata(const struct i2c_client *client);
 
 
 Accessing the client
@@ -109,11 +80,9 @@
 
 Let's say we have a valid client structure. At some time, we will need
 to gather information from the client, or write new information to the
-client. How we will export this information to user-space is less 
-important at this moment (perhaps we do not need to do this at all for
-some obscure clients). But we need generic reading and writing routines.
+client.
 
-I have found it useful to define foo_read and foo_write function for this.
+I have found it useful to define foo_read and foo_write functions for this.
 For some cases, it will be easier to call the i2c functions directly,
 but many chips have some kind of register-value idea that can easily
 be encapsulated.
@@ -121,33 +90,33 @@
 The below functions are simple examples, and should not be copied
 literally.
 
-  int foo_read_value(struct i2c_client *client, u8 reg)
-  {
-    if (reg < 0x10) /* byte-sized register */
-      return i2c_smbus_read_byte_data(client,reg);
-    else /* word-sized register */
-      return i2c_smbus_read_word_data(client,reg);
-  }
+int foo_read_value(struct i2c_client *client, u8 reg)
+{
+	if (reg < 0x10)	/* byte-sized register */
+		return i2c_smbus_read_byte_data(client, reg);
+	else		/* word-sized register */
+		return i2c_smbus_read_word_data(client, reg);
+}
 
-  int foo_write_value(struct i2c_client *client, u8 reg, u16 value)
-  {
-    if (reg == 0x10) /* Impossible to write - driver error! */ {
-      return -1;
-    else if (reg < 0x10) /* byte-sized register */
-      return i2c_smbus_write_byte_data(client,reg,value);
-    else /* word-sized register */
-      return i2c_smbus_write_word_data(client,reg,value);
-  }
+int foo_write_value(struct i2c_client *client, u8 reg, u16 value)
+{
+	if (reg == 0x10)	/* Impossible to write - driver error! */
+		return -EINVAL;
+	else if (reg < 0x10)	/* byte-sized register */
+		return i2c_smbus_write_byte_data(client, reg, value);
+	else			/* word-sized register */
+		return i2c_smbus_write_word_data(client, reg, value);
+}
 
 
 Probing and attaching
 =====================
 
 The Linux I2C stack was originally written to support access to hardware
-monitoring chips on PC motherboards, and thus it embeds some assumptions
-that are more appropriate to SMBus (and PCs) than to I2C.  One of these
-assumptions is that most adapters and devices drivers support the SMBUS_QUICK
-protocol to probe device presence.  Another is that devices and their drivers
+monitoring chips on PC motherboards, and thus used to embed some assumptions
+that were more appropriate to SMBus (and PCs) than to I2C.  One of these
+assumptions was that most adapters and devices drivers support the SMBUS_QUICK
+protocol to probe device presence.  Another was that devices and their drivers
 can be sufficiently configured using only such probe primitives.
 
 As Linux and its I2C stack became more widely used in embedded systems
@@ -164,6 +133,9 @@
 objects after SMBus style probing, while the Linux driver model expects
 drivers to be given such device objects in their probe() routines.
 
+The legacy model is deprecated now and will soon be removed, so we no
+longer document it here.
+
 
 Standard Driver Model Binding ("New Style")
 -------------------------------------------
@@ -193,8 +165,8 @@
 the driver knows which one in the table matched.
 
 
-Device Creation (Standard driver model)
----------------------------------------
+Device Creation
+---------------
 
 If you know for a fact that an I2C device is connected to a given I2C bus,
 you can instantiate that device by simply filling an i2c_board_info
@@ -221,8 +193,8 @@
 reference for later use.
 
 
-Device Detection (Standard driver model)
-----------------------------------------
+Device Detection
+----------------
 
 Sometimes you do not know in advance which I2C devices are connected to
 a given I2C bus.  This is for example the case of hardware monitoring
@@ -246,8 +218,8 @@
 quickly.
 
 
-Device Deletion (Standard driver model)
----------------------------------------
+Device Deletion
+---------------
 
 Each I2C device which has been created using i2c_new_device() or
 i2c_new_probed_device() can be unregistered by calling
@@ -256,264 +228,37 @@
 device can't survive its parent in the device driver model.
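+
+For illustration, assuming you kept the client pointer returned by
+i2c_new_device() as mentioned above, the teardown path is simply:
+
+	/* drop the device we instantiated earlier */
+	i2c_unregister_device(client);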
 
 
-Legacy Driver Binding Model
----------------------------
+Initializing the driver
+=======================
 
-Most i2c devices can be present on several i2c addresses; for some this
-is determined in hardware (by soldering some chip pins to Vcc or Ground),
-for others this can be changed in software (by writing to specific client
-registers). Some devices are usually on a specific address, but not always;
-and some are even more tricky. So you will probably need to scan several
-i2c addresses for your clients, and do some sort of detection to see
-whether it is actually a device supported by your driver.
+When the kernel is booted, or when your foo driver module is inserted,
+you have to do some initializing. Fortunately, just registering the
+driver module is usually enough.
 
-To give the user a maximum of possibilities, some default module parameters
-are defined to help determine what addresses are scanned. Several macros
-are defined in i2c.h to help you support them, as well as a generic
-detection algorithm.
+static int __init foo_init(void)
+{
+	return i2c_add_driver(&foo_driver);
+}
 
-You do not have to use this parameter interface; but don't try to use
-function i2c_probe() if you don't.
+static void __exit foo_cleanup(void)
+{
+	i2c_del_driver(&foo_driver);
+}
 
+/* Substitute your own name and email address */
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"
+MODULE_DESCRIPTION("Driver for Barf Inc. Foo I2C devices");
 
-Probing classes (Legacy model)
-------------------------------
+/* a few non-GPL license types are also allowed */
+MODULE_LICENSE("GPL");
 
-All parameters are given as lists of unsigned 16-bit integers. Lists are
-terminated by I2C_CLIENT_END.
-The following lists are used internally:
+module_init(foo_init);
+module_exit(foo_cleanup);
 
-  normal_i2c: filled in by the module writer. 
-     A list of I2C addresses which should normally be examined.
-   probe: insmod parameter. 
-     A list of pairs. The first value is a bus number (-1 for any I2C bus), 
-     the second is the address. These addresses are also probed, as if they 
-     were in the 'normal' list.
-   ignore: insmod parameter.
-     A list of pairs. The first value is a bus number (-1 for any I2C bus), 
-     the second is the I2C address. These addresses are never probed. 
-     This parameter overrules the 'normal_i2c' list only.
-   force: insmod parameter. 
-     A list of pairs. The first value is a bus number (-1 for any I2C bus),
-     the second is the I2C address. A device is blindly assumed to be on
-     the given address, no probing is done. 
-
-Additionally, kind-specific force lists may optionally be defined if
-the driver supports several chip kinds. They are grouped in a
-NULL-terminated list of pointers named forces, those first element if the
-generic force list mentioned above. Each additional list correspond to an
-insmod parameter of the form force_<kind>.
-
-Fortunately, as a module writer, you just have to define the `normal_i2c' 
-parameter. The complete declaration could look like this:
-
-  /* Scan 0x4c to 0x4f */
-  static const unsigned short normal_i2c[] = { 0x4c, 0x4d, 0x4e, 0x4f,
-                                               I2C_CLIENT_END };
-
-  /* Magic definition of all other variables and things */
-  I2C_CLIENT_INSMOD;
-  /* Or, if your driver supports, say, 2 kind of devices: */
-  I2C_CLIENT_INSMOD_2(foo, bar);
-
-If you use the multi-kind form, an enum will be defined for you:
-  enum chips { any_chip, foo, bar, ... }
-You can then (and certainly should) use it in the driver code.
-
-Note that you *have* to call the defined variable `normal_i2c',
-without any prefix!
-
-
-Attaching to an adapter (Legacy model)
---------------------------------------
-
-Whenever a new adapter is inserted, or for all adapters if the driver is
-being registered, the callback attach_adapter() is called. Now is the
-time to determine what devices are present on the adapter, and to register
-a client for each of them.
-
-The attach_adapter callback is really easy: we just call the generic
-detection function. This function will scan the bus for us, using the
-information as defined in the lists explained above. If a device is
-detected at a specific address, another callback is called.
-
-  int foo_attach_adapter(struct i2c_adapter *adapter)
-  {
-    return i2c_probe(adapter,&addr_data,&foo_detect_client);
-  }
-
-Remember, structure `addr_data' is defined by the macros explained above,
-so you do not have to define it yourself.
-
-The i2c_probe function will call the foo_detect_client
-function only for those i2c addresses that actually have a device on
-them (unless a `force' parameter was used). In addition, addresses that
-are already in use (by some other registered client) are skipped.
-
-
-The detect client function (Legacy model)
------------------------------------------
-
-The detect client function is called by i2c_probe. The `kind' parameter
-contains -1 for a probed detection, 0 for a forced detection, or a positive
-number for a forced detection with a chip type forced.
-
-Returning an error different from -ENODEV in a detect function will cause
-the detection to stop: other addresses and adapters won't be scanned.
-This should only be done on fatal or internal errors, such as a memory
-shortage or i2c_attach_client failing.
-
-For now, you can ignore the `flags' parameter. It is there for future use.
-
-  int foo_detect_client(struct i2c_adapter *adapter, int address, 
-                        int kind)
-  {
-    int err = 0;
-    int i;
-    struct i2c_client *client;
-    struct foo_data *data;
-    const char *name = "";
-   
-    /* Let's see whether this adapter can support what we need.
-       Please substitute the things you need here! */
-    if (!i2c_check_functionality(adapter,I2C_FUNC_SMBUS_WORD_DATA |
-                                        I2C_FUNC_SMBUS_WRITE_BYTE))
-       goto ERROR0;
-
-    /* OK. For now, we presume we have a valid client. We now create the
-       client structure, even though we cannot fill it completely yet.
-       But it allows us to access several i2c functions safely */
-    
-    if (!(data = kzalloc(sizeof(struct foo_data), GFP_KERNEL))) {
-      err = -ENOMEM;
-      goto ERROR0;
-    }
-
-    client = &data->client;
-    i2c_set_clientdata(client, data);
-
-    client->addr = address;
-    client->adapter = adapter;
-    client->driver = &foo_driver;
-
-    /* Now, we do the remaining detection. If no `force' parameter is used. */
-
-    /* First, the generic detection (if any), that is skipped if any force
-       parameter was used. */
-    if (kind < 0) {
-      /* The below is of course bogus */
-      if (foo_read(client, FOO_REG_GENERIC) != FOO_GENERIC_VALUE)
-         goto ERROR1;
-    }
-
-    /* Next, specific detection. This is especially important for `sensors'
-       devices. */
-
-    /* Determine the chip type. Not needed if a `force_CHIPTYPE' parameter
-       was used. */
-    if (kind <= 0) {
-      i = foo_read(client, FOO_REG_CHIPTYPE);
-      if (i == FOO_TYPE_1) 
-        kind = chip1; /* As defined in the enum */
-      else if (i == FOO_TYPE_2)
-        kind = chip2;
-      else {
-        printk("foo: Ignoring 'force' parameter for unknown chip at "
-               "adapter %d, address 0x%02x\n",i2c_adapter_id(adapter),address);
-        goto ERROR1;
-      }
-    }
-
-    /* Now set the type and chip names */
-    if (kind == chip1) {
-      name = "chip1";
-    } else if (kind == chip2) {
-      name = "chip2";
-    }
-   
-    /* Fill in the remaining client fields. */
-    strlcpy(client->name, name, I2C_NAME_SIZE);
-    data->type = kind;
-    mutex_init(&data->update_lock); /* Only if you use this field */
-
-    /* Any other initializations in data must be done here too. */
-
-    /* This function can write default values to the client registers, if
-       needed. */
-    foo_init_client(client);
-
-    /* Tell the i2c layer a new client has arrived */
-    if ((err = i2c_attach_client(client)))
-      goto ERROR1;
-
-    return 0;
-
-    /* OK, this is not exactly good programming practice, usually. But it is
-       very code-efficient in this case. */
-
-    ERROR1:
-      kfree(data);
-    ERROR0:
-      return err;
-  }
-
-
-Removing the client (Legacy model)
-==================================
-
-The detach_client call back function is called when a client should be
-removed. It may actually fail, but only when panicking. This code is
-much simpler than the attachment code, fortunately!
-
-  int foo_detach_client(struct i2c_client *client)
-  {
-    int err;
-
-    /* Try to detach the client from i2c space */
-    if ((err = i2c_detach_client(client)))
-      return err;
-
-    kfree(i2c_get_clientdata(client));
-    return 0;
-  }
-
-
-Initializing the module or kernel
-=================================
-
-When the kernel is booted, or when your foo driver module is inserted, 
-you have to do some initializing. Fortunately, just attaching (registering)
-the driver module is usually enough.
-
-  static int __init foo_init(void)
-  {
-    int res;
-    
-    if ((res = i2c_add_driver(&foo_driver))) {
-      printk("foo: Driver registration failed, module not inserted.\n");
-      return res;
-    }
-    return 0;
-  }
-
-  static void __exit foo_cleanup(void)
-  {
-    i2c_del_driver(&foo_driver);
-  }
-
-  /* Substitute your own name and email address */
-  MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"
-  MODULE_DESCRIPTION("Driver for Barf Inc. Foo I2C devices");
-
-  /* a few non-GPL license types are also allowed */
-  MODULE_LICENSE("GPL");
-
-  module_init(foo_init);
-  module_exit(foo_cleanup);
-
-Note that some functions are marked by `__init', and some data structures
-by `__initdata'.  These functions and structures can be removed after
-kernel booting (or module loading) is completed.
+Note that some functions are marked by `__init'.  These functions can
+be removed after kernel booting (or module loading) is completed.
+Likewise, functions marked by `__exit' are dropped by the compiler when
+the code is built into the kernel, as they would never be called.
 
 
 Power Management
@@ -548,33 +293,35 @@
 
 A generic ioctl-like function call back is supported. You will seldom
 need this, and its use is deprecated anyway, so newer design should not
-use it. Set it to NULL.
+use it.
 
 
 Sending and receiving
 =====================
 
 If you want to communicate with your device, there are several functions
-to do this. You can find all of them in i2c.h.
+to do this. You can find all of them in <linux/i2c.h>.
 
-If you can choose between plain i2c communication and SMBus level
-communication, please use the last. All adapters understand SMBus level
-commands, but only some of them understand plain i2c!
+If you can choose between plain I2C communication and SMBus level
+communication, please use the latter. All adapters understand SMBus level
+commands, but only some of them understand plain I2C!
 
 
-Plain i2c communication
+Plain I2C communication
 -----------------------
 
-  extern int i2c_master_send(struct i2c_client *,const char* ,int);
-  extern int i2c_master_recv(struct i2c_client *,char* ,int);
+	int i2c_master_send(struct i2c_client *client, const char *buf,
+			    int count);
+	int i2c_master_recv(struct i2c_client *client, char *buf, int count);
 
 These routines read and write some bytes from/to a client. The client
 contains the i2c address, so you do not have to include it. The second
-parameter contains the bytes the read/write, the third the length of the
-buffer. Returned is the actual number of bytes read/written.
-  
-  extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg,
-                          int num);
+parameter contains the bytes to read/write, the third the number of bytes
+to read/write (must be less than the length of the buffer). Returned is
+the actual number of bytes read/written.
+
+	int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+			 int num);
 
 This sends a series of messages. Each message can be a read or write,
 and they can be mixed in any way. The transactions are combined: no
@@ -583,49 +330,45 @@
 and the message data itself.
 
 You can read the file `i2c-protocol' for more information about the
-actual i2c protocol.
+actual I2C protocol.
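+
+As a rough sketch (error handling trimmed, and the register index and
+transfer lengths are made up for the example):
+
+	u8 out[2] = { 0x01, 0x55 };	/* register index, value */
+	u8 in[4];
+	struct i2c_msg msgs[2] = {
+		{ .addr = client->addr, .flags = 0, .len = 1, .buf = out },
+		{ .addr = client->addr, .flags = I2C_M_RD, .len = 4, .buf = in },
+	};
+	int ret;
+
+	/* plain write of two bytes to the client */
+	ret = i2c_master_send(client, out, 2);
+	if (ret != 2)
+		dev_err(&client->dev, "write failed (%d)\n", ret);
+
+	/* write the register index, then read four bytes back, with no
+	   stop condition between the two messages */
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret != 2)
+		dev_err(&client->dev, "transfer failed (%d)\n", ret);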
 
 
 SMBus communication
 -------------------
 
-  extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr, 
-                             unsigned short flags,
-                             char read_write, u8 command, int size,
-                             union i2c_smbus_data * data);
+	s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+			   unsigned short flags, char read_write, u8 command,
+			   int size, union i2c_smbus_data *data);
 
-  This is the generic SMBus function. All functions below are implemented
-  in terms of it. Never use this function directly!
+This is the generic SMBus function. All functions below are implemented
+in terms of it. Never use this function directly!
 
-
-  extern s32 i2c_smbus_read_byte(struct i2c_client * client);
-  extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
-  extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
-  extern s32 i2c_smbus_write_byte_data(struct i2c_client * client,
-                                       u8 command, u8 value);
-  extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command);
-  extern s32 i2c_smbus_write_word_data(struct i2c_client * client,
-                                       u8 command, u16 value);
-  extern s32 i2c_smbus_process_call(struct i2c_client *client,
-                                    u8 command, u16 value);
-  extern s32 i2c_smbus_read_block_data(struct i2c_client * client,
-                                       u8 command, u8 *values);
-  extern s32 i2c_smbus_write_block_data(struct i2c_client * client,
-                                        u8 command, u8 length,
-                                        u8 *values);
-  extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client,
-                                           u8 command, u8 length, u8 *values);
-  extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
-                                            u8 command, u8 length,
-                                            u8 *values);
+	s32 i2c_smbus_read_byte(struct i2c_client *client);
+	s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
+	s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
+	s32 i2c_smbus_write_byte_data(struct i2c_client *client,
+				      u8 command, u8 value);
+	s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
+	s32 i2c_smbus_write_word_data(struct i2c_client *client,
+				      u8 command, u16 value);
+	s32 i2c_smbus_process_call(struct i2c_client *client,
+				   u8 command, u16 value);
+	s32 i2c_smbus_read_block_data(struct i2c_client *client,
+				      u8 command, u8 *values);
+	s32 i2c_smbus_write_block_data(struct i2c_client *client,
+				       u8 command, u8 length, const u8 *values);
+	s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
+					  u8 command, u8 length, u8 *values);
+	s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
+					   u8 command, u8 length,
+					   const u8 *values);
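+
+For example (the register offsets 0x06 and 0x07 are made up; see the
+note on return values below):
+
+	s32 val;
+
+	/* read a byte-sized register */
+	val = i2c_smbus_read_byte_data(client, 0x06);
+	if (val < 0)
+		return val;	/* negative errno from the bus layer */
+
+	/* write a word-sized register */
+	return i2c_smbus_write_word_data(client, 0x07, 0x1234);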
 
 These ones were removed from i2c-core because they had no users, but could
 be added back later if needed:
 
-  extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value);
-  extern s32 i2c_smbus_block_process_call(struct i2c_client *client,
-                                          u8 command, u8 length,
-                                          u8 *values)
+	s32 i2c_smbus_write_quick(struct i2c_client *client, u8 value);
+	s32 i2c_smbus_block_process_call(struct i2c_client *client,
+					 u8 command, u8 length, u8 *values);
 
 All these transactions return a negative errno value on failure. The 'write'
 transactions return 0 on success; the 'read' transactions return the read
@@ -642,7 +385,5 @@
 Below all general purpose routines are listed, that were not mentioned
 before.
 
-  /* This call returns a unique low identifier for each registered adapter.
-   */
-  extern int i2c_adapter_id(struct i2c_adapter *adap);
-
+	/* Return the adapter number for a specific adapter */
+	int i2c_adapter_id(struct i2c_adapter *adap);
diff --git a/Documentation/ia64/xen.txt b/Documentation/ia64/xen.txt
new file mode 100644
index 0000000..c61a99f
--- /dev/null
+++ b/Documentation/ia64/xen.txt
@@ -0,0 +1,183 @@
+       Recipe for getting/building/running Xen/ia64 with pv_ops
+       --------------------------------------------------------
+
+This recipe describes how to get xen-ia64 source and build it,
+and run domU with pv_ops.
+
+============
+Requirements
+============
+
+  - python
+  - mercurial
+    it (aka "hg") is an open-source source code
+    management tool. See the link below.
+    http://www.selenic.com/mercurial/wiki/
+  - git
+  - bridge-utils
+
+=================================
+Getting and Building Xen and Dom0
+=================================
+
+  My environment is:
+    Machine  : Tiger4
+    Domain0 OS  : RHEL5
+    DomainU OS  : RHEL5
+
+ 1. Download source
+    # hg clone http://xenbits.xensource.com/ext/ia64/xen-unstable.hg
+    # cd xen-unstable.hg
+    # hg clone http://xenbits.xensource.com/ext/ia64/linux-2.6.18-xen.hg
+
+ 2. # make world
+
+ 3. # make install-tools
+
+ 4. copy kernels and xen
+    # cp xen/xen.gz /boot/efi/efi/redhat/
+    # cp build-linux-2.6.18-xen_ia64/vmlinux.gz \
+      /boot/efi/efi/redhat/vmlinuz-2.6.18.8-xen
+
+ 5. make initrd for Dom0/DomU
+    # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
+      O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
+    # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
+      2.6.18.8-xen --builtin mptspi --builtin mptbase \
+      --builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
+      --builtin ehci-hcd
+
+================================
+Making a disk image for guest OS
+================================
+
+ 1. make file
+    # dd if=/dev/zero of=/root/rhel5.img bs=1M seek=4096 count=0
+    # mke2fs -F -j /root/rhel5.img
+    # mount -o loop /root/rhel5.img /mnt
+    # cp -ax /{dev,var,etc,usr,bin,sbin,lib} /mnt
+    # mkdir /mnt/{root,proc,sys,home,tmp}
+
+    Note: Some device files may be missing. If so, please create them
+    with mknod. Or you can use tar instead of cp.
+
+ 2. modify DomU's fstab
+    # vi /mnt/etc/fstab
+       /dev/xvda1  /            ext3    defaults        1 1
+       none        /dev/pts     devpts  gid=5,mode=620  0 0
+       none        /dev/shm     tmpfs   defaults        0 0
+       none        /proc        proc    defaults        0 0
+       none        /sys         sysfs   defaults        0 0
+
+ 3. modify inittab
+    set runlevel to 3 to avoid X trying to start
+    # vi /mnt/etc/inittab
+       id:3:initdefault:
+    Start a getty on the hvc0 console
+       X0:2345:respawn:/sbin/mingetty hvc0
+    tty1-6 mingetty can be commented out
+
+ 4. add hvc0 into /etc/securetty
+    # vi /mnt/etc/securetty (add hvc0)
+
+ 5. umount
+    # umount /mnt
+
+FYI, virt-manager can also make a disk image for the guest OS.
+It is a GUI tool and makes this easy.
+
+==================
+Boot Xen & Domain0
+==================
+
+ 1. replace elilo
+    The elilo of RHEL5 can boot Xen and Dom0.
+    If you use an old elilo (e.g. RHEL4), please download it from
+    http://elilo.sourceforge.net/cgi-bin/blosxom
+    and copy it into /boot/efi/efi/redhat/
+    # cp elilo-3.6-ia64.efi /boot/efi/efi/redhat/elilo.efi
+
+ 2. modify elilo.conf (like the below)
+    # vi /boot/efi/efi/redhat/elilo.conf
+     prompt
+     timeout=20
+     default=xen
+     relocatable
+
+     image=vmlinuz-2.6.18.8-xen
+             label=xen
+             vmm=xen.gz
+             initrd=initrd-2.6.18.8-xen.img
+             read-only
+             append=" -- rhgb root=/dev/sda2"
+
+The append options before "--" are for the xen hypervisor,
+the options after "--" are for dom0.
+
+FYI, your machine may need console options like
+"com1=19200,8n1 console=vga,com1". For example,
+append="com1=19200,8n1 console=vga,com1 -- rhgb console=tty0 \
+console=ttyS0 root=/dev/sda2"
+
+=====================================
+Getting and Building domU with pv_ops
+=====================================
+
+ 1. get pv_ops tree
+    # git clone http://people.valinux.co.jp/~yamahata/xen-ia64/linux-2.6-xen-ia64.git/
+
+ 2. git branch (if necessary)
+    # cd linux-2.6-xen-ia64/
+    # git checkout -b your_branch origin/xen-ia64-domu-minimal-2008may19
+    (Note: The current branch is xen-ia64-domu-minimal-2008may19.
+    But you may find a newer branch. You can list the available
+    branches with "git branch -r".
+    http://people.valinux.co.jp/~yamahata/xen-ia64/for_eagl/linux-2.6-ia64-pv-ops.git/
+    is also available. The tree is based on
+    git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6 test)
+
+
+ 3. copy .config for pv_ops of domU
+    # cp arch/ia64/configs/xen_domu_wip_defconfig .config
+
+ 4. make kernel with pv_ops
+    # make oldconfig
+    # make
+
+ 5. install the kernel and initrd
+    # cp vmlinux.gz /boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU
+    # make modules_install
+    # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img \
+      2.6.26-rc3xen-ia64-08941-g1b12161 --builtin mptspi \
+      --builtin mptbase --builtin mptscsih --builtin uhci-hcd \
+      --builtin ohci-hcd --builtin ehci-hcd
+
+========================
+Boot DomainU with pv_ops
+========================
+
+ 1. make config of DomU
+   # vi /etc/xen/rhel5
+     kernel = "/boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU"
+     ramdisk = "/boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img"
+     vcpus = 1
+     memory = 512
+     name = "rhel5"
+     disk = [ 'file:/root/rhel5.img,xvda1,w' ]
+     root = "/dev/xvda1 ro"
+     extra= "rhgb console=hvc0"
+
+ 2. After booting xen and dom0, start xend
+   # /etc/init.d/xend start
+   ( In the debugging case, # XEND_DEBUG=1 xend trace_start )
+
+ 3. start domU
+   # xm create -c rhel5
+
+=========
+Reference
+=========
+- Wiki of Xen/IA64 upstream merge
+  http://wiki.xensource.com/xenwiki/XenIA64/UpstreamMerge
+
+Written by Akio Takebe <takebe_akio@jp.fujitsu.com> on 28 May 2008
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 1c6b545..b880ce5 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -92,6 +92,7 @@
 'J'	00-1F	drivers/scsi/gdth_ioctl.h
 'K'	all	linux/kd.h
 'L'	00-1F	linux/loop.h
+'L'	20-2F	drivers/usb/misc/vstusb.h
 'L'	E0-FF	linux/ppdd.h		encrypted disk device driver
 					<http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'	all	linux/soundcard.h
@@ -110,6 +111,8 @@
 'W'	00-1F	linux/wanrouter.h	conflict!
 'X'	all	linux/xfs_fs.h
 'Y'	all	linux/cyclades.h
+'['	00-07	linux/usb/usbtmc.h	USB Test and Measurement Devices
+					<mailto:gregkh@suse.de>
 'a'	all				ATM on linux
 					<http://lrcwww.epfl.ch/linux-atm/magic.html>
 'b'	00-FF				bit3 vme host bridge
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 0705040..3f4bc84 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -109,7 +109,8 @@
 2) Or use the system kernel binary itself as dump-capture kernel and there is
    no need to build a separate dump-capture kernel. This is possible
    only with the architecutres which support a relocatable kernel. As
-   of today, i386, x86_64 and ia64 architectures support relocatable kernel.
+   of today, i386, x86_64, ppc64 and ia64 architectures support relocatable
+   kernel.
 
 Building a relocatable kernel is advantageous from the point of view that
 one does not have to build a second kernel for capturing the dump. But
@@ -207,8 +208,15 @@
 Dump-capture kernel config options (Arch Dependent, ppc64)
 ----------------------------------------------------------
 
-*  Make and install the kernel and its modules. DO NOT add this kernel
-   to the boot loader configuration files.
+1) Enable "Build a kdump crash kernel" support under "Kernel" options:
+
+   CONFIG_CRASH_DUMP=y
+
+2) Enable "Build a relocatable kernel" support
+
+   CONFIG_RELOCATABLE=y
+
+   Make and install the kernel and its modules.
 
 Dump-capture kernel config options (Arch Dependent, ia64)
 ----------------------------------------------------------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index dd28a0d..53ba7c7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -101,6 +101,7 @@
 	X86-64	X86-64 architecture is enabled.
 			More X86-64 boot options can be found in
 			Documentation/x86_64/boot-options.txt .
+	X86	Either 32bit or 64bit x86 (same as X86-32+X86-64)
 
 In addition, the following text indicates that the option:
 
@@ -690,7 +691,7 @@
 			See Documentation/block/as-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.
 
-	elfcorehdr=	[X86-32, X86_64]
+	elfcorehdr=	[IA64,PPC,SH,X86-32,X86_64]
 			Specifies physical address of start of kernel core
 			image elf header. Generally kexec loader will
 			pass this option to capture kernel.
@@ -796,6 +797,8 @@
 			Defaults to the default architecture's huge page size
 			if not specified.
 
+	hlt		[BUGS=ARM,SH]
+
 	i8042.debug	[HW] Toggle i8042 debug mode
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
@@ -1211,6 +1214,10 @@
 	mem=nopentium	[BUGS=X86-32] Disable usage of 4MB pages for kernel
 			memory.
 
+	memchunk=nn[KMG]
+			[KNL,SH] Allow user to override the default size for
+			per-device physically contiguous DMA buffers.
+
 	memmap=exactmap	[KNL,X86-32,X86_64] Enable setting of an exact
 			E820 memory map, as specified by the user.
 			Such memmap=exactmap lines can be constructed based on
@@ -1393,6 +1400,8 @@
 
 	nodisconnect	[HW,SCSI,M68K] Disables SCSI disconnects.
 
+	nodsp		[SH] Disable hardware DSP at boot time.
+
 	noefi		[X86-32,X86-64] Disable EFI runtime services support.
 
 	noexec		[IA-64]
@@ -1409,13 +1418,15 @@
 			noexec32=off: disable non-executable mappings
 				read implies executable mappings
 
+	nofpu		[SH] Disable hardware FPU at boot time.
+
 	nofxsr		[BUGS=X86-32] Disables x86 floating point extended
 			register save and restore. The kernel will only save
 			legacy floating-point registers on task switch.
 
 	noclflush	[BUGS=X86] Don't use the CLFLUSH instruction
 
-	nohlt		[BUGS=ARM]
+	nohlt		[BUGS=ARM,SH]
 
 	no-hlt		[BUGS=X86-32] Tells the kernel that the hlt
 			instruction doesn't work correctly and not to
@@ -1578,7 +1589,7 @@
 			See also Documentation/paride.txt.
 
 	pci=option[,option...]	[PCI] various PCI subsystem options:
-		off		[X86-32] don't probe for the PCI bus
+		off		[X86] don't probe for the PCI bus
 		bios		[X86-32] force use of PCI BIOS, don't access
 				the hardware directly. Use this if your machine
 				has a non-standard PCI host bridge.
@@ -1586,9 +1597,9 @@
 				hardware access methods are allowed. Use this
 				if you experience crashes upon bootup and you
 				suspect they are caused by the BIOS.
-		conf1		[X86-32] Force use of PCI Configuration
+		conf1		[X86] Force use of PCI Configuration
 				Mechanism 1.
-		conf2		[X86-32] Force use of PCI Configuration
+		conf2		[X86] Force use of PCI Configuration
 				Mechanism 2.
 		noaer		[PCIE] If the PCIEAER kernel config parameter is
 				enabled, this kernel boot option can be used to
@@ -1608,37 +1619,37 @@
 				this option if the kernel is unable to allocate
 				IRQs or discover secondary PCI buses on your
 				motherboard.
-		rom		[X86-32] Assign address space to expansion ROMs.
+		rom		[X86] Assign address space to expansion ROMs.
 				Use with caution as certain devices share
 				address decoders between ROMs and other
 				resources.
-		norom		[X86-32,X86_64] Do not assign address space to
+		norom		[X86] Do not assign address space to
 				expansion ROMs that do not already have
 				BIOS assigned address ranges.
-		irqmask=0xMMMM	[X86-32] Set a bit mask of IRQs allowed to be
+		irqmask=0xMMMM	[X86] Set a bit mask of IRQs allowed to be
 				assigned automatically to PCI devices. You can
 				make the kernel exclude IRQs of your ISA cards
 				this way.
-		pirqaddr=0xAAAAA	[X86-32] Specify the physical address
+		pirqaddr=0xAAAAA	[X86] Specify the physical address
 				of the PIRQ table (normally generated
 				by the BIOS) if it is outside the
 				F0000h-100000h range.
-		lastbus=N	[X86-32] Scan all buses thru bus #N. Can be
+		lastbus=N	[X86] Scan all buses thru bus #N. Can be
 				useful if the kernel is unable to find your
 				secondary buses and you want to tell it
 				explicitly which ones they are.
-		assign-busses	[X86-32] Always assign all PCI bus
+		assign-busses	[X86] Always assign all PCI bus
 				numbers ourselves, overriding
 				whatever the firmware may have done.
-		usepirqmask	[X86-32] Honor the possible IRQ mask stored
+		usepirqmask	[X86] Honor the possible IRQ mask stored
 				in the BIOS $PIR table. This is needed on
 				some systems with broken BIOSes, notably
 				some HP Pavilion N5400 and Omnibook XE3
 				notebooks. This will have no effect if ACPI
 				IRQ routing is enabled.
-		noacpi		[X86-32] Do not use ACPI for IRQ routing
+		noacpi		[X86] Do not use ACPI for IRQ routing
 				or for PCI scanning.
-		use_crs		[X86-32] Use _CRS for PCI resource
+		use_crs		[X86] Use _CRS for PCI resource
 				allocation.
 		routeirq	Do IRQ routing for all PCI devices.
 				This is normally done in pci_enable_device(),
@@ -1667,6 +1678,12 @@
 				reserved for the CardBus bridge's memory
 				window. The default value is 64 megabytes.
 
+	pcie_aspm=	[PCIE] Forcibly enable or disable PCIe Active State Power
+			Management.
+		off	Disable ASPM.
+		force	Enable ASPM even on devices that claim not to support it.
+			WARNING: Forcing ASPM on may cause system lockups.
+
 	pcmv=		[HW,PCMCIA] BadgePAD 4
 
 	pd.		[PARIDE]
@@ -2253,6 +2270,25 @@
 			autosuspended.  Devices for which the delay is set
 			to a negative value won't be autosuspended at all.
 
+	usbcore.usbfs_snoop=
+			[USB] Set to log all usbfs traffic (default 0 = off).
+
+	usbcore.blinkenlights=
+			[USB] Set to cycle leds on hubs (default 0 = off).
+
+	usbcore.old_scheme_first=
+			[USB] Start with the old device initialization
+			scheme (default 0 = off).
+
+	usbcore.use_both_schemes=
+			[USB] Try the other device initialization scheme
+			if the first one fails (default 1 = enabled).
+
+	usbcore.initial_descriptor_timeout=
+			[USB] Specifies timeout for the initial 64-byte
+			USB_REQ_GET_DESCRIPTOR request in milliseconds
+			(default 5000 = 5.0 seconds).
+
 	usbhid.mousepoll=
 			[USBHID] The interval which mice are to be polled at.
 
diff --git a/Documentation/markers.txt b/Documentation/markers.txt
index d9f50a1..089f613 100644
--- a/Documentation/markers.txt
+++ b/Documentation/markers.txt
@@ -50,10 +50,12 @@
 to call) for the specific marker through marker_probe_register() and can be
 activated by calling marker_arm(). Marker deactivation can be done by calling
 marker_disarm() as many times as marker_arm() has been called. Removing a probe
-is done through marker_probe_unregister(); it will disarm the probe and make
-sure there is no caller left using the probe when it returns. Probe removal is
-preempt-safe because preemption is disabled around the probe call. See the
-"Probe example" section below for a sample probe module.
+is done through marker_probe_unregister(); it will disarm the probe.
+marker_synchronize_unregister() must be called before the end of the module exit
+function to make sure there is no caller left using the probe. This, and the
+fact that preemption is disabled around the probe call, make sure that probe
+removal and module unload are safe. See the "Probe example" section below for a
+sample probe module.
 
 The marker mechanism supports inserting multiple instances of the same marker.
 Markers can be put in inline functions, inlined static functions, and
diff --git a/Documentation/mtd/nand_ecc.txt b/Documentation/mtd/nand_ecc.txt
new file mode 100644
index 0000000..bdf93b7
--- /dev/null
+++ b/Documentation/mtd/nand_ecc.txt
@@ -0,0 +1,714 @@
+Introduction
+============
+
+Having looked at the linux mtd/nand driver and more specifically at nand_ecc.c
+I felt there was room for optimisation. I bashed the code for a few hours
+performing tricks like table lookup, removing superfluous code, etc.
+After that the speed was increased by 35-40%.
+Still I was not too happy as I felt there was additional room for improvement.
+
+Bad! I was hooked.
+I decided to annotate my steps in this file. Perhaps it is useful to someone
+or someone learns something from it.
+
+
+The problem
+===========
+
+NAND flash (at least SLC one) typically has sectors of 256 bytes.
+However NAND flash is not extremely reliable so some error detection
+(and sometimes correction) is needed.
+
+This is done by means of a Hamming code. I'll try to explain it in
+layman's terms (and apologies to all the pros in the field in case I do
+not use the right terminology, my coding theory class was almost 30
+years ago, and I must admit it was not one of my favourites).
+
+As I said before the ecc calculation is performed on sectors of 256
+bytes. This is done by calculating several parity bits over the rows and
+columns. The parity used is even parity, which means that the parity bit = 1
+if the number of 1 bits in the data over which the parity is calculated is
+odd, and the parity bit = 0 if that number is even. So the total
+number of 1 bits in the data over which the parity is calculated + the
+parity bit is even (see wikipedia if you can't follow this).
+Parity is often calculated by means of an exclusive or operation,
+sometimes also referred to as xor. In C the operator for xor is ^
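+
+For instance, the parity of a single byte b can be computed by folding
+the byte onto itself with xor (just an illustration, not code taken
+from the driver):
+
+    b ^= b >> 4;
+    b ^= b >> 2;
+    b ^= b >> 1;
+    parity = b & 1;   /* 1 if b contains an odd number of 1 bits */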
+
+Back to ecc.
+Let's give a small figure:
+
+byte   0:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp0 rp2 rp4 ... rp14
+byte   1:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp1 rp2 rp4 ... rp14
+byte   2:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp0 rp3 rp4 ... rp14
+byte   3:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp1 rp3 rp4 ... rp14
+byte   4:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp0 rp2 rp5 ... rp14
+....
+byte 254:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp0 rp3 rp5 ... rp15
+byte 255:  bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0   rp1 rp3 rp5 ... rp15
+           cp1  cp0  cp1  cp0  cp1  cp0  cp1  cp0
+           cp3  cp3  cp2  cp2  cp3  cp3  cp2  cp2
+           cp5  cp5  cp5  cp5  cp4  cp4  cp4  cp4
+
+This figure represents a sector of 256 bytes.
+cp is my abbreviation for column parity, rp for row parity.
+
+Let's start to explain column parity.
+cp0 is the parity that belongs to all bit0, bit2, bit4, bit6.
+so the sum of all bit0, bit2, bit4 and bit6 values + cp0 itself is even.
+Similarly cp1 is the sum of all bit1, bit3, bit5 and bit7.
+cp2 is the parity over bit0, bit1, bit4 and bit5
+cp3 is the parity over bit2, bit3, bit6 and bit7.
+cp4 is the parity over bit0, bit1, bit2 and bit3.
+cp5 is the parity over bit4, bit5, bit6 and bit7.
+Note that each of cp0 .. cp5 is exactly one bit.
+
+Row parity actually works almost the same.
+rp0 is the parity of all even bytes (0, 2, 4, 6, ... 252, 254)
+rp1 is the parity of all odd bytes (1, 3, 5, 7, ..., 253, 255)
+rp2 is the parity of all bytes 0, 1, 4, 5, 8, 9, ...
+(so handle two bytes, then skip 2 bytes).
+rp3 covers the half rp2 does not cover (bytes 2, 3, 6, 7, 10, 11, ...)
+for rp4 the rule is cover 4 bytes, skip 4 bytes, cover 4 bytes, skip 4 etc.
+so rp4 calculates parity over bytes 0, 1, 2, 3, 8, 9, 10, 11, 16, ...
+and rp5 covers the other half, so bytes 4, 5, 6, 7, 12, 13, 14, 15, 20, ..
+The story now becomes quite boring. I guess you get the idea.
+rp6 covers 8 bytes then skips 8 etc
+rp7 skips 8 bytes then covers 8 etc
+rp8 covers 16 bytes then skips 16 etc
+rp9 skips 16 bytes then covers 16 etc
+rp10 covers 32 bytes then skips 32 etc
+rp11 skips 32 bytes then covers 32 etc
+rp12 covers 64 bytes then skips 64 etc
+rp13 skips 64 bytes then covers 64 etc
+rp14 covers 128 bytes then skips 128
+rp15 skips 128 bytes then covers 128
+
+In the end the parity bits are grouped together in three bytes as
+follows:
+ECC    Bit 7 Bit 6 Bit 5 Bit 4 Bit 3 Bit 2 Bit 1 Bit 0
+ECC 0   rp07  rp06  rp05  rp04  rp03  rp02  rp01  rp00
+ECC 1   rp15  rp14  rp13  rp12  rp11  rp10  rp09  rp08
+ECC 2   cp5   cp4   cp3   cp2   cp1   cp0      1     1
+
+I detected after writing this that ST application note AN1823
+(http://www.st.com/stonline/books/pdf/docs/10123.pdf) gives a much
+nicer picture (but they use line parity as the term where I use row parity).
+Oh well, I'm graphically challenged, so suffer with me for a moment :-)
+And I could not reuse the ST picture anyway for copyright reasons.
+
+
+Attempt 0
+=========
+
+Implementing the parity calculation is pretty simple.
+In C pseudocode:
+for (i = 0; i < 256; i++)
+{
+    if (i & 0x01)
+       rp1 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp1;
+    else
+       rp0 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp0;
+    if (i & 0x02)
+       rp3 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp3;
+    else
+       rp2 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp2;
+    if (i & 0x04)
+      rp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp5;
+    else
+      rp4 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp4;
+    if (i & 0x08)
+      rp7 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp7;
+    else
+      rp6 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp6;
+    if (i & 0x10)
+      rp9 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp9;
+    else
+      rp8 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp8;
+    if (i & 0x20)
+      rp11 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp11;
+    else
+      rp10 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp10;
+    if (i & 0x40)
+      rp13 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp13;
+    else
+      rp12 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp12;
+    if (i & 0x80)
+      rp15 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp15;
+    else
+      rp14 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp14;
+    cp0 = bit6 ^ bit4 ^ bit2 ^ bit0 ^ cp0;
+    cp1 = bit7 ^ bit5 ^ bit3 ^ bit1 ^ cp1;
+    cp2 = bit5 ^ bit4 ^ bit1 ^ bit0 ^ cp2;
+    cp3 = bit7 ^ bit6 ^ bit3 ^ bit2 ^ cp3;
+    cp4 = bit3 ^ bit2 ^ bit1 ^ bit0 ^ cp4;
+    cp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ cp5;
+}
+
+
+Analysis 0
+==========
+
+C does have bitwise operators but not really operators to do the above
+efficiently (and most hardware has no such instructions either).
+Therefore without implementing this it was clear that the code above was
+not going to bring me a Nobel prize :-)
+
+Fortunately the exclusive or operation is commutative, so we can combine
+the values in any order. So instead of calculating all the bits
+individually, let us try to rearrange things.
+For the column parity this is easy. We can just xor the bytes and in the
+end filter out the relevant bits. This is pretty nice as it will bring
+all cp calculations out of the loop.
+
+Similarly we can first xor the bytes for the various rows.
+This leads to:
+
+
+Attempt 1
+=========
+
+const char parity[256] = {
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
+};
+
+void ecc1(const unsigned char *buf, unsigned char *code)
+{
+    int i;
+    const unsigned char *bp = buf;
+    unsigned char cur;
+    unsigned char rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+    unsigned char rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+    unsigned char par;
+
+    par = 0;
+    rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0;
+    rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0;
+    rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0;
+    rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0;
+
+    for (i = 0; i < 256; i++)
+    {
+        cur = *bp++;
+        par ^= cur;
+        if (i & 0x01) rp1 ^= cur; else rp0 ^= cur;
+        if (i & 0x02) rp3 ^= cur; else rp2 ^= cur;
+        if (i & 0x04) rp5 ^= cur; else rp4 ^= cur;
+        if (i & 0x08) rp7 ^= cur; else rp6 ^= cur;
+        if (i & 0x10) rp9 ^= cur; else rp8 ^= cur;
+        if (i & 0x20) rp11 ^= cur; else rp10 ^= cur;
+        if (i & 0x40) rp13 ^= cur; else rp12 ^= cur;
+        if (i & 0x80) rp15 ^= cur; else rp14 ^= cur;
+    }
+    code[0] =
+        (parity[rp7] << 7) |
+        (parity[rp6] << 6) |
+        (parity[rp5] << 5) |
+        (parity[rp4] << 4) |
+        (parity[rp3] << 3) |
+        (parity[rp2] << 2) |
+        (parity[rp1] << 1) |
+        (parity[rp0]);
+    code[1] =
+        (parity[rp15] << 7) |
+        (parity[rp14] << 6) |
+        (parity[rp13] << 5) |
+        (parity[rp12] << 4) |
+        (parity[rp11] << 3) |
+        (parity[rp10] << 2) |
+        (parity[rp9]  << 1) |
+        (parity[rp8]);
+    code[2] =
+        (parity[par & 0xf0] << 7) |
+        (parity[par & 0x0f] << 6) |
+        (parity[par & 0xcc] << 5) |
+        (parity[par & 0x33] << 4) |
+        (parity[par & 0xaa] << 3) |
+        (parity[par & 0x55] << 2);
+    code[0] = ~code[0];
+    code[1] = ~code[1];
+    code[2] = ~code[2];
+}
+
+Still pretty straightforward. The last three invert statements are there to
+give a checksum of 0xff 0xff 0xff for an empty flash. In an empty flash
+all data is 0xff, so the checksum then matches.
+
+I also introduced the parity lookup. I expected this to be the fastest
+way to calculate the parity, but I will investigate alternatives later
+on.
+
+
+Analysis 1
+==========
+
+The code works, but is not terribly efficient. On my system it took
+almost 4 times as much time as the linux driver code. But hey, if it was
+*that* easy this would have been done long before.
+No pain, no gain.
+
+Fortunately there is plenty of room for improvement.
+
+In step 1 we moved from bit-wise calculation to byte-wise calculation.
+However in C we can also use the unsigned long data type and virtually
+every modern microprocessor supports 32 bit operations, so why not try
+to write our code in such a way that we process data in 32 bit chunks.
+
+Of course this means some modification as the row parity is byte by
+byte. A quick analysis:
+for the column parity we use the par variable. When extending to 32 bits
+we can in the end easily calculate rp0 and rp1 from it.
+(because par now consists of 4 bytes, contributing to rp1, rp0, rp1, rp0
+respectively)
+also rp2 and rp3 can be easily retrieved from par as rp3 covers the
+first two bytes and rp2 the last two bytes.
+
+Note that of course now the loop is executed only 64 times (256/4).
+And note that care must be taken wrt byte ordering. The way bytes are
+ordered in a long is machine dependent, and might affect us.
+Anyway, if there is an issue: this code is developed on x86 (to be
+precise: a DELL PC with a D920 Intel CPU)
+
+And of course the performance might depend on alignment, but I expect
+that the I/O buffers in the nand driver are aligned properly (and
+otherwise that should be fixed to get maximum performance).
+
+Let's give it a try...
+
+
+Attempt 2
+=========
+
+extern const char parity[256];
+
+void ecc2(const unsigned char *buf, unsigned char *code)
+{
+    int i;
+    const unsigned long *bp = (const unsigned long *)buf;
+    unsigned long cur;
+    unsigned long rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+    unsigned long rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+    unsigned long par;
+
+    par = 0;
+    rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0;
+    rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0;
+    rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0;
+    rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0;
+
+    for (i = 0; i < 64; i++)
+    {
+        cur = *bp++;
+        par ^= cur;
+        if (i & 0x01) rp5 ^= cur; else rp4 ^= cur;
+        if (i & 0x02) rp7 ^= cur; else rp6 ^= cur;
+        if (i & 0x04) rp9 ^= cur; else rp8 ^= cur;
+        if (i & 0x08) rp11 ^= cur; else rp10 ^= cur;
+        if (i & 0x10) rp13 ^= cur; else rp12 ^= cur;
+        if (i & 0x20) rp15 ^= cur; else rp14 ^= cur;
+    }
+    /*
+       we need to adapt the code generation for the fact that rp vars are now
+       long; also the column parity calculation needs to be changed.
+       we'll bring rp4 to 15 back to single byte entities by shifting and
+       xoring
+    */
+    rp4 ^= (rp4 >> 16); rp4 ^= (rp4 >> 8); rp4 &= 0xff;
+    rp5 ^= (rp5 >> 16); rp5 ^= (rp5 >> 8); rp5 &= 0xff;
+    rp6 ^= (rp6 >> 16); rp6 ^= (rp6 >> 8); rp6 &= 0xff;
+    rp7 ^= (rp7 >> 16); rp7 ^= (rp7 >> 8); rp7 &= 0xff;
+    rp8 ^= (rp8 >> 16); rp8 ^= (rp8 >> 8); rp8 &= 0xff;
+    rp9 ^= (rp9 >> 16); rp9 ^= (rp9 >> 8); rp9 &= 0xff;
+    rp10 ^= (rp10 >> 16); rp10 ^= (rp10 >> 8); rp10 &= 0xff;
+    rp11 ^= (rp11 >> 16); rp11 ^= (rp11 >> 8); rp11 &= 0xff;
+    rp12 ^= (rp12 >> 16); rp12 ^= (rp12 >> 8); rp12 &= 0xff;
+    rp13 ^= (rp13 >> 16); rp13 ^= (rp13 >> 8); rp13 &= 0xff;
+    rp14 ^= (rp14 >> 16); rp14 ^= (rp14 >> 8); rp14 &= 0xff;
+    rp15 ^= (rp15 >> 16); rp15 ^= (rp15 >> 8); rp15 &= 0xff;
+    rp3 = (par >> 16); rp3 ^= (rp3 >> 8); rp3 &= 0xff;
+    rp2 = par & 0xffff; rp2 ^= (rp2 >> 8); rp2 &= 0xff;
+    par ^= (par >> 16);
+    rp1 = (par >> 8); rp1 &= 0xff;
+    rp0 = (par & 0xff);
+    par ^= (par >> 8); par &= 0xff;
+
+    code[0] =
+        (parity[rp7] << 7) |
+        (parity[rp6] << 6) |
+        (parity[rp5] << 5) |
+        (parity[rp4] << 4) |
+        (parity[rp3] << 3) |
+        (parity[rp2] << 2) |
+        (parity[rp1] << 1) |
+        (parity[rp0]);
+    code[1] =
+        (parity[rp15] << 7) |
+        (parity[rp14] << 6) |
+        (parity[rp13] << 5) |
+        (parity[rp12] << 4) |
+        (parity[rp11] << 3) |
+        (parity[rp10] << 2) |
+        (parity[rp9]  << 1) |
+        (parity[rp8]);
+    code[2] =
+        (parity[par & 0xf0] << 7) |
+        (parity[par & 0x0f] << 6) |
+        (parity[par & 0xcc] << 5) |
+        (parity[par & 0x33] << 4) |
+        (parity[par & 0xaa] << 3) |
+        (parity[par & 0x55] << 2);
+    code[0] = ~code[0];
+    code[1] = ~code[1];
+    code[2] = ~code[2];
+}
+
+The parity array is not shown any more. Note also that for these
+examples I kinda deviated from my regular programming style by allowing
+multiple statements on a line, not using { } in then and else blocks
+with only a single statement and by using operators like ^=
+
+
+Analysis 2
+==========
+
+The code (of course) works, and hurray: we are a little bit faster than
+the linux driver code (about 15%). But wait, don't cheer too quickly.
+There is more to be gained.
+If we look at e.g. rp14 and rp15 we see that we either xor our data with
+rp14 or with rp15. However we also have par which goes over all data.
+This means there is no need to calculate rp14 as it can be calculated from
+rp15 through rp14 = par ^ rp15;
+(or if desired we can avoid calculating rp15 and calculate it from
+rp14).  That is why some places refer to inverse parity.
+Of course the same thing holds for rp4/5, rp6/7, rp8/9, rp10/11 and rp12/13.
+Effectively this means we can eliminate the else clause from the if
+statements. Also we can optimise the calculation in the end a little bit
+by going from long to byte first. Actually we can even avoid the table
+lookups.
+
+Attempt 3
+=========
+
+Odd replaced:
+        if (i & 0x01) rp5 ^= cur; else rp4 ^= cur;
+        if (i & 0x02) rp7 ^= cur; else rp6 ^= cur;
+        if (i & 0x04) rp9 ^= cur; else rp8 ^= cur;
+        if (i & 0x08) rp11 ^= cur; else rp10 ^= cur;
+        if (i & 0x10) rp13 ^= cur; else rp12 ^= cur;
+        if (i & 0x20) rp15 ^= cur; else rp14 ^= cur;
+with
+        if (i & 0x01) rp5 ^= cur;
+        if (i & 0x02) rp7 ^= cur;
+        if (i & 0x04) rp9 ^= cur;
+        if (i & 0x08) rp11 ^= cur;
+        if (i & 0x10) rp13 ^= cur;
+        if (i & 0x20) rp15 ^= cur;
+
+and outside the loop added:
+    rp4  = par ^ rp5;
+    rp6  = par ^ rp7;
+    rp8  = par ^ rp9;
+    rp10  = par ^ rp11;
+    rp12  = par ^ rp13;
+    rp14  = par ^ rp15;
+
+And after that the code takes about 30% more time, although the number of
+statements is reduced. This is also reflected in the assembly code.
+
+
+Analysis 3
+==========
+
+Very weird. Guess it has to do with caching or instruction parallelism
+or so. I also tried on an eeePC (Celeron, clocked at 900 MHz). The
+interesting observation was that this one was only 30% slower (according
+to time) at executing the code than my 3 GHz D920 processor.
+
+Well, it was expected not to be easy so maybe instead move to a
+different track: let's move back to the code from attempt2 and do some
+loop unrolling. This will eliminate a few if statements. I'll try
+different amounts of unrolling to see what works best.
+
+
+Attempt 4
+=========
+
+Unrolled the loop 1, 2, 3 and 4 times.
+For 4 the code starts with:
+
+    for (i = 0; i < 4; i++)
+    {
+        cur = *bp++;
+        par ^= cur;
+        rp4 ^= cur;
+        rp6 ^= cur;
+        rp8 ^= cur;
+        rp10 ^= cur;
+        if (i & 0x1) rp13 ^= cur; else rp12 ^= cur;
+        if (i & 0x2) rp15 ^= cur; else rp14 ^= cur;
+        cur = *bp++;
+        par ^= cur;
+        rp5 ^= cur;
+        rp6 ^= cur;
+        ...
+
+
+Analysis 4
+==========
+
+Unrolling once gains about 15%
+Unrolling twice keeps the gain at about 15%
+Unrolling three times gives a gain of 30% compared to attempt 2.
+Unrolling four times gives a marginal improvement compared to unrolling
+three times.
+
+I decided to proceed with a four time unrolled loop anyway. It was my gut
+feeling that in the next steps I would obtain additional gain from it.
+
+The next step was triggered by the fact that par contains the xor of all
+bytes and rp4 and rp5 each contain the xor of half of the bytes.
+So in effect par = rp4 ^ rp5. But as xor is commutative we can also say
+that rp5 = par ^ rp4. So no need to keep both rp4 and rp5 around. We can
+eliminate rp5 (or rp4, but I already foresaw another optimisation).
+The same holds for rp6/7, rp8/9, rp10/11 rp12/13 and rp14/15.
+
+
+Attempt 5
+=========
+
+Effectively all odd-numbered rp assignments in the loop were removed.
+This included the else clauses of the if statements.
+Of course after the loop we need to correct things by adding code like:
+    rp5 = par ^ rp4;
+Also the initial assignments (rp5 = 0; etc) could be removed.
+Along the line I also removed the initialisation of rp0/1/2/3.
+
+
+Analysis 5
+==========
+
+Measurements showed this was a good move. The run-time roughly halved
+compared with attempt 4 with 4 times unrolled, and we only require 1/3rd
+of the processor time compared to the current code in the linux kernel.
+
+However, still I thought there was more. I didn't like all the if
+statements. Why not keep a running parity and only keep the last if
+statement. Time for yet another version!
+
+
+Attempt 6
+=========
+
+The code within the for loop was changed to:
+
+    for (i = 0; i < 4; i++)
+    {
+        cur = *bp++; tmppar  = cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= tmppar;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp8 ^= tmppar;
+
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp10 ^= tmppar;
+
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur; rp8 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= cur; rp8 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp8 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp8 ^= cur;
+
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur;
+
+        par ^= tmppar;
+        if ((i & 0x1) == 0) rp12 ^= tmppar;
+        if ((i & 0x2) == 0) rp14 ^= tmppar;
+    }
+
+As you can see tmppar is used to accumulate the parity within a for
+iteration. In the last 3 statements it is added to par and, if needed,
+to rp12 and rp14.
+
+While making the changes I also found that I could exploit that tmppar
+contains the running parity for this iteration. So instead of having:
+rp4 ^= cur; rp6 ^= cur;
+I removed the rp6 ^= cur; statement and did rp6 ^= tmppar; on the next
+statement. A similar change was done for rp8 and rp10.
+
+
+Analysis 6
+==========
+
+Measuring this code again showed big gain. When executing the original
+linux code 1 million times, this took about 1 second on my system
+(using time to measure the performance). After this iteration I was back
+to 0.075 sec. Actually I had to decide to start measuring over 10
+million iterations in order not to lose too much accuracy. This one
+definitely seemed to be the jackpot!
+
+There is a little bit more room for improvement though. There are three
+places with statements:
+rp4 ^= cur; rp6 ^= cur;
+It seems more efficient to also maintain a variable rp4_6 in the
+loop; this eliminates 3 statements per loop. Of course after the loop we
+need to correct by adding:
+    rp4 ^= rp4_6;
+    rp6 ^= rp4_6;
+Furthermore there are 4 sequential assignments to rp8. This can be
+encoded slightly more efficiently by saving tmppar before those 4 lines
+and later do rp8 = rp8 ^ tmppar ^ notrp8;
+(where notrp8 is the value of rp8 before those 4 lines).
+Again a use of the commutative property of xor.
+Time for a new test!
+
+
+Attempt 7
+=========
+
+The new code now looks like:
+
+    for (i = 0; i < 4; i++)
+    {
+        cur = *bp++; tmppar  = cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= tmppar;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp8 ^= tmppar;
+
+        cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp10 ^= tmppar;
+
+        notrp8 = tmppar;
+        cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur;
+        rp8 = rp8 ^ tmppar ^ notrp8;
+
+        cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+        cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+        cur = *bp++; tmppar ^= cur;
+
+        par ^= tmppar;
+        if ((i & 0x1) == 0) rp12 ^= tmppar;
+        if ((i & 0x2) == 0) rp14 ^= tmppar;
+    }
+    rp4 ^= rp4_6;
+    rp6 ^= rp4_6;
+
+
+Not a big change, but every penny counts :-)
+
+
+Analysis 7
+==========
+
+Actually this made things worse. Not very much, but I don't want to move
+in the wrong direction. Maybe something to investigate later. Could
+have to do with caching again.
+
+Guess that is what there is to win within the loop. Maybe unrolling one
+more time will help. I'll keep the optimisations from 7 for now.
+
+
+Attempt 8
+=========
+
+Unrolled the loop one more time.
+
+
+Analysis 8
+==========
+
+This makes things worse. Let's stick with attempt 6 and continue from there.
+Although it seems that the code within the loop cannot be optimised
+further, there is still room to optimise the generation of the ecc codes.
+We can simply calculate the total parity. If this is 0 then rp4 = rp5
+etc. If the parity is 1, then rp4 = !rp5;
+But if rp4 = rp5 we do not need rp5 etc. We can just write the even bits
+in the result byte and then do something like
+    code[0] |= (code[0] << 1);
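+
+To make the idea concrete, here is a small standalone sketch (the bit
+layout and the function name are chosen for illustration only; this is
+not the actual driver code):
+
+    /* even_bits holds rp6, rp4, rp2, rp0 in bits 6, 4, 2, 0;
+       par is the total parity over all data bits (0 or 1) */
+    unsigned char finish_byte(unsigned char even_bits, int par)
+    {
+        if (par == 0)
+            even_bits |= even_bits << 1;          /* rp1 = rp0, rp3 = rp2, ... */
+        else
+            even_bits |= (even_bits ^ 0x55) << 1; /* odd bits are the inverse  */
+        return even_bits;
+    }
+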
+Let's test this.
+
+
+Attempt 9
+=========
+
+Changed the code, but again this slightly degrades performance. Tried
+all kinds of other things, like having dedicated parity arrays to avoid
+the shift after parity[rp7] << 7. No gain.
+Changed the lookup using the parity array to use shift operators, e.g.
+replacing parity[rp7] << 7 with:
+rp7 ^= (rp7 << 4);
+rp7 ^= (rp7 << 2);
+rp7 ^= (rp7 << 1);
+rp7 &= 0x80;
+No gain.
+
+The only marginal change was inverting the parity bits, so we can remove
+the last three invert statements.
+
+Ah well, pity this does not deliver more. Then again 10 million
+iterations using the linux driver code takes between 13 and 13.5
+seconds, whereas my code now takes about 0.73 seconds for those 10
+million iterations. So basically I've improved the performance by a
+factor of 18 on my system. Not that bad. Of course on different hardware
+you will get different results. No warranties!
+
+But of course there is no such thing as a free lunch. The code size
+almost tripled (from 562 bytes to 1434 bytes). Then again, it is not
+that much.
+
+
+Correcting errors
+=================
+
+For correcting errors I again used the ST application note as a starter,
+but I also peeked at the existing code.
+The algorithm itself is pretty straightforward: just xor the given and
+the calculated ecc. If all bytes are 0 there is no problem. If exactly
+11 bits are 1 we have one correctable bit error. If exactly 1 bit is 1,
+we have an error in the given ecc code.
+It proved to be fastest to do some table lookups. The performance gain
+introduced by this is about a factor of 2 on my system when a repair had
+to be done, and 1% or so if no repair had to be done.
+Code size increased from 330 bytes to 686 bytes for this function.
+(gcc 4.2, -O3)
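+
+A minimal sketch of the decision logic described above (illustrative only,
+not the actual correction code; it assumes the usual three ecc bytes and
+treats any other bit count as uncorrectable):
+
+    /* returns 0: no error, 1: correctable bit error in the data,
+       2: bit error in the ecc itself, -1: uncorrectable */
+    int classify(const unsigned char *calc_ecc, const unsigned char *read_ecc)
+    {
+        int i, nr_bits = 0;
+
+        for (i = 0; i < 3; i++) {
+            unsigned char diff = calc_ecc[i] ^ read_ecc[i];
+            while (diff) {              /* count the 1 bits in the xor result */
+                nr_bits += diff & 1;
+                diff >>= 1;
+            }
+        }
+        if (nr_bits == 0)
+            return 0;
+        if (nr_bits == 11)
+            return 1;
+        if (nr_bits == 1)
+            return 2;
+        return -1;
+    }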
+
+
+Conclusion
+==========
+
+The gain when calculating the ecc is tremendous. On my development hardware
+a speedup of a factor of 18 for ecc calculation was achieved. In a test on an
+embedded system with a MIPS core a factor of 7 was obtained.
+In a test with a Linksys NSLU2 (ARMv5TE processor) the speedup was a factor
+of 5 (big endian mode, gcc 4.1.2, -O3).
+For correction not much gain could be obtained (as bitflips are rare). Then
+again, far fewer cycles are spent there anyway.
+
+It seems there is not much more gain possible here, at least when
+programmed in C. Of course it might be possible to squeeze something more
+out of it with an assembler program, but due to pipeline behaviour etc.
+this is very tricky (at least for Intel hardware).
+
+Author: Frans Meulenbroeks
+Copyright (C) 2008 Koninklijke Philips Electronics NV.
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index de4063c..02ea9a9 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1917,6 +1917,8 @@
 			inverse clock polarity (CPOL) mode
     - spi-cpha        - (optional) Empty property indicating device requires
 			shifted clock phase (CPHA) mode
+    - spi-cs-high     - (optional) Empty property indicating device requires
+			chip select active high
 
     SPI example for an MPC5200 SPI bus:
 		spi@f00 {
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index 74ae6f1..81a917e 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -2,13 +2,13 @@
 
 Required properties:
 
- - device_type : Should be "board-control"
+ - compatible : Should be "fsl,<board>-bcsr"
  - reg : Offset and length of the register set for the device
 
 Example:
 
 	bcsr@f8000000 {
-		device_type = "board-control";
+		compatible = "fsl,mpc8360mds-bcsr";
 		reg = <f8000000 8000>;
 	};
 
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 5ce0952..10a0263 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -95,7 +95,9 @@
 
 'p'     - Will dump the current registers and flags to your console.
 
-'q'     - Will dump a list of all running timers.
+'q'     - Will dump per CPU lists of all armed hrtimers (but NOT regular
+          timer_list timers) and detailed information about all
+          clockevent devices.
 
 'r'     - Turns off keyboard raw mode and sets it to XLATE.
 
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt
new file mode 100644
index 0000000..5d354e1
--- /dev/null
+++ b/Documentation/tracepoints.txt
@@ -0,0 +1,101 @@
+	             Using the Linux Kernel Tracepoints
+
+			    Mathieu Desnoyers
+
+
+This document introduces Linux Kernel Tracepoints and their use. It shows
+how to insert tracepoints in the kernel and connect probe functions to them,
+and provides some examples of probe functions.
+
+
+* Purpose of tracepoints
+
+A tracepoint placed in code provides a hook to call a function (probe) that you
+can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or
+"off" (no probe is attached). When a tracepoint is "off" it has no effect,
+except for adding a tiny time penalty (checking a condition for a branch) and
+space penalty (adding a few bytes for the function call at the end of the
+instrumented function and a data structure in a separate section).  When a
+tracepoint is "on", the function you provide is called each time the tracepoint
+is executed, in the execution context of the caller. When the function provided
+ends its execution, it returns to the caller (continuing from the tracepoint
+site).
+
+You can put tracepoints at important locations in the code. They are
+lightweight hooks that can pass an arbitrary number of parameters,
+whose prototypes are described in a tracepoint declaration placed in a header
+file.
+
+They can be used for tracing and performance accounting.
+
+
+* Usage
+
+Two elements are required for tracepoints :
+
+- A tracepoint definition, placed in a header file.
+- The tracepoint statement, in C code.
+
+In order to use tracepoints, you should include linux/tracepoint.h.
+
+In include/trace/subsys.h :
+
+#include <linux/tracepoint.h>
+
+DEFINE_TRACE(subsys_eventname,
+	TPPROTO(int firstarg, struct task_struct *p),
+	TPARGS(firstarg, p));
+
+In subsys/file.c (where the tracing statement must be added) :
+
+#include <trace/subsys.h>
+
+void somefct(void)
+{
+	...
+	trace_subsys_eventname(arg, task);
+	...
+}
+
+Where :
+- subsys_eventname is an identifier unique to your event
+    - subsys is the name of your subsystem.
+    - eventname is the name of the event to trace.
+- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the function
+  called by this tracepoint.
+- TPARGS(firstarg, p) are the parameter names, same as found in the prototype.
+
+Connecting a function (probe) to a tracepoint is done by providing a probe
+(function to call) for the specific tracepoint through
+register_trace_subsys_eventname().  Removing a probe is done through
+unregister_trace_subsys_eventname(); it will remove the probe and make sure
+there is no caller left using the probe when it returns. Probe removal is
+preempt-safe because preemption is disabled around the probe call. See the
+"Probe example" section below for a sample probe module.
+
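+A minimal probe module might then look like this (a sketch based only on the
+description above; the sample modules referenced in the "Probe / tracepoint
+example" section are the authoritative version):
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <trace/subsys.h>
+
+/* Probe: runs in the caller's context every time the tracepoint fires. */
+static void probe_subsys_eventname(int firstarg, struct task_struct *p)
+{
+	printk(KERN_INFO "subsys_eventname: %d %s\n", firstarg, p->comm);
+}
+
+static int __init probe_init(void)
+{
+	/* register_trace_subsys_eventname() is generated by the DEFINE_TRACE
+	 * statement in include/trace/subsys.h shown above */
+	return register_trace_subsys_eventname(probe_subsys_eventname);
+}
+
+static void __exit probe_exit(void)
+{
+	unregister_trace_subsys_eventname(probe_subsys_eventname);
+}
+
+module_init(probe_init);
+module_exit(probe_exit);
+MODULE_LICENSE("GPL");
+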
+The tracepoint mechanism supports inserting multiple instances of the same
+tracepoint, but a single definition must be made of a given tracepoint name
+across the whole kernel to make sure no type conflict will occur. Name mangling
+of the tracepoints is done using the prototypes to make sure typing is correct.
+Verification of probe type correctness is done at the registration site by the
+compiler. Tracepoints can be put in inline functions, inlined static functions,
+and unrolled loops as well as regular functions.
+
+The naming scheme "subsys_event" is suggested here as a convention intended
+to limit collisions. Tracepoint names are global to the kernel: they are
+considered as being the same whether they are in the core kernel image or in
+modules.
+
+
+* Probe / tracepoint example
+
+See the example provided in samples/tracepoints/src
+
+Compile them with your kernel.
+
+Run, as root :
+modprobe tracepoint-example (insmod order is not important)
+modprobe tracepoint-probe-example
+cat /proc/tracepoint-example (returns an expected error)
+rmmod tracepoint-example tracepoint-probe-example
+dmesg
diff --git a/Documentation/tracers/mmiotrace.txt b/Documentation/tracers/mmiotrace.txt
index a4afb56..5bbbe20 100644
--- a/Documentation/tracers/mmiotrace.txt
+++ b/Documentation/tracers/mmiotrace.txt
@@ -36,7 +36,7 @@
 $ echo mmiotrace > /debug/tracing/current_tracer
 $ cat /debug/tracing/trace_pipe > mydump.txt &
 Start X or whatever.
-$ echo "X is up" > /debug/tracing/marker
+$ echo "X is up" > /debug/tracing/trace_marker
 $ echo none > /debug/tracing/current_tracer
 Check for lost events.
 
@@ -59,9 +59,8 @@
 Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
 accesses to areas that are ioremapped while mmiotrace is active.
 
-[Unimplemented feature:]
 During tracing you can place comments (markers) into the trace by
-$ echo "X is up" > /debug/tracing/marker
+$ echo "X is up" > /debug/tracing/trace_marker
 This makes it easier to see which part of the (huge) trace corresponds to
 which action. It is recommended to place descriptive markers about what you
 do.
diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt
new file mode 100644
index 0000000..4c3d62c
--- /dev/null
+++ b/Documentation/usb/WUSB-Design-overview.txt
@@ -0,0 +1,448 @@
+
+Linux UWB + Wireless USB + WiNET
+
+   (C) 2005-2006 Intel Corporation
+   Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License version
+   2 as published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+
+Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for
+updated content.
+
+    * Design-overview.txt-1.8
+
+This code implements an Ultra Wide Band stack for Linux, as well as
+drivers for the USB-based UWB radio controllers defined in the
+Wireless USB 1.0 specification (including the Wireless USB host controller
+and an Intel WiNET controller).
+
+   1. Introduction
+         1. HWA: Host Wire adapters, your Wireless USB dongle
+
+         2. DWA: Device Wired Adaptor, a Wireless USB hub for wired
+            devices
+         3. WHCI: Wireless Host Controller Interface, the PCI WUSB host
+            adapter
+   2. The UWB stack
+         1. Devices and hosts: the basic structure
+
+         2. Host Controller life cycle
+
+         3. On the air: beacons and enumerating the radio neighborhood
+
+         4. Device lists
+         5. Bandwidth allocation
+
+   3. Wireless USB Host Controller drivers
+
+   4. Glossary
+
+
+    Introduction
+
+UWB is a wide-band communication protocol that is also meant to serve as
+the low-level protocol for others (much like TCP sits on IP). Currently
+these others are Wireless USB and TCP/IP, but it seems Bluetooth and
+FireWire/1394 are coming along.
+
+UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of
+~-41dB (or 0.074 uW/MHz--geography specific data is still being
+negotiated w/ regulators, so watch for changes). That band is divided into
+a bunch of ~1.5 GHz wide channels (or band groups) composed of three
+subbands/subchannels (528 MHz each). Each channel is independent of the
+others, so you could consider them different "busses". Initially this
+driver considers them all as a single one.
+
+Radio time is divided into 65536 us long /superframes/, each one divided
+into 256 256us long /MASs/ (Media Allocation Slots), which are the basic
+time/media allocation units for transferring data. At the beginning of
+each superframe there is a Beacon Period (BP), where every device
+transmits its beacon on a single MAS. The length of the BP depends on how
+many devices are present and the length of their beacons.
+
+Devices have a MAC address (fixed, 48 bit) and a device address
+(changeable, 16 bit) and send periodic beacons to advertise themselves
+and pass info on what they are and do. They advertise their capabilities
+and a bunch of other stuff.
+
+The different logical parts of this driver are:
+
+    *
+
+      *UWB*: the Ultra-Wide-Band stack -- manages the radio and
+      associated spectrum to allow for devices sharing it. It allows
+      control of bandwidth assignment, beaconing, scanning, etc.
+
+    *
+
+      *WUSB*: the layer that sits on top of UWB to provide Wireless USB.
+      The Wireless USB spec defines means to control a UWB radio and to
+      do the actual WUSB.
+
+
+      HWA: Host Wire adapters, your Wireless USB dongle
+
+WUSB also defines a device called a Host Wire Adaptor (HWA), which in
+simple terms is a USB dongle that enables your PC to have UWB and Wireless
+USB. The Wireless USB Host Controller in an HWA looks to the host like a
+[Wireless] USB controller connected via USB (!)
+
+The HWA itself is broken into two or three main interfaces:
+
+    *
+
+      *RC*: Radio control -- this implements an interface to the
+      Ultra-Wide-Band radio controller. The driver for this implements a
+      USB-based UWB Radio Controller to the UWB stack.
+
+    *
+
+      *HC*: the wireless USB host controller. It looks like a USB host
+      whose root port is the radio and the WUSB devices connect to it.
+      To the system it looks like a separate USB host. The driver (will)
+      implement a USB host controller (similar to UHCI, OHCI or EHCI)
+      for which the root hub is the radio...To reiterate: it is a USB
+      controller that is connected via USB instead of PCI.
+
+    *
+
+      *WINET*: some HW provides a WiNET interface (IP over UWB). This
+      package provides a driver for it (it looks like a network
+      interface, winetX). The driver detects when there is a link up for
+      its type and kicks into gear.
+
+
+      DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
+
+These are the complement to HWAs. A DWA is a USB host for connecting
+wired devices, but it is connected to your PC via Wireless
+USB. To the system it looks like yet another USB host. To the untrained
+eye, it looks like a hub that connects upstream wirelessly.
+
+We still offer no support for this; however, it should share a lot of
+code with the HWA-RC driver; there is a bunch of factorization work that
+has been done to support that in upcoming releases.
+
+
+      WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
+
+This is your usual PCI device that implements WHCI. Similar in concept
+to EHCI, it allows your wireless USB devices (including DWAs) to connect
+to your host via a PCI interface. As in the case of the HWA, it has a
+Radio Control interface and the WUSB Host Controller interface per se.
+
+There is still no driver support for this, but will be in upcoming
+releases.
+
+
+    The UWB stack
+
+The main mission of the UWB stack is to keep a tally of which devices
+are in radio proximity to allow drivers to connect to them. As well, it
+provides an API for controlling the local radio controllers (RCs from
+now on), such as to start/stop beaconing, scan, allocate bandwidth, etc.
+
+
+      Devices and hosts: the basic structure
+
+The main building block here is the UWB device (struct uwb_dev). For
+each device that pops up in radio presence (ie: the UWB host receives a
+beacon from it) you get a struct uwb_dev that will show up in
+/sys/class/uwb and in /sys/bus/uwb/devices.
+
+For each RC that is detected, a new struct uwb_rc is created. In turn, an
+RC is also a device, so they also show up in /sys/class/uwb and
+/sys/bus/uwb/devices, but at the same time, only radio controllers show
+up in /sys/class/uwb_rc.
+
+    *
+
+      [*] The reason for RCs also being devices is that we can see them
+      not only while enumerating the system device tree, but also on the
+      radio (their beacons and stuff), so the handling has to be
+      similar to that of a device.
+
+Each RC driver is implemented by a separate driver that plugs into the
+interface that the UWB stack provides through a struct uwb_rc_ops. The
+spec creators have been nice enough to make the message format the same
+for HWA and WHCI RCs, so the driver is really a very thin transport that
+moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/]
+and sends the replies and notifications back to the API
+[/uwb_rc_neh_grok()/]. Notifications are handed to the UWB daemon, which
+is chartered, among other things, to keep tabs on how the UWB radio
+neighborhood looks, creating and destroying devices as they show up or
+disappear.
+
+Command execution is very simple: a command block is sent and an event
+block or reply is expected back. For sending/receiving commands/events, a
+handle called /neh/ (Notification/Event Handle) is opened with
+/uwb_rc_neh_open()/.
+
+The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for
+the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
+for the PCI connected WHCI controller.
+
+
+      Host Controller life cycle
+
+So let's say we connect a dongle to the system: it is detected and
+firmware uploaded if needed [for Intel's i1480
+/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated.
+Now we have a real HWA device connected and
+/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, which will set up the
+Wire-Adaptor environment and then suck it into the UWB stack's vision of
+the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/].
+
+    *
+
+      [*] The stack should put a new RC to scan for devices
+      [/uwb_rc_scan()/] so it finds what's available around and tries to
+      connect to them, but this is policy stuff and should be driven
+      from user space. As of now, the operator is expected to do it
+      manually; see the release notes for documentation on the procedure.
+
+When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
+takes care of tearing everything down safely (or not...).
+
+
+      On the air: beacons and enumerating the radio neighborhood
+
+So assuming we have devices and we have agreed on a channel to connect
+on (let's say 9), we put the new RC to beacon:
+
+    *
+
+            $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
+
+Now it is visible. If there were other devices in the same radio channel
+and beacon group (that's what the zero is for), the dongle's radio
+control interface will send beacon notifications on its
+notification/event endpoint (NEEP). The beacon notifications are part of
+the event stream that is funneled into the API with
+/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB
+daemon through a notification list.
+
+UWBD wakes up and scans the event list; it finds a beacon and adds it to
+the BEACON CACHE (/uwb_beca/). If it receives a number of beacons from
+the same device, it considers it to be 'onair' and creates a new device
+[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons
+are received for some time, the device is considered gone and wiped out
+[uwbd periodically calls /uwb/beacon.c:uwb_beca_purge()/ which will purge
+the beacon cache of dead devices].
+
+
+      Device lists
+
+All UWB devices are kept in the list of the struct bus_type uwb_bus.
+
+
+      Bandwidth allocation
+
+The UWB stack maintains a local copy of DRP availability through
+processing of incoming *DRP Availability Change* notifications. This
+local copy is currently used to present the current bandwidth
+availability to the user through the sysfs file
+/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth
+availability information will be used by the bandwidth reservation
+routines.
+
+The bandwidth reservation routines are in progress and are thus not
+present in the current release. When completed they will enable a user
+to initiate DRP reservation requests through interaction with sysfs. DRP
+reservation requests from remote UWB devices will also be handled. The
+bandwidth management done by the UWB stack will include callbacks to the
+higher layers that will enable them to use the reservations upon
+completion. [Note: The bandwidth reservation work is in progress and
+subject to change.]
+
+
+    Wireless USB Host Controller drivers
+
+*WARNING* This section needs a lot of work!
+
+As explained above, there are three different types of HCs in the WUSB
+world: HWA-HC, DWA-HC and WHCI-HC.
+
+HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB
+connected controllers), and their transfer management system is almost
+identical. So is their notification delivery system.
+
+HWA-HC and WHCI-HC share that they are both WUSB host controllers, so
+they have to deal with the WUSB device life cycle and maintenance, and
+with the wireless root hub.
+
+HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has
+three endpoints (Notifications, Data Transfer In and Data Transfer
+Out--known as NEP, DTI and DTO in the code).
+
+We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster
+ID and tell the HC to use all that. Then we start it. This means the HC
+starts sending MMCs.
+
+    *
+
+      The MMCs are blocks of data defined somewhere in the WUSB1.0 spec
+      that define a stream in the UWB channel time allocated for sending
+      WUSB IEs (host to device commands/notifications) and Device
+      Notifications (device initiated to host). Each host defines a
+      unique Wireless USB cluster through MMCs. Devices can connect to a
+      single cluster at a time. The IEs are Information Elements, and
+      among them are the bandwidth allocations that tell each device
+      when it can transmit or receive.
+
+Now it all depends on external stimuli.
+
+*New device connection*
+
+A new device pops up and scans the radio looking for MMCs that give out
+the existence of Wireless USB channels. Once one (or more) are found, it
+selects which one to connect to. It sends a /DN_Connect/ (device
+notification connect) during the DNTS (Device Notification Time
+Slot--announced in the MMCs).
+
+The HC picks the /DN_Connect/ out (the nep module sends it to notif.c for
+delivery into /devconnect/). This starts the authentication process for
+the device. First we allocate a /fake port/ and assign an
+unauthenticated address (128 to 255--what we really do is
+0x80 | fake_port_idx). We fiddle with the fake port status and /khubd/
+sees a new connection, so it moves on to enable the fake port with a reset.
+
+So now we are in the reset path -- we know we have a not-yet-enumerated
+device with an unauthorized address; we ask user space to authenticate
+(FIXME: not yet done, similar to bluetooth pairing), then we do the key
+exchange (FIXME: not yet done) and issue a /set address 0/ to bring the
+device to the default state. Device is authenticated.
+
+From here, the USB stack takes control through the usb_hcd ops. khubd
+has seen the port status changes, as we have been toggling them. It will
+start enumerating and doing transfers through usb_hcd->urb_enqueue() to
+read descriptors and move our data.
+
+*Device life cycle and keep alives*
+
+Every time there is a successful transfer to/from a device, we update a
+per-device activity timestamp. Every now and then we check, and
+if the activity timestamp gets old, we ping the device by sending it a
+Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this
+arrives to us as a notification through
+devconnect.c:wusb_handle_dn_alive()). If a device times out, we
+disconnect it from the system (cleaning up internal information and
+toggling the bits in the fake hub port, which kicks khubd into removing
+the rest of the stuff).
+
+This is done through devconnect:__wusb_check_devs(), which will scan the
+device list looking for devices that need refreshing.
+
+If the device wants to disconnect, it will either die (ugly) or send a
+/DN_Disconnect/ that will prompt a disconnection from the system.
+
+*Sending and receiving data*
+
+Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
+/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
+DWAs.
+
+Each HC has a number of rpipes and buffers that can be assigned to them;
+when doing a data transfer (xfer), first the rpipe has to be aimed and
+prepared (buffers assigned), then we can start queueing requests for
+data in or out.
+
+Data buffers have to be segmented out before sending--so we send first a
+header (segment request) and then if there is any data, a data buffer
+immediately after to the DTI interface (yep, even the request). If our
+buffer is bigger than the max segment size, then we just do multiple
+requests.
+
+[This sucks, because doing USB scatter gather in Linux is resource
+intensive, if any...not that the current approach is not. It just has to
+be cleaned up a lot :)].
+
+If reading, we don't send data buffers, just the segment headers saying
+we want to read segments.
+
+When the xfer is executed, we receive a notification that says data is
+ready in the DTI endpoint (handled through
+xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a
+descriptor that gives us the status of the transfer, its identification
+(given when we issued it) and the segment number. If it was a data read,
+we issue another URB to read into the destination buffer the chunk of
+data coming out of the remote endpoint. Done, wait for the next guy. The
+callbacks for the URBs issued from here are the ones that will declare
+the xfer complete at some point and call its callback.
+
+Seems simple, but the implementation is not trivial.
+
+    *
+
+      *WARNING* Old!!
+
+The main xfer descriptor, wa_xfer (equivalent to a URB), contains an
+array of segments, tallies of segments and buffers, and callback
+information. Buried in there are a lot of URBs for executing the segments
+and buffer transfers.
+
+For OUT xfers, there is an array of segments, one URB for each, and
+another array of buffer URBs. When submitting, we submit URBs for
+segment request 1, buffer 1, segment 2, buffer 2...etc. Then we wait on
+the DTI for xfer
+result data; when all the segments are complete, we call the callback to
+finalize the transfer.
+
+For IN xfers, we only issue URBs for the segments we want to read and
+then wait for the xfer result data.
+
+*URB mapping into xfers*
+
+This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
+rpipe to the endpoint where we have to transmit, create a transfer
+context (wa_xfer) and submit it. When the xfer is done, our callback is
+called and we assign the status bits and release the xfer resources.
+
+In dequeue() we are basically cancelling/aborting the transfer. We issue
+an xfer abort request to the HC, cancel all the URBs we had submitted
+and not yet completed, and when all that is done, the xfer callback will
+be called--this will call the URB callback.
+
+
+    Glossary
+
+*DWA* -- Device Wire Adapter
+
+USB host, wired for downstream devices, upstream connects wirelessly
+with Wireless USB.
+
+*EVENT* -- Response to a command on the NEEP
+
+*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB
+
+*NEH* -- Notification/Event Handle
+
+Handle/file descriptor for receiving notifications or events. The WA
+code requires you to get one of these to listen for notifications or
+events on the NEEP.
+
+*NEEP* -- Notification/Event EndPoint
+
+Stuff related to the management of the first endpoint of an HWA USB
+dongle that is used to deliver a stream of events and notifications to
+the host.
+
+*NOTIFICATION* -- Message coming in the NEEP as response to something.
+
+*RC* -- Radio Control
+
+Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
+InakyPerezGonzalez)
+
diff --git a/Documentation/usb/anchors.txt b/Documentation/usb/anchors.txt
index 5e6b64c..6f24f56 100644
--- a/Documentation/usb/anchors.txt
+++ b/Documentation/usb/anchors.txt
@@ -52,6 +52,11 @@
 the call returns. They may be unlinked later but will be unlinked in
 finite time.
 
+usb_scuttle_anchored_urbs()
+---------------------------
+
+All URBs of an anchor are unanchored en masse.
+
 usb_wait_anchor_empty_timeout()
 -------------------------------
 
@@ -59,4 +64,16 @@
 or a timeout, whichever comes first. Its return value will tell you
 whether the timeout was reached.
 
+usb_anchor_empty()
+------------------
 
+Returns true if no URBs are associated with an anchor. Locking
+is the caller's responsibility.
+
+usb_get_from_anchor()
+---------------------
+
+Returns the oldest anchored URB of an anchor. The URB is unanchored
+and returned with a reference. As you may mix URBs to several
+destinations in one anchor you have no guarantee the chronologically
+first submitted URB is returned.
\ No newline at end of file
diff --git a/Documentation/usb/misc_usbsevseg.txt b/Documentation/usb/misc_usbsevseg.txt
new file mode 100644
index 0000000..0f6be4f
--- /dev/null
+++ b/Documentation/usb/misc_usbsevseg.txt
@@ -0,0 +1,46 @@
+USB 7-Segment Numeric Display
+Manufactured by Delcom Engineering
+
+Device Information
+------------------
+USB VENDOR_ID	0x0fc5
+USB PRODUCT_ID	0x1227
+Both the 6 character and 8 character displays have the same PRODUCT_ID,
+and according to Delcom Engineering no queryable information
+can be obtained from the device to tell them apart.
+
+Device Modes
+------------
+By default, the driver assumes the display is only 6 characters.
+The mode for 6 characters is:
+	MSB 0x06; LSB 0x3f
+For the 8 character display:
+	MSB 0x08; LSB 0xff
+The device can accept "text" either in raw, hex, or ascii textmode.
+raw controls each segment manually,
+hex expects a value between 0-15 per character,
+ascii expects a value between '0'-'9' and 'A'-'F'.
+The default is ascii.
+
+Device Operation
+----------------
+1.	Turn on the device:
+	echo 1 > /sys/bus/usb/.../powered
+2.	Set the device's mode:
+	echo $mode_msb > /sys/bus/usb/.../mode_msb
+	echo $mode_lsb > /sys/bus/usb/.../mode_lsb
+3.	Set the textmode:
+	echo $textmode > /sys/bus/usb/.../textmode
+4.	set the text (for example):
+	echo "123ABC" > /sys/bus/usb/.../text (ascii)
+	echo "A1B2" > /sys/bus/usb/.../text (ascii)
+	echo -ne "\x01\x02\x03" > /sys/bus/usb/.../text (hex)
+5.	Set the decimal places.
+	The device has either 6 or 8 decimal points.
+	To set the nth decimal place, calculate 10 ** n
+	and echo it into /sys/bus/usb/.../decimals
+	To set multiple decimal points, sum up each power.
+	For example, to set the 0th and 3rd decimal places:
+	echo 1001 > /sys/bus/usb/.../decimals
+
+
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 9d31140..e48ea1d 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -350,12 +350,12 @@
 
 There also are a couple of utility routines drivers can use:
 
-	usb_autopm_enable() sets pm_usage_cnt to 1 and then calls
-	usb_autopm_set_interface(), which will attempt an autoresume.
-
-	usb_autopm_disable() sets pm_usage_cnt to 0 and then calls
+	usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
 	usb_autopm_set_interface(), which will attempt an autosuspend.
 
+	usb_autopm_disable() sets pm_usage_cnt to 1 and then calls
+	usb_autopm_set_interface(), which will attempt an autoresume.
+
 The conventional usage pattern is that a driver calls
 usb_autopm_get_interface() in its open routine and
 usb_autopm_put_interface() in its close or release routine.  But
diff --git a/Documentation/usb/wusb-cbaf b/Documentation/usb/wusb-cbaf
new file mode 100644
index 0000000..2e78b70
--- /dev/null
+++ b/Documentation/usb/wusb-cbaf
@@ -0,0 +1,139 @@
+#! /bin/bash
+#
+
+set -e
+
+progname=$(basename $0)
+function help
+{
+    cat <<EOF
+Usage: $progname COMMAND DEVICEs [ARGS]
+
+Command for manipulating the pairing/authentication credentials of a
+Wireless USB device that supports wired-mode Cable-Based-Association.
+
+Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org.
+
+
+DEVICE
+
+ sysfs path to the device to authenticate; for example, both of these
+ refer to the same device:
+
+ /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1
+ /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1
+
+COMMAND/ARGS are
+
+ start
+
+   Start a WUSB host controller (by setting up a CHID)
+
+ set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME
+
+   Sets host information in the device; after this you can call
+   get-cdid to see how this device reports itself to us.
+
+ get-cdid DEVICE
+
+   Get the device ID associated with the HOST-CHID we sent with
+   'set-chid'. We might not know about it.
+
+ set-cc DEVICE
+
+   If we allow the device to connect, set a random new CDID and CK
+   (connection key). The device saves them for the next time it wants to
+   connect wirelessly. We save them for that next time as well, so we can
+   authenticate the device (when we see the CDID it uses to identify
+   itself) and use the CK to talk to it securely.
+
+CHID is always 16 hex bytes in 'XX YY ZZ...' form
+BANDGROUP is almost always 0001
+
+Examples:
+
+  You can default most arguments to '' to get a sane value:
+
+  $ $progname set-chid '' '' '' "My host name"
+
+  A full sequence:
+
+  $ $progname set-chid '' '' '' "My host name"
+  $ $progname get-cdid ''
+  $ $progname set-cc ''
+
+EOF
+}
+
+
+# Defaults
+# FIXME: CHID should come from a database :), band group from the host
+host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
+host_band_group="0001"
+host_name=$(hostname)
+
+devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)"
+hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)"
+
+result=0
+case $1 in
+    start)
+        for dev in ${2:-$hdevs}
+          do
+          uwb_rc=$(readlink -f $dev/uwb_rc)
+          if cat $uwb_rc/beacon | grep -q -- "-1"
+              then
+              echo 13 0 > $uwb_rc/beacon
+              echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2
+          fi
+          echo $host_CHID > $dev/wusb_chid
+          echo I: started host $(basename $dev) >&2
+        done
+        ;;
+    stop)
+        for dev in ${2:-$hdevs}
+          do
+          echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
+          echo I: stopped host $(basename $dev) >&2
+          uwb_rc=$(readlink -f $dev/uwb_rc)
+          echo -1 | cat > $uwb_rc/beacon
+          echo I: stopped beaconing on $(basename $uwb_rc) >&2
+        done
+        ;;
+    set-chid)
+        shift
+        for dev in ${2:-$devs}; do
+            echo "${4:-$host_name}" > $dev/wusb_host_name
+            echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups
+            echo ${2:-$host_CHID} > $dev/wusb_chid
+        done
+        ;;
+    get-cdid)
+        for dev in ${2:-$devs}
+          do
+          cat $dev/wusb_cdid
+        done
+        ;;
+    set-cc)
+        for dev in ${2:-$devs}; do
+            shift
+            CDID="$(head --bytes=16 /dev/urandom  | od -tx1 -An)"
+            CK="$(head --bytes=16 /dev/urandom  | od -tx1 -An)"
+            echo "$CDID" > $dev/wusb_cdid
+            echo "$CK" > $dev/wusb_ck
+
+            echo I: CC set >&2
+            echo "CHID: $(cat $dev/wusb_chid)"
+            echo "CDID:$CDID"
+            echo "CK:  $CK"
+        done
+        ;;
+    help|h|--help|-h)
+        help
+        ;;
+    *)
+        echo "E: Unknown usage" 1>&2
+        help 1>&2
+        result=1
+esac
+exit $result
diff --git a/Documentation/video4linux/CARDLIST.au0828 b/Documentation/video4linux/CARDLIST.au0828
index aa05e5b..d5cb4ea 100644
--- a/Documentation/video4linux/CARDLIST.au0828
+++ b/Documentation/video4linux/CARDLIST.au0828
@@ -1,5 +1,5 @@
   0 -> Unknown board                            (au0828)
-  1 -> Hauppauge HVR950Q                        (au0828)        [2040:7200,2040:7210,2040:7217,2040:721b,2040:721f,2040:7280,0fd9:0008]
+  1 -> Hauppauge HVR950Q                        (au0828)        [2040:7200,2040:7210,2040:7217,2040:721b,2040:721e,2040:721f,2040:7280,0fd9:0008]
   2 -> Hauppauge HVR850                         (au0828)        [2040:7240]
   3 -> DViCO FusionHDTV USB                     (au0828)        [0fe9:d620]
   4 -> Hauppauge HVR950Q rev xxF8               (au0828)        [2040:7201,2040:7211,2040:7281]
diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner
index 30bbdda..691d2f3 100644
--- a/Documentation/video4linux/CARDLIST.tuner
+++ b/Documentation/video4linux/CARDLIST.tuner
@@ -75,3 +75,4 @@
 tuner=75 - Philips TEA5761 FM Radio
 tuner=76 - Xceive 5000 tuner
 tuner=77 - TCL tuner MF02GIP-5N-E
+tuner=78 - Philips FMD1216MEX MK3 Hybrid Tuner
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
new file mode 100644
index 0000000..125eed5
--- /dev/null
+++ b/Documentation/vm/unevictable-lru.txt
@@ -0,0 +1,615 @@
+
+This document describes the Linux memory management "Unevictable LRU"
+infrastructure and the use of this infrastructure to manage several types
+of "unevictable" pages.  The document attempts to provide the overall
+rationale behind this mechanism and the rationale for some of the design
+decisions that drove the implementation.  The latter design rationale is
+discussed in the context of an implementation description.  Admittedly, one
+can obtain the implementation details--the "what does it do?"--by reading the
+code.  One hopes that the descriptions below add value by provide the answer
+to "why does it do that?".
+
+Unevictable LRU Infrastructure:
+
+The Unevictable LRU adds an additional LRU list to track unevictable pages
+and to hide these pages from vmscan.  This mechanism is based on a patch by
+Larry Woodman of Red Hat to address several scalability problems with page
+reclaim in Linux.  The problems have been observed at customer sites on large
+memory x86_64 systems.  For example, a non-NUMA x86_64 platform with 128GB
+of main memory will have over 32 million 4k pages in a single zone.  When a
+large fraction of these pages are not evictable for any reason [see below],
+vmscan will spend a lot of time scanning the LRU lists looking for the small
+fraction of pages that are evictable.  This can result in a situation where
+all cpus are spending 100% of their time in vmscan for hours or days on end,
+with the system completely unresponsive.
+
+The Unevictable LRU infrastructure addresses the following classes of
+unevictable pages:
+
++ page owned by ramfs
++ page mapped into SHM_LOCKed shared memory regions
++ page mapped into VM_LOCKED [mlock()ed] vmas
+
+The infrastructure might be able to handle other conditions that make pages
+unevictable, either by definition or by circumstance, in the future.
+
+
+The Unevictable LRU List
+
+The Unevictable LRU infrastructure consists of an additional, per-zone, LRU list
+called the "unevictable" list and an associated page flag, PG_unevictable, to
+indicate that the page is being managed on the unevictable list.  The
+PG_unevictable flag is analogous to, and mutually exclusive with, the PG_active
+flag in that it indicates on which LRU list a page resides when PG_lru is set.
+The unevictable LRU list is source configurable based on the UNEVICTABLE_LRU
+Kconfig option.
+
+The Unevictable LRU infrastructure maintains unevictable pages on an additional
+LRU list for a few reasons:
+
+1) We get to "treat unevictable pages just like we treat other pages in the
+   system, which means we get to use the same code to manipulate them, the
+   same code to isolate them (for migrate, etc.), the same code to keep track
+   of the statistics, etc..." [Rik van Riel]
+
+2) We want to be able to migrate unevictable pages between nodes--for memory
+   defragmentation, workload management and memory hotplug.  The linux kernel
+   can only migrate pages that it can successfully isolate from the lru lists.
+   If we were to maintain pages elsewise than on an lru-like list, where they
+   can be found by isolate_lru_page(), we would prevent their migration, unless
+   we reworked migration code to find the unevictable pages.
+
+
+The unevictable LRU list does not differentiate between file backed and swap
+backed [anon] pages.  This differentiation is only important while the pages
+are, in fact, evictable.
+
+The unevictable LRU list benefits from the "arrayification" of the per-zone
+LRU lists and statistics originally proposed and posted by Christoph Lameter.
+
+The unevictable list does not use the lru pagevec mechanism. Rather,
+unevictable pages are placed directly on the page's zone's unevictable
+list under the zone lru_lock.  The reason for this is to prevent stranding
+of pages on the unevictable list when one task has the page isolated from the
+lru and other tasks are changing the "evictability" state of the page.
+
+
+Unevictable LRU and Memory Controller Interaction
+
+The memory controller data structure automatically gets a per zone unevictable
+lru list as a result of the "arrayification" of the per-zone LRU lists.  The
+memory controller tracks the movement of pages to and from the unevictable list.
+When a memory control group comes under memory pressure, the controller will
+not attempt to reclaim pages on the unevictable list.  This has a couple of
+effects.  Because the pages are "hidden" from reclaim on the unevictable list,
+the reclaim process can be more efficient, dealing only with pages that have
+a chance of being reclaimed.  On the other hand, if too many of the pages
+charged to the control group are unevictable, the evictable portion of the
+working set of the tasks in the control group may not fit into the available
+memory.  This can cause the control group to thrash or to oom-kill tasks.
+
+
+Unevictable LRU:  Detecting Unevictable Pages
+
+The function page_evictable(page, vma) in vmscan.c determines whether a
+page is evictable or not.  For ramfs pages and pages in SHM_LOCKed regions,
+page_evictable() tests a new address space flag, AS_UNEVICTABLE, in the page's
+address space using a wrapper function.  Wrapper functions are used to set,
+clear and test the flag to reduce the requirement for #ifdef's throughout the
+source code.  AS_UNEVICTABLE is set on ramfs inode/mapping when it is created.
+This flag remains for the life of the inode.
+
+For shared memory regions, AS_UNEVICTABLE is set when an application
+successfully SHM_LOCKs the region and is removed when the region is
+SHM_UNLOCKed.  Note that shmctl(SHM_LOCK, ...) does not populate the page
+tables for the region as does, for example, mlock().   So, we make no special
+effort to push any pages in the SHM_LOCKed region to the unevictable list.
+Vmscan will do this when/if it encounters the pages during reclaim.  On
+SHM_UNLOCK, shmctl() scans the pages in the region and "rescues" them from the
+unevictable list if no other condition keeps them unevictable.  If a SHM_LOCKed
+region is destroyed, the pages are also "rescued" from the unevictable list in
+the process of freeing them.
+
+page_evictable() detects mlock()ed pages by testing an additional page flag,
+PG_mlocked via the PageMlocked() wrapper.  If the page is NOT mlocked, and a
+non-NULL vma is supplied, page_evictable() will check whether the vma is
+VM_LOCKED via is_mlocked_vma().  is_mlocked_vma() will SetPageMlocked() and
+update the appropriate statistics if the vma is VM_LOCKED.  This method allows
+efficient "culling" of pages in the fault path that are being faulted in to
+VM_LOCKED vmas.
+
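+A condensed sketch of the test described above (for illustration only; the
+authoritative version is the page_evictable() function in vmscan.c mentioned
+earlier):
+
+	static int page_evictable(struct page *page, struct vm_area_struct *vma)
+	{
+		/* ramfs pages and pages in SHM_LOCKed segments: the mapping
+		 * carries the AS_UNEVICTABLE flag */
+		if (mapping_unevictable(page_mapping(page)))
+			return 0;
+		/* already marked mlocked, or being faulted into a VM_LOCKED
+		 * vma--is_mlocked_vma() also sets PG_mlocked and updates the
+		 * statistics in that case */
+		if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+			return 0;
+		return 1;
+	}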
+
+Unevictable Pages and Vmscan [shrink_*_list()]
+
+If unevictable pages are culled in the fault path, or moved to the unevictable
+list at mlock() or mmap() time, vmscan will never encounter the pages until
+they have become evictable again, for example, via munlock() and have been
+"rescued" from the unevictable list.  However, there may be situations where we
+decide, for the sake of expediency, to leave an unevictable page on one of the
+regular active/inactive LRU lists for vmscan to deal with.  Vmscan checks for
+such pages in all of the shrink_{active|inactive|page}_list() functions and
+will "cull" such pages that it encounters--that is, it diverts those pages to
+the unevictable list for the zone being scanned.
+
+There may be situations where a page is mapped into a VM_LOCKED vma, but the
+page is not marked as PageMlocked.  Such pages will make it all the way to
+shrink_page_list() where they will be detected when vmscan walks the reverse
+map in try_to_unmap().  If try_to_unmap() returns SWAP_MLOCK, shrink_page_list()
+will cull the page at that point.
+
+Note that for anonymous pages, shrink_page_list() attempts to add the page to
+the swap cache before it tries to unmap the page.  To avoid this unnecessary
+consumption of swap space, shrink_page_list() calls try_to_munlock() to check
+whether any VM_LOCKED vmas map the page without attempting to unmap the page.
+If try_to_munlock() returns SWAP_MLOCK, shrink_page_list() will cull the page
+without consuming swap space.  try_to_munlock() will be described below.
+
+To "cull" an unevictable page, vmscan simply puts the page back on the lru
+list using putback_lru_page()--the inverse operation to isolate_lru_page()--
+after dropping the page lock.  Because the condition which makes the page
+unevictable may change once the page is unlocked, putback_lru_page() will
+recheck the unevictable state of a page that it places on the unevictable lru
+list.  If the page has become unevictable, putback_lru_page() removes it from
+the list and retries, including the page_unevictable() test.  Because such a
+race is a rare event and movement of pages onto the unevictable list should be
+rare, these extra evictability checks should not occur in the majority of calls
+to putback_lru_page().
+
+
+Mlocked Page:  Prior Work
+
+The "Unevictable Mlocked Pages" infrastructure is based on work originally
+posted by Nick Piggin in an RFC patch entitled "mm: mlocked pages off LRU".
+Nick posted his patch as an alternative to a patch posted by Christoph
+Lameter to achieve the same objective--hiding mlocked pages from vmscan.
+In Nick's patch, he used one of the struct page lru list link fields as a count
+of VM_LOCKED vmas that map the page.  This use of the link field for a count
+prevented the management of the pages on an LRU list.  Thus, mlocked pages were
+not migratable as isolate_lru_page() could not find them and the lru list link
+field was not available to the migration subsystem.  Nick resolved this by
+putting mlocked pages back on the lru list before attempting to isolate them,
+thus abandoning the count of VM_LOCKED vmas.  When Nick's patch was integrated
+with the Unevictable LRU work, the count was replaced by walking the reverse
+map to determine whether any VM_LOCKED vmas mapped the page.  More on this
+below.
+
+
+Mlocked Pages:  Basic Management
+
+Mlocked pages--pages mapped into a VM_LOCKED vma--represent one class of
+unevictable pages.  When such a page has been "noticed" by the memory
+management subsystem, the page is marked with the PG_mlocked [PageMlocked()]
+flag.  A PageMlocked() page will be placed on the unevictable LRU list when
+it is added to the LRU.   Pages can be "noticed" by memory management in
+several places:
+
+1) in the mlock()/mlockall() system call handlers.
+2) in the mmap() system call handler when mmap()ing a region with the
+   MAP_LOCKED flag, or mmap()ing a region in a task that has called
+   mlockall() with the MCL_FUTURE flag.  Both of these conditions result
+   in the VM_LOCKED flag being set for the vma.
+3) in the fault path, if mlocked pages are "culled" in the fault path,
+   and when a VM_LOCKED stack segment is expanded.
+4) as mentioned above, in vmscan:shrink_page_list() when attempting to
+   reclaim a page in a VM_LOCKED vma--via try_to_unmap() or try_to_munlock().
+
+Mlocked pages become unlocked and rescued from the unevictable list when:
+
+1) mapped in a range unlocked via the munlock()/munlockall() system calls.
+2) munmapped() out of the last VM_LOCKED vma that maps the page, including
+   unmapping at task exit.
+3) when the page is truncated from the last VM_LOCKED vma of an mmap()ed file.
+4) before a page is COWed in a VM_LOCKED vma.
+
+
+Mlocked Pages:  mlock()/mlockall() System Call Handling
+
+Both [do_]mlock() and [do_]mlockall() system call handlers call mlock_fixup()
+for each vma in the range specified by the call.  In the case of mlockall(),
+this is the entire active address space of the task.  Note that mlock_fixup()
+is used for both mlock()ing and munlock()ing a range of memory.  A call to
+mlock() an already VM_LOCKED vma, or to munlock() a vma that is not VM_LOCKED
+is treated as a no-op--mlock_fixup() simply returns.
+
+If the vma passes some filtering described in "Mlocked Pages:  Filtering
+Special Vmas" below, mlock_fixup() will attempt to merge the vma with its
+neighbors or split
+off a subset of the vma if the range does not cover the entire vma.  Once the
+vma has been merged or split or neither, mlock_fixup() will call
+__mlock_vma_pages_range() to fault in the pages via get_user_pages() and
+to mark the pages as mlocked via mlock_vma_page().
+
+Note that the vma being mlocked might be mapped with PROT_NONE.  In this case,
+get_user_pages() will be unable to fault in the pages.  That's OK.  If pages
+do end up getting faulted into this VM_LOCKED vma, we'll handle them in the
+fault path or in vmscan.
+
+Also note that a page returned by get_user_pages() could be truncated or
+migrated out from under us, while we're trying to mlock it.  To detect
+this, __mlock_vma_pages_range() tests the page_mapping after acquiring
+the page lock.  If the page is still associated with its mapping, we'll
+go ahead and call mlock_vma_page().  If the mapping is gone, we just
+unlock the page and move on.  Worse case, this results in page mapped
+in a VM_LOCKED vma remaining on a normal LRU list without being
+PageMlocked().  Again, vmscan will detect and cull such pages.
+
+mlock_vma_page(), called with the page locked [N.B., not "mlocked"], will
+TestSetPageMlocked() for each page returned by get_user_pages().  We use
+TestSetPageMlocked() because the page might already be mlocked by another
+task/vma and we don't want to do extra work.  We especially do not want to
+count an mlocked page more than once in the statistics.  If the page was
+already mlocked, mlock_vma_page() is done.
+
+If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
+page from the LRU, as it is likely on the appropriate active or inactive list
+at that time.  If the isolate_lru_page() succeeds, mlock_vma_page() will
+putback the page--putback_lru_page()--which will notice that the page is now
+mlocked and divert the page to the zone's unevictable LRU list.  If
+mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
+it later if/when it attempts to reclaim the page.
+
+
+Mlocked Pages:  Filtering Special Vmas
+
+mlock_fixup() filters several classes of "special" vmas:
+
+1) vmas with VM_IO|VM_PFNMAP set are skipped entirely.  The pages behind
+   these mappings are inherently pinned, so we don't need to mark them as
+   mlocked.  In any case, most of the pages have no struct page in which to
+   so mark the page.  Because of this, get_user_pages() will fail for these
+   vmas, so there is no sense in attempting to visit them.
+
+2) vmas mapping hugetlbfs page are already effectively pinned into memory.
+   We don't need nor want to mlock() these pages.  However, to preserve the
+   prior behavior of mlock()--before the unevictable/mlock changes--mlock_fixup()
+   will call make_pages_present() in the hugetlbfs vma range to allocate the
+   huge pages and populate the ptes.
+
+3) vmas with VM_DONTEXPAND|VM_RESERVED are generally user space mappings of
+   kernel pages, such as the vdso page, relay channel pages, etc.  These pages
+   are inherently unevictable and are not managed on the LRU lists.
+   mlock_fixup() treats these vmas the same as hugetlbfs vmas.  It calls
+   make_pages_present() to populate the ptes.
+
+Note that for all of these special vmas, mlock_fixup() does not set the
+VM_LOCKED flag.  Therefore, we won't have to deal with them later during
+munlock() or munmap()--for example, at task exit.  Neither does mlock_fixup()
+account these vmas against the task's "locked_vm".
+
+Mlocked Pages:  Downgrading the Mmap Semaphore.
+
+mlock_fixup() must be called with the mmap semaphore held for write, because
+it may have to merge or split vmas.  However, mlocking a large region of
+memory can take a long time--especially if vmscan must reclaim pages to
+satisfy the region's requirements.  Faulting in a large region with the mmap
+semaphore held for write can hold off other faults on the address space, in
+the case of a multi-threaded task.  It can also hold off scans of the task's
+address space via /proc.  While testing under heavy load, it was observed that
+the ps(1) command could be held off for many minutes while a large segment was
+mlock()ed down.
+
+To address this issue, and to make the system more responsive during mlock()ing
+of large segments, mlock_fixup() downgrades the mmap semaphore to read mode
+during the call to __mlock_vma_pages_range().  This works fine.  However, the
+callers of mlock_fixup() expect the semaphore to be returned in write mode.
+So, mlock_fixup() "upgrades" the semaphore to write mode.  Linux does not
+support an atomic upgrade_sem() call, so mlock_fixup() must drop the semaphore
+and reacquire it in write mode.  In a multi-threaded task, it is possible for
+the task memory map to change while the semaphore is dropped.  Therefore,
+mlock_fixup() looks up the vma at the range start address after reacquiring
+the semaphore in write mode and verifies that it still covers the original
+range.  If not, mlock_fixup() returns an error [-EAGAIN].  All callers of
+mlock_fixup() have been changed to deal with this new error condition.
+
+Note:  when munlocking a region, all of the pages should already be resident--
+unless we have racing threads mlocking() and munlocking() regions.  So,
+unlocking should not have to wait for page allocations nor faults of any kind.
+Therefore mlock_fixup() does not downgrade the semaphore for munlock().
+
+
+Mlocked Pages:  munlock()/munlockall() System Call Handling
+
+The munlock() and munlockall() system calls are handled by the same functions--
+do_mlock[all]()--as the mlock() and mlockall() system calls with the unlock
+vs lock operation indicated by an argument.  So, these system calls are also
+handled by mlock_fixup().  Again, if called for an already munlock()ed vma,
+mlock_fixup() simply returns.  Because of the vma filtering discussed above,
+VM_LOCKED will not be set in any "special" vmas.  So, these vmas will be
+ignored for munlock.
+
+If the vma is VM_LOCKED, mlock_fixup() again attempts to merge or split off
+the specified range.  The range is then munlocked via the function
+__mlock_vma_pages_range()--the same function used to mlock a vma range--
+passing a flag to indicate that munlock() is being performed.
+
+Because the vma access protections could have been changed to PROT_NONE after
+faulting in and mlocking some pages, get_user_pages() was unreliable for visiting
+these pages for munlocking.  Because we don't want to leave pages mlocked(),
+get_user_pages() was enhanced to accept a flag to ignore the permissions when
+fetching the pages--all of which should be resident as a result of previous
+mlock()ing.
+
+For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling
+munlock_vma_page().  munlock_vma_page() unconditionally clears the PG_mlocked
+flag using TestClearPageMlocked().  As with mlock_vma_page(), munlock_vma_page()
+uses the Test*PageMlocked() function to handle the case where the page might
+have already been unlocked by another task.  If the page was mlocked,
+munlock_vma_page() updates the zone statistics for the number of mlocked
+pages.  Note, however, that at this point we haven't checked whether the page
+is mapped by other VM_LOCKED vmas.
+
+We can't call try_to_munlock(), the function that walks the reverse map to check
+for other VM_LOCKED vmas, without first isolating the page from the LRU.
+try_to_munlock() is a variant of try_to_unmap() and thus requires that the page
+not be on an lru list.  [More on these below.]  However, the call to
+isolate_lru_page() could fail, in which case we couldn't try_to_munlock().
+So, we go ahead and clear PG_mlocked up front, as this might be the only chance
+we have.  If we can successfully isolate the page, we go ahead and
+try_to_munlock(), which will restore the PG_mlocked flag and update the zone
+page statistics if it finds another vma holding the page mlocked.  If we fail
+to isolate the page, we'll have left a potentially mlocked page on the LRU.
+This is fine, because we'll catch it later when/if vmscan tries to reclaim the
+page.  This should be relatively rare.
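+
+Schematically, the munlock path described in the preceding two paragraphs
+looks roughly like this [simplified sketch, not the exact kernel code]:
+
+	if (TestClearPageMlocked(page)) {
+		dec_zone_page_state(page, NR_MLOCK);	/* page was mlocked */
+		if (!isolate_lru_page(page)) {
+			/* may restore PG_mlocked and the statistics if
+			 * another VM_LOCKED vma still maps the page */
+			try_to_munlock(page);
+			putback_lru_page(page);
+		}
+		/* if isolation failed, vmscan will catch the page later */
+	}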
+
+Mlocked Pages:  Migrating Them...
+
+A page that is being migrated has been isolated from the lru lists and is
+held locked across unmapping of the page, updating the page's mapping
+[address_space] entry and copying the contents and state, until the
+page table entry has been replaced with an entry that refers to the new
+page.  Linux supports migration of mlocked pages and other unevictable
+pages.  This involves simply moving the PageMlocked and PageUnevictable states
+from the old page to the new page.
+
+Note that page migration can race with mlocking or munlocking of the same
+page.  This has been discussed from the mlock/munlock perspective in the
+respective sections above.  Both processes [migration, m[un]locking] hold
+the page locked.  This provides the first level of synchronization.  Page
+migration zeros out the page_mapping of the old page before unlocking it,
+so m[un]lock can skip these pages by testing the page mapping under page
+lock.
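+
+The m[un]lock side of that handshake can be sketched as [illustrative only]:
+
+	lock_page(page);
+	if (!page->mapping) {		/* page is being/has been migrated */
+		unlock_page(page);
+		return;			/* skip it */
+	}
+	/* ... set or clear PG_mlocked while holding the page lock ... */
+	unlock_page(page);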
+
+When completing page migration, we place the new and old pages back onto the
+lru after dropping the page lock.  The "unneeded" page--old page on success,
+new page on failure--will be freed when the reference count held by the
+migration process is released.  To ensure that we don't strand pages on the
+unevictable list because of a race between munlock and migration, page
+migration uses the putback_lru_page() function to add migrated pages back to
+the lru.
+
+
+Mlocked Pages:  mmap(MAP_LOCKED) System Call Handling
+
+In addition to the mlock()/mlockall() system calls, an application can request
+that a region of memory be mlocked using the MAP_LOCKED flag with the mmap()
+call.  Furthermore, any mmap() call or brk() call that expands the heap by a
+task that has previously called mlockall() with the MCL_FUTURE flag will result
+in the newly mapped memory being mlocked.  Before the unevictable/mlock changes,
+the kernel simply called make_pages_present() to allocate pages and populate
+the page table.
+
+To mlock a range of memory under the unevictable/mlock infrastructure, the
+mmap() handler and task address space expansion functions call
+mlock_vma_pages_range() specifying the vma and the address range to mlock.
+mlock_vma_pages_range() filters vmas like mlock_fixup(), as described above in
+"Mlocked Pages:  Filtering Vmas".  It will clear the VM_LOCKED flag, which will
+have already been set by the caller, in filtered vmas.  Thus these vmas need
+not be visited for munlock when the region is unmapped.
+
+For "normal" vmas, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
+fault/allocate the pages and mlock them.  Again, like mlock_fixup(),
+mlock_vma_pages_range() downgrades the mmap semaphore to read mode before
+attempting to fault/allocate and mlock the pages; and "upgrades" the semaphore
+back to write mode before returning.
+
+The callers of mlock_vma_pages_range() will have already added the memory
+range to be mlocked to the task's "locked_vm".  To account for filtered vmas,
+mlock_vma_pages_range() returns the number of pages NOT mlocked.  All of the
+callers then subtract a non-negative return value from the task's locked_vm.
+A negative return value represents an error--for example, from get_user_pages()
+attempting to fault in a vma with PROT_NONE access.  In this case, we leave
+the memory range accounted as locked_vm, as the protections could be changed
+later and pages allocated into that region.
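+
+The resulting accounting pattern in the callers looks roughly like the
+following [sketch; "nr_pages" is an assumed name for the size of the range
+in pages]:
+
+	mm->locked_vm += nr_pages;		/* optimistic accounting */
+	ret = mlock_vma_pages_range(vma, start, end);
+	if (ret > 0) {
+		mm->locked_vm -= ret;		/* filtered, not mlocked */
+		ret = 0;
+	}
+	/* ret < 0:  leave locked_vm alone--protections may change later */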
+
+
+Mlocked Pages:  munmap()/exit()/exec() System Call Handling
+
+When unmapping an mlocked region of memory, whether by an explicit call to
+munmap() or via an internal unmap from exit() or exec() processing, we must
+munlock the pages if we're removing the last VM_LOCKED vma that maps the pages.
+Before the unevictable/mlock changes, mlocking did not mark the pages in any way,
+so unmapping them required no processing.
+
+To munlock a range of memory under the unevictable/mlock infrastructure, the
+munmap() handler and task address space tear down function call
+munlock_vma_pages_all().  The name reflects the observation that one always
+specifies the entire vma range when munlock()ing during unmap of a region.
+Because of the vma filtering when mlocking() regions, only "normal" vmas that
+actually contain mlocked pages will be passed to munlock_vma_pages_all().
+
+munlock_vma_pages_all() clears the VM_LOCKED vma flag and, like mlock_fixup()
+for the munlock case, calls __munlock_vma_pages_range() to walk the page table
+for the vma's memory range and munlock_vma_page() each resident page mapped by
+the vma.  This effectively munlocks the page, but only if this is the last
+VM_LOCKED vma that maps the page.
+
+
+Mlocked Pages:  try_to_unmap()
+
+[Note:  the code changes represented by this section are really quite small
+compared to the text needed to describe what is happening and why, and to
+discuss the implications.]
+
+Pages can, of course, be mapped into multiple vmas.  Some of these vmas may
+have VM_LOCKED flag set.  It is possible for a page mapped into one or more
+VM_LOCKED vmas not to have the PG_mlocked flag set and therefore reside on one
+of the active or inactive LRU lists.  This could happen if, for example, a
+task in the process of munlock()ing the page could not isolate the page from
+the LRU.  As a result, vmscan/shrink_page_list() might encounter such a page
+as described in "Unevictable Pages and Vmscan [shrink_*_list()]".  To
+handle this situation, try_to_unmap() has been enhanced to check for VM_LOCKED
+vmas while it is walking a page's reverse map.
+
+try_to_unmap() is always called, by either vmscan for reclaim or for page
+migration, with the argument page locked and isolated from the LRU.  BUG_ON()
+assertions enforce this requirement.  Separate functions handle anonymous and
+mapped file pages, as these types of pages have different reverse map
+mechanisms.
+
+	try_to_unmap_anon()
+
+To unmap anonymous pages, each vma in the list anchored in the anon_vma must be
+visited--at least until a VM_LOCKED vma is encountered.  If the page is being
+unmapped for migration, VM_LOCKED vmas do not stop the process because mlocked
+pages are migratable.  However, for reclaim, if the page is mapped into a
+VM_LOCKED vma, the scan stops.  try_to_unmap() attempts to acquire the mmap
+semaphore of the mm_struct to which the vma belongs in read mode.  If this is
+successful, try_to_unmap() will mlock the page via mlock_vma_page()--we
+wouldn't have gotten to try_to_unmap() if the page were already mlocked--and
+will return SWAP_MLOCK, indicating that the page is unevictable.  If the
+mmap semaphore cannot be acquired, we are not sure whether the page is really
+unevictable or not.  In this case, try_to_unmap() will return SWAP_AGAIN.
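+
+The VM_LOCKED handling for the reclaim case can be sketched as follows
+["migration" stands in for whatever argument distinguishes the two callers;
+illustrative only]:
+
+	if ((vma->vm_flags & VM_LOCKED) && !migration) {
+		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+			mlock_vma_page(page);	/* not mlocked until now */
+			up_read(&vma->vm_mm->mmap_sem);
+			return SWAP_MLOCK;	/* definitely unevictable */
+		}
+		return SWAP_AGAIN;		/* can't tell--retry later */
+	}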
+
+	try_to_unmap_file() -- linear mappings
+
+Unmapping of a mapped file page works the same, except that the scan visits
+all vmas that map the page's index/page offset in the page's mapping's
+reverse map priority search tree.  It must also visit each vma in the page's
+mapping's non-linear list, if the list is non-empty.  As for anonymous pages,
+on encountering a VM_LOCKED vma for a mapped file page, try_to_unmap() will
+attempt to acquire the associated mm_struct's mmap semaphore to mlock the page,
+returning SWAP_MLOCK if this is successful, and SWAP_AGAIN, if not.
+
+	try_to_unmap_file() -- non-linear mappings
+
+If a page's mapping contains a non-empty non-linear mapping vma list, then
+try_to_un{map|lock}() must also visit each vma in that list to determine
+whether the page is mapped in a VM_LOCKED vma.  Again, the scan must visit
+all vmas in the non-linear list to ensure that the page is not/should not be
+mlocked.  If a VM_LOCKED vma is found in the list, the scan could terminate.
+However, there is no easy way to determine whether the page is actually mapped
+in a given vma--either for unmapping or testing whether the VM_LOCKED vma
+actually pins the page.
+
+So, try_to_unmap_file() handles non-linear mappings by scanning a certain
+number of pages--a "cluster"--in each non-linear vma associated with the page's
+mapping, for each file mapped page that vmscan tries to unmap.  If this happens
+to unmap the page we're trying to unmap, try_to_unmap() will notice this on
+return--(page_mapcount(page) == 0)--and return SWAP_SUCCESS.  Otherwise, it
+will return SWAP_AGAIN, causing vmscan to recirculate this page.  We take
+advantage of the cluster scan in try_to_unmap_cluster() as follows:
+
+For each non-linear vma, try_to_unmap_cluster() attempts to acquire the mmap
+semaphore of the associated mm_struct for read without blocking.  If this
+attempt is successful and the vma is VM_LOCKED, try_to_unmap_cluster() will
+retain the mmap semaphore for the scan; otherwise it drops it here.  Then,
+for each page in the cluster, if we're holding the mmap semaphore for a locked
+vma, try_to_unmap_cluster() calls mlock_vma_page() to mlock the page.  This
+call is a no-op if the page is already locked, but will mlock any pages in
+the non-linear mapping that happen to be unlocked.  If one of the pages so
+mlocked is the page passed in to try_to_unmap(), try_to_unmap_cluster() will
+return SWAP_MLOCK, rather than the default SWAP_AGAIN.  This will allow vmscan
+to cull the page, rather than recirculating it on the inactive list.  Again,
+if try_to_unmap_cluster() cannot acquire the vma's mmap sem, it returns
+SWAP_AGAIN, indicating that the page is mapped by a VM_LOCKED vma, but
+couldn't be mlocked.
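+
+In outline, the cluster scan behaves something like this [sketch;
+"check_page" is the page originally passed to try_to_unmap()]:
+
+	locked_vma = NULL;
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+		if (vma->vm_flags & VM_LOCKED)
+			locked_vma = vma;	/* hold mmap_sem for the scan */
+		else
+			up_read(&vma->vm_mm->mmap_sem);
+	}
+
+	/* then, for each page mapped in the cluster: */
+	if (locked_vma) {
+		mlock_vma_page(page);		/* no-op if already mlocked */
+		if (page == check_page)
+			ret = SWAP_MLOCK;	/* cull, don't recirculate */
+		/* and don't unmap an mlocked page */
+	}
+
+	if (locked_vma)
+		up_read(&vma->vm_mm->mmap_sem);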
+
+
+Mlocked Pages:  try_to_munlock() Reverse Map Scan
+
+TODO/FIXME:  a better name might be page_mlocked()--analogous to the
+page_referenced() reverse map walker--especially if we continue to call this
+from shrink_page_list().  See related TODO/FIXME below.
+
+When munlock_vma_page()--see "Mlocked Pages:  munlock()/munlockall() System
+Call Handling" above--tries to munlock a page, or when shrink_page_list()
+encounters an anonymous page that is not yet in the swap cache, they need to
+determine whether or not the page is mapped by any VM_LOCKED vma, without
+actually attempting to unmap all ptes from the page.  For this purpose, the
+unevictable/mlock infrastructure introduced a variant of try_to_unmap() called
+try_to_munlock().
+
+try_to_munlock() calls the same functions as try_to_unmap() for anonymous and
+mapped file pages with an additional argument specifying unlock versus unmap
+processing.  Again, these functions walk the respective reverse maps looking
+for VM_LOCKED vmas.  When such a vma is found for anonymous pages and file
+pages mapped in linear VMAs, as in the try_to_unmap() case, the functions
+attempt to acquire the associated mmap semaphore, mlock the page via
+mlock_vma_page() and return SWAP_MLOCK.  This effectively undoes the
+pre-clearing of the page's PG_mlocked done by munlock_vma_page() and informs
+shrink_page_list() that the anonymous page should be culled rather than added
+to the swap cache in preparation for a try_to_unmap() that will almost
+certainly fail.
+
+If try_to_unmap() is unable to acquire a VM_LOCKED vma's associated mmap
+semaphore, it will return SWAP_AGAIN.  This will allow shrink_page_list()
+to recycle the page on the inactive list and hope that it has better luck
+with the page next time.
+
+For file pages mapped into non-linear vmas, the try_to_munlock() logic works
+slightly differently.  On encountering a VM_LOCKED non-linear vma that might
+map the page, try_to_munlock() returns SWAP_AGAIN without actually mlocking
+the page.  munlock_vma_page() will just leave the page unlocked and let
+vmscan deal with it--the usual fallback position.
+
+Note that try_to_munlock()'s reverse map walk must visit every vma in a page's
+reverse map to determine that a page is NOT mapped into any VM_LOCKED vma.
+However, the scan can terminate when it encounters a VM_LOCKED vma and can
+successfully acquire the vma's mmap semaphore for read and mlock the page.
+Although try_to_munlock() can be called many [very many!] times when
+munlock()ing a large region or tearing down a large address space that has been
+mlocked via mlockall(), overall this is a fairly rare event.  In addition,
+although shrink_page_list() calls try_to_munlock() for every anonymous page that
+it handles that is not yet in the swap cache, on average anonymous pages will
+have very short reverse map lists.
+
+
+Mlocked Pages:  Page Reclaim in shrink_*_list()
+
+shrink_active_list() culls any obviously unevictable pages--i.e.,
+!page_evictable(page, NULL)--diverting these to the unevictable lru
+list.  However, shrink_active_list() only sees unevictable pages that
+made it onto the active/inactive lru lists.  Note that these pages do not
+have PageUnevictable set--otherwise, they would be on the unevictable list and
+shrink_active_list() would never see them.
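+
+The cull itself is a short test at the top of the scan loop [sketch]:
+
+	if (unlikely(!page_evictable(page, NULL))) {
+		putback_lru_page(page);	/* goes to the unevictable list */
+		continue;
+	}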
+
+Some examples of these unevictable pages on the LRU lists are:
+
+1) ramfs pages that have been placed on the lru lists when first allocated.
+
+2) SHM_LOCKed shared memory pages.  shmctl(SHM_LOCK) does not attempt to
+   allocate or fault in the pages in the shared memory region.  The pages
+   are allocated and faulted in only when an application first accesses
+   them after SHM_LOCKing the segment.
+
+3) Mlocked pages that could not be isolated from the lru and moved to the
+   unevictable list in mlock_vma_page().
+
+4) Pages mapped into multiple VM_LOCKED vmas, but try_to_munlock() couldn't
+   acquire the vma's mmap semaphore to test the flags and set PageMlocked.
+   munlock_vma_page() was forced to let the page back on to the normal
+   LRU list for vmscan to handle.
+
+shrink_inactive_list() also culls any unevictable pages that it finds
+on the inactive lists, again diverting them to the appropriate zone's unevictable
+lru list.  shrink_inactive_list() should only see SHM_LOCKed pages that became
+SHM_LOCKed after shrink_active_list() had moved them to the inactive list, or
+pages mapped into VM_LOCKED vmas that munlock_vma_page() couldn't isolate from
+the lru to recheck via try_to_munlock().  shrink_inactive_list() won't notice
+the latter, but will pass them on to shrink_page_list().
+
+shrink_page_list() again culls obviously unevictable pages that it could
+encounter for similar reasons to shrink_inactive_list().  As already discussed,
+shrink_page_list() proactively looks for anonymous pages that should have
+PG_mlocked set but don't--these would not be detected by page_evictable()--to
+avoid adding them to the swap cache unnecessarily.  File pages mapped into
+VM_LOCKED vmas but without PG_mlocked set will make it all the way to
+try_to_unmap().  shrink_page_list() will divert them to the unevictable list when
+try_to_unmap() returns SWAP_MLOCK, as discussed above.
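+
+Schematically, in shrink_page_list() [sketch; the label names and the second
+argument to try_to_unmap() are assumptions]:
+
+	switch (try_to_unmap(page, 0)) {
+	case SWAP_MLOCK:
+		goto cull_mlocked;	/* divert to the unevictable list */
+	case SWAP_AGAIN:
+		goto keep_locked;	/* recirculate on the inactive list */
+	case SWAP_FAIL:
+		goto activate_locked;
+	case SWAP_SUCCESS:
+		break;			/* go on to try to free the page */
+	}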
+
+TODO/FIXME:  If we can enhance the swap cache to reliably remove entries
+with page_count(page) > 2, as long as all ptes are mapped to the page and
+not the swap entry, we can probably remove the call to try_to_munlock() in
+shrink_page_list() and just remove the page from the swap cache when
+try_to_unmap() returns SWAP_MLOCK.   Currently, remove_exclusive_swap_page()
+doesn't seem to allow that.
+
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 355c192..67fa3cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,12 @@
 W:	http://www.ibm.com/developerworks/power/cell/
 S:	Supported
 
+CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
+P:	David Vrabel
+M:	david.vrabel@csr.com
+L:	linux-usb@vger.kernel.org
+S:	Supported
+
 CFAG12864B LCD DRIVER
 P:	Miguel Ojeda Sandonis
 M:	miguel.ojeda.sandonis@gmail.com
@@ -1198,7 +1204,7 @@
 
 CPU FREQUENCY DRIVERS
 P:	Dave Jones
-M:	davej@codemonkey.org.uk
+M:	davej@redhat.com
 L:	cpufreq@vger.kernel.org
 W:	http://www.codemonkey.org.uk/projects/cpufreq/
 T:	git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
@@ -2176,6 +2182,13 @@
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 
+INTEL IOMMU (VT-d)
+P:	David Woodhouse
+M:	dwmw2@infradead.org
+L:	iommu@lists.linux-foundation.org
+T:	git://git.infradead.org/iommu-2.6.git
+S:	Supported
+
 INTEL IOP-ADMA DMA DRIVER
 P:	Dan Williams
 M:	dan.j.williams@intel.com
@@ -2928,9 +2941,9 @@
 
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 P:	Faisal Latif
-M:	flatif@neteffect.com
+M:	faisal.latif@intel.com
 P:	Chien Tung
-M:	ctung@neteffect.com
+M:	chien.tin.tung@intel.com
 L:	general@lists.openfabrics.org
 W:	http://www.neteffect.com
 S:	Supported
@@ -3244,11 +3257,6 @@
 T:	git kernel.org:/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git
 S:	Supported
 
-PCI HOTPLUG CORE
-P:	Kristen Carlson Accardi
-M:	kristen.c.accardi@intel.com
-S:	Supported
-
 PCIE HOTPLUG DRIVER
 P:	Kristen Carlson Accardi
 M:	kristen.c.accardi@intel.com
@@ -4191,6 +4199,12 @@
 T:	git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git
 S:	Maintained
 
+ULTRA-WIDEBAND (UWB) SUBSYSTEM:
+P:	David Vrabel
+M:	david.vrabel@csr.com
+L:	linux-usb@vger.kernel.org
+S:	Supported
+
 UNIFORM CDROM DRIVER
 P:	Jens Axboe
 M:	axboe@kernel.dk
@@ -4616,6 +4630,11 @@
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
 
+WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
+P:	David Vrabel
+M:	david.vrabel@csr.com
+S:	Maintained
+
 WISTRON LAPTOP BUTTON DRIVER
 P:	Miloslav Trmac
 M:	mitr@volny.cz
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index a0f642b..6110197 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -70,6 +70,7 @@
 	default y
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 
 
 menu "System setup"
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 15fda43..d069526 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -74,12 +74,14 @@
 #define TIF_UAC_SIGBUS		7
 #define TIF_MEMDIE		8
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal */
+#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return.  */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED)
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 04dcc5e..9cd8dca 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -655,7 +655,7 @@
 
 	case 0x71:					/* RTC_PORT(1) */
 		rtc_access.index = index;
-		rtc_access.data = BCD_TO_BIN(b);
+		rtc_access.data = bcd2bin(b);
 		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */
 
 #ifdef CONFIG_SMP
@@ -668,7 +668,7 @@
 #else
 		__marvel_access_rtc(&rtc_access);
 #endif
-		ret = BIN_TO_BCD(rtc_access.data);
+		ret = bin2bcd(rtc_access.data);
 		break;
 
 	default:
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 99a7f19..a4555f4 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -47,7 +47,7 @@
 
 static irq_swizzle_t *sable_lynx_irq_swizzle;
 
-static void sable_lynx_init_irq(int nr_irqs);
+static void sable_lynx_init_irq(int nr_of_irqs);
 
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
 
@@ -530,11 +530,11 @@
 }
 
 static void __init
-sable_lynx_init_irq(int nr_irqs)
+sable_lynx_init_irq(int nr_of_irqs)
 {
 	long i;
 
-	for (i = 0; i < nr_irqs; ++i) {
+	for (i = 0; i < nr_of_irqs; ++i) {
 		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
 		irq_desc[i].chip = &sable_lynx_irq_type;
 	}
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 75480ca..e6a2314 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -346,12 +346,12 @@
 	year = CMOS_READ(RTC_YEAR);
 
 	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(sec);
-		BCD_TO_BIN(min);
-		BCD_TO_BIN(hour);
-		BCD_TO_BIN(day);
-		BCD_TO_BIN(mon);
-		BCD_TO_BIN(year);
+		sec = bcd2bin(sec);
+		min = bcd2bin(min);
+		hour = bcd2bin(hour);
+		day = bcd2bin(day);
+		mon = bcd2bin(mon);
+		year = bcd2bin(year);
 	}
 
 	/* PC-like is standard; used for year >= 70 */
@@ -525,7 +525,7 @@
 
 	cmos_minutes = CMOS_READ(RTC_MINUTES);
 	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BCD_TO_BIN(cmos_minutes);
+		cmos_minutes = bcd2bin(cmos_minutes);
 
 	/*
 	 * since we're only adjusting minutes and seconds,
@@ -543,8 +543,8 @@
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
 		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			BIN_TO_BCD(real_seconds);
-			BIN_TO_BCD(real_minutes);
+			real_seconds = bin2bcd(real_seconds);
+			real_minutes = bin2bcd(real_minutes);
 		}
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 7c3d5ec..bd8ac53 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -106,7 +106,7 @@
 }
 
 static int
-op_axp_create_files(struct super_block * sb, struct dentry * root)
+op_axp_create_files(struct super_block *sb, struct dentry *root)
 {
 	int i;
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4853f9d..f504c80 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -192,6 +192,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "System Type"
 
 choice
@@ -354,7 +356,7 @@
 	select GENERIC_GPIO
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
-	select ZONE_DMA if PCI
+	select DMABOUNCE if PCI
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
 
@@ -1254,6 +1256,8 @@
 
 source "drivers/usb/Kconfig"
 
+source "drivers/uwb/Kconfig"
+
 source "drivers/mmc/Kconfig"
 
 source "drivers/memstick/Kconfig"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 2e32acc..86b5e69 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -13,10 +13,10 @@
 config SA1111
 	bool
 	select DMABOUNCE if !ARCH_PXA
-	select ZONE_DMA if !ARCH_PXA
 
 config DMABOUNCE
 	bool
+	select ZONE_DMA
 
 config TIMER_ACORN
 	bool
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index fb86f24..47ccec9 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -581,6 +581,7 @@
 		goto out;
 	}
 
+#ifdef CONFIG_DMABOUNCE
 	/*
 	 * If the parent device has a DMA mask associated with it,
 	 * propagate it down to the children.
@@ -598,6 +599,7 @@
 			}
 		}
 	}
+#endif
 
 out:
 	return ret;
@@ -937,7 +939,7 @@
 #define sa1111_resume  NULL
 #endif
 
-static int sa1111_probe(struct platform_device *pdev)
+static int __devinit sa1111_probe(struct platform_device *pdev)
 {
 	struct resource *mem;
 	int irq;
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 8b7a431..9033d14 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -147,6 +147,7 @@
 # CONFIG_MACH_MAINSTONE is not set
 # CONFIG_ARCH_PXA_IDP is not set
 # CONFIG_PXA_SHARPSL is not set
+CONFIG_TRIZEPS_PXA=y
 CONFIG_MACH_TRIZEPS4=y
 CONFIG_MACH_TRIZEPS4_CONXS=y
 # CONFIG_MACH_TRIZEPS4_ANY is not set
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index 71c2fa7..98ec30c 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -89,6 +89,8 @@
  * 	node 3:  0xd8000000 - 0xdfffffff
  */
 #define NODE_MEM_SIZE_BITS	24
+#define SECTION_SIZE_BITS	24
+#define MAX_PHYSMEM_BITS	32
 
 #endif
 
diff --git a/arch/arm/mach-iop13xx/include/mach/time.h b/arch/arm/mach-iop13xx/include/mach/time.h
index 49213d9..d6d5252 100644
--- a/arch/arm/mach-iop13xx/include/mach/time.h
+++ b/arch/arm/mach-iop13xx/include/mach/time.h
@@ -41,7 +41,7 @@
 		return 1200000000;
 	default:
 		printk("%s: warning unknown frequency, defaulting to 800Mhz\n",
-			__FUNCTION__);
+			__func__);
 	}
 
 	return 800000000;
@@ -60,7 +60,7 @@
 		return 4;
 	default:
 		printk("%s: warning unknown ratio, defaulting to 2\n",
-			__FUNCTION__);
+			__func__);
 	}
 
 	return 2;
diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c
index b0653a8..3045130 100644
--- a/arch/arm/mach-ixp2000/ixdp2x00.c
+++ b/arch/arm/mach-ixp2000/ixdp2x00.c
@@ -143,7 +143,7 @@
 	.unmask	= ixdp2x00_irq_unmask
 };
 
-void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_irqs)
+void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs)
 {
 	unsigned int irq;
 
@@ -154,7 +154,7 @@
 
 	board_irq_stat = stat_reg;
 	board_irq_mask = mask_reg;
-	board_irq_count = nr_irqs;
+	board_irq_count = nr_of_irqs;
 
 	*board_irq_mask = 0xffffffff;
 
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index db8b5fe..2c5a02b 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -167,11 +167,6 @@
 
 comment "IXP4xx Options"
 
-config DMABOUNCE
-	bool
-	default y
-	depends on PCI
-
 config IXP4XX_INDIRECT_PCI
 	bool "Use indirect PCI memory access"
 	depends on PCI
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 85cad05..0bb1fbd 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -16,6 +16,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/ata_platform.h>
 #include <linux/spi/orion_spi.h>
+#include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/timex.h>
 #include <asm/mach/map.h>
@@ -152,6 +153,40 @@
 
 
 /*****************************************************************************
+ * Ethernet switch
+ ****************************************************************************/
+static struct resource kirkwood_switch_resources[] = {
+	{
+		.start	= 0,
+		.end	= 0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device kirkwood_switch_device = {
+	.name		= "dsa",
+	.id		= 0,
+	.num_resources	= 0,
+	.resource	= kirkwood_switch_resources,
+};
+
+void __init kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq)
+{
+	if (irq != NO_IRQ) {
+		kirkwood_switch_resources[0].start = irq;
+		kirkwood_switch_resources[0].end = irq;
+		kirkwood_switch_device.num_resources = 1;
+	}
+
+	d->mii_bus = &kirkwood_ge00_shared.dev;
+	d->netdev = &kirkwood_ge00.dev;
+	kirkwood_switch_device.dev.platform_data = d;
+
+	platform_device_register(&kirkwood_switch_device);
+}
+
+
+/*****************************************************************************
  * SoC RTC
  ****************************************************************************/
 static struct resource kirkwood_rtc_resource = {
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index 8fa0f6a..5774632 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -11,6 +11,7 @@
 #ifndef __ARCH_KIRKWOOD_COMMON_H
 #define __ARCH_KIRKWOOD_COMMON_H
 
+struct dsa_platform_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
@@ -29,6 +30,7 @@
 
 void kirkwood_ehci_init(void);
 void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data);
+void kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq);
 void kirkwood_pcie_init(void);
 void kirkwood_rtc_init(void);
 void kirkwood_sata_init(struct mv_sata_platform_data *sata_data);
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index f785093..175054a 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -19,6 +19,7 @@
 #include <linux/ata_platform.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/pci.h>
@@ -74,6 +75,15 @@
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f6281_switch_data = {
+	.port_names[0]	= "lan1",
+	.port_names[1]	= "lan2",
+	.port_names[2]	= "lan3",
+	.port_names[3]	= "lan4",
+	.port_names[4]	= "wan",
+	.port_names[5]	= "cpu",
+};
+
 static struct mv_sata_platform_data rd88f6281_sata_data = {
 	.n_ports	= 2,
 };
@@ -87,6 +97,7 @@
 
 	kirkwood_ehci_init();
 	kirkwood_ge00_init(&rd88f6281_ge00_data);
+	kirkwood_ge00_switch_init(&rd88f6281_switch_data, NO_IRQ);
 	kirkwood_rtc_init();
 	kirkwood_sata_init(&rd88f6281_sata_data);
 	kirkwood_uart0_init();
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index 49f434c..2e285bb 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 #include <linux/mv643xx_eth.h>
+#include <linux/ethtool.h>
 #include <mach/mv78xx0.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -28,10 +29,14 @@
 
 static struct mv643xx_eth_platform_data db78x00_ge10_data = {
 	.phy_addr	= MV643XX_ETH_PHY_NONE,
+	.speed		= SPEED_1000,
+	.duplex		= DUPLEX_FULL,
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge11_data = {
 	.phy_addr	= MV643XX_ETH_PHY_NONE,
+	.speed		= SPEED_1000,
+	.duplex		= DUPLEX_FULL,
 };
 
 static struct mv_sata_platform_data db78x00_sata_data = {
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index d354e0f..c40fc37 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -119,7 +119,7 @@
 
 void __init omap_init_irq(void)
 {
-	unsigned long nr_irqs = 0;
+	unsigned long nr_of_irqs = 0;
 	unsigned int nr_banks = 0;
 	int i;
 
@@ -133,14 +133,14 @@
 
 		omap_irq_bank_init_one(bank);
 
-		nr_irqs += bank->nr_irqs;
+		nr_of_irqs += bank->nr_irqs;
 		nr_banks++;
 	}
 
 	printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n",
-	       nr_irqs, nr_banks, nr_banks > 1 ? "s" : "");
+	       nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");
 
-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip(i, &omap_irq_chip);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID);
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 9625ef5..437065c 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -19,6 +19,7 @@
 #include <linux/mv643xx_i2c.h>
 #include <linux/ata_platform.h>
 #include <linux/spi/orion_spi.h>
+#include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <asm/timex.h>
@@ -198,6 +199,40 @@
 
 
 /*****************************************************************************
+ * Ethernet switch
+ ****************************************************************************/
+static struct resource orion5x_switch_resources[] = {
+	{
+		.start	= 0,
+		.end	= 0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device orion5x_switch_device = {
+	.name		= "dsa",
+	.id		= 0,
+	.num_resources	= 0,
+	.resource	= orion5x_switch_resources,
+};
+
+void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq)
+{
+	if (irq != NO_IRQ) {
+		orion5x_switch_resources[0].start = irq;
+		orion5x_switch_resources[0].end = irq;
+		orion5x_switch_device.num_resources = 1;
+	}
+
+	d->mii_bus = &orion5x_eth_shared.dev;
+	d->netdev = &orion5x_eth.dev;
+	orion5x_switch_device.dev.platform_data = d;
+
+	platform_device_register(&orion5x_switch_device);
+}
+
+
+/*****************************************************************************
  * I2C
  ****************************************************************************/
 static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = {
@@ -275,7 +310,8 @@
  * SPI
  ****************************************************************************/
 static struct orion_spi_info orion5x_spi_plat_data = {
-	.tclk		= 0,
+	.tclk			= 0,
+	.enable_clock_fix	= 1,
 };
 
 static struct resource orion5x_spi_resources[] = {
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 1f8b2da..a000c7c 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -1,6 +1,7 @@
 #ifndef __ARCH_ORION5X_COMMON_H
 #define __ARCH_ORION5X_COMMON_H
 
+struct dsa_platform_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
@@ -29,6 +30,7 @@
 void orion5x_ehci0_init(void);
 void orion5x_ehci1_init(void);
 void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data);
+void orion5x_eth_switch_init(struct dsa_platform_data *d, int irq);
 void orion5x_i2c_init(void);
 void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
 void orion5x_spi_init(void);
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index 500cdada..15f5323 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -16,6 +16,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/leds.h>
@@ -93,6 +94,15 @@
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f5181l_fxo_switch_data = {
+	.port_names[0]	= "lan2",
+	.port_names[1]	= "lan1",
+	.port_names[2]	= "wan",
+	.port_names[3]	= "cpu",
+	.port_names[5]	= "lan4",
+	.port_names[7]	= "lan3",
+};
+
 static void __init rd88f5181l_fxo_init(void)
 {
 	/*
@@ -107,6 +117,7 @@
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&rd88f5181l_fxo_eth_data);
+	orion5x_eth_switch_init(&rd88f5181l_fxo_switch_data, NO_IRQ);
 	orion5x_uart0_init();
 
 	orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE,
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index ebde814..8ad3934 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -17,6 +17,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
 #include <linux/i2c.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/leds.h>
@@ -94,6 +95,15 @@
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f5181l_ge_switch_data = {
+	.port_names[0]	= "lan2",
+	.port_names[1]	= "lan1",
+	.port_names[2]	= "wan",
+	.port_names[3]	= "cpu",
+	.port_names[5]	= "lan4",
+	.port_names[7]	= "lan3",
+};
+
 static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = {
 	I2C_BOARD_INFO("ds1338", 0x68),
 };
@@ -112,6 +122,7 @@
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&rd88f5181l_ge_eth_data);
+	orion5x_eth_switch_init(&rd88f5181l_ge_switch_data, gpio_to_irq(8));
 	orion5x_i2c_init();
 	orion5x_uart0_init();
 
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index 40e0495..262e25e 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -19,6 +19,7 @@
 #include <linux/spi/orion_spi.h>
 #include <linux/spi/flash.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/leds.h>
@@ -34,6 +35,15 @@
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f6183ap_ge_switch_data = {
+	.port_names[0]	= "lan1",
+	.port_names[1]	= "lan2",
+	.port_names[2]	= "lan3",
+	.port_names[3]	= "lan4",
+	.port_names[4]	= "wan",
+	.port_names[5]	= "cpu",
+};
+
 static struct mtd_partition rd88f6183ap_ge_partitions[] = {
 	{
 		.name	= "kernel",
@@ -79,6 +89,7 @@
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&rd88f6183ap_ge_eth_data);
+	orion5x_eth_switch_init(&rd88f6183ap_ge_switch_data, gpio_to_irq(3));
 	spi_register_board_info(rd88f6183ap_ge_spi_slave_info,
 				ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info));
 	orion5x_spi_init();
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index 9a4fd52..cc8f892 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -15,6 +15,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/mach/arch.h>
@@ -105,6 +106,15 @@
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data wrt350n_v2_switch_data = {
+	.port_names[0]	= "lan2",
+	.port_names[1]	= "lan1",
+	.port_names[2]	= "wan",
+	.port_names[3]	= "cpu",
+	.port_names[5]	= "lan3",
+	.port_names[7]	= "lan4",
+};
+
 static void __init wrt350n_v2_init(void)
 {
 	/*
@@ -119,6 +129,7 @@
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&wrt350n_v2_eth_data);
+	orion5x_eth_switch_init(&wrt350n_v2_switch_data, NO_IRQ);
 	orion5x_uart0_init();
 
 	orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE,
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index f27f6b3..f781873 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -257,7 +257,6 @@
 	bool "CompuLab CM-X255/CM-X270 modules"
 	select PXA27x
 	select IWMMXT
-	select ZONE_DMA if PCI
 	select PXA25x
 	select PXA_SSP
 
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 9c163e1..32bb4a2 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -9,7 +9,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
+#ifndef __ASM_MACH_IRQS_H
+#define __ASM_MACH_IRQS_H
 
 #ifdef CONFIG_PXA_HAVE_ISA_IRQS
 #define PXA_ISA_IRQ(x)	(x)
@@ -264,3 +265,5 @@
 #endif
 
 #endif /* CONFIG_PCI_HOST_ITE8152 */
+
+#endif /* __ASM_MACH_IRQS_H */
diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h
index eb4b190..eb35fca 100644
--- a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h
+++ b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h
@@ -4,6 +4,43 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 
+struct pxa3xx_nand_timing {
+	unsigned int	tCH;  /* Enable signal hold time */
+	unsigned int	tCS;  /* Enable signal setup time */
+	unsigned int	tWH;  /* ND_nWE high duration */
+	unsigned int	tWP;  /* ND_nWE pulse time */
+	unsigned int	tRH;  /* ND_nRE high duration */
+	unsigned int	tRP;  /* ND_nRE pulse width */
+	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
+	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
+	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
+};
+
+struct pxa3xx_nand_cmdset {
+	uint16_t	read1;
+	uint16_t	read2;
+	uint16_t	program;
+	uint16_t	read_status;
+	uint16_t	read_id;
+	uint16_t	erase;
+	uint16_t	reset;
+	uint16_t	lock;
+	uint16_t	unlock;
+	uint16_t	lock_status;
+};
+
+struct pxa3xx_nand_flash {
+	const struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
+	const struct pxa3xx_nand_cmdset *cmdset;
+
+	uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
+	uint32_t page_size;	/* Page size in bytes (PAGE_SZ) */
+	uint32_t flash_width;	/* Width of Flash memory (DWIDTH_M) */
+	uint32_t dfc_width;	/* Width of flash controller(DWIDTH_C) */
+	uint32_t num_blocks;	/* Number of physical blocks in Flash */
+	uint32_t chip_id;
+};
+
 struct pxa3xx_nand_platform_data {
 
 	/* the data flash bus is shared between the Static Memory
@@ -12,8 +49,11 @@
 	 */
 	int	enable_arbiter;
 
-	struct mtd_partition *parts;
-	unsigned int	nr_parts;
+	const struct mtd_partition		*parts;
+	unsigned int				nr_parts;
+
+	const struct pxa3xx_nand_flash * 	flash;
+	size_t					num_flash;
 };
 
 extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 31ac26b..e8488df 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -142,7 +142,7 @@
 
 #define SPITZ_SCP2_GPIO_BASE		(NR_BUILTIN_GPIO + 12)
 #define SPITZ_GPIO_IR_ON		(SPITZ_SCP2_GPIO_BASE + 0)
-#define SPITZ_GPIO_AKIN_PULLUP		(SPITZ_SCP2_GPIO_BASE + 1
+#define SPITZ_GPIO_AKIN_PULLUP		(SPITZ_SCP2_GPIO_BASE + 1)
 #define SPITZ_GPIO_RESERVED_1		(SPITZ_SCP2_GPIO_BASE + 2)
 #define SPITZ_GPIO_RESERVED_2		(SPITZ_SCP2_GPIO_BASE + 3)
 #define SPITZ_GPIO_RESERVED_3		(SPITZ_SCP2_GPIO_BASE + 4)
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index a72803f..8bce6d8 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -59,8 +59,6 @@
  * TC6393XB GPIOs
  */
 #define TOSA_TC6393XB_GPIO_BASE		(NR_BUILTIN_GPIO + 2 * 12)
-#define TOSA_TC6393XB_GPIO(i)		(TOSA_TC6393XB_GPIO_BASE + (i))
-#define TOSA_TC6393XB_GPIO_BIT(gpio)	(1 << (gpio - TOSA_TC6393XB_GPIO_BASE))
 
 #define TOSA_GPIO_TG_ON			(TOSA_TC6393XB_GPIO_BASE + 0)
 #define TOSA_GPIO_L_MUTE		(TOSA_TC6393XB_GPIO_BASE + 1)
diff --git a/arch/arm/mach-pxa/include/mach/zylonite.h b/arch/arm/mach-pxa/include/mach/zylonite.h
index 0d35ca0..bf6785a 100644
--- a/arch/arm/mach-pxa/include/mach/zylonite.h
+++ b/arch/arm/mach-pxa/include/mach/zylonite.h
@@ -30,7 +30,7 @@
 static inline void zylonite_pxa300_init(void)
 {
 	if (cpu_is_pxa300() || cpu_is_pxa310())
-		panic("%s: PXA300/PXA310 not supported\n", __FUNCTION__);
+		panic("%s: PXA300/PXA310 not supported\n", __func__);
 }
 #endif
 
@@ -40,7 +40,7 @@
 static inline void zylonite_pxa320_init(void)
 {
 	if (cpu_is_pxa320())
-		panic("%s: PXA320 not supported\n", __FUNCTION__);
+		panic("%s: PXA320 not supported\n", __func__);
 }
 #endif
 
diff --git a/arch/arm/mach-pxa/pwm.c b/arch/arm/mach-pxa/pwm.c
index 316cd98..74e2ead 100644
--- a/arch/arm/mach-pxa/pwm.c
+++ b/arch/arm/mach-pxa/pwm.c
@@ -60,7 +60,7 @@
 	do_div(c, 1000000000);
 	period_cycles = c;
 
-	if (period_cycles < 0)
+	if (period_cycles < 1)
 		period_cycles = 1;
 	prescale = (period_cycles - 1) / 1024;
 	pv = period_cycles / (prescale + 1) - 1;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 130e37e..a6c4694 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -706,16 +706,39 @@
 	.badblock_pattern = &tosa_tc6393xb_nand_bbt,
 };
 
-static struct tc6393xb_platform_data tosa_tc6393xb_setup = {
+static int tosa_tc6393xb_setup(struct platform_device *dev)
+{
+	int rc;
+
+	rc = gpio_request(TOSA_GPIO_CARD_VCC_ON, "CARD_VCC_ON");
+	if (rc)
+		goto err_req;
+
+	rc = gpio_direction_output(TOSA_GPIO_CARD_VCC_ON, 1);
+	if (rc)
+		goto err_dir;
+
+	return rc;
+
+err_dir:
+	gpio_free(TOSA_GPIO_CARD_VCC_ON);
+err_req:
+	return rc;
+}
+
+static void tosa_tc6393xb_teardown(struct platform_device *dev)
+{
+	gpio_free(TOSA_GPIO_CARD_VCC_ON);
+}
+
+static struct tc6393xb_platform_data tosa_tc6393xb_data = {
 	.scr_pll2cr	= 0x0cc1,
 	.scr_gper	= 0x3300,
-	.scr_gpo_dsr	=
-		TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON),
-	.scr_gpo_doecr	=
-		TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON),
 
 	.irq_base	= IRQ_BOARD_START,
 	.gpio_base	= TOSA_TC6393XB_GPIO_BASE,
+	.setup		= tosa_tc6393xb_setup,
+	.teardown	= tosa_tc6393xb_teardown,
 
 	.enable		= tosa_tc6393xb_enable,
 	.disable	= tosa_tc6393xb_disable,
@@ -723,6 +746,8 @@
 	.resume		= tosa_tc6393xb_resume,
 
 	.nand_data	= &tosa_tc6393xb_nand_config,
+
+	.resume_restore = 1,
 };
 
 
@@ -730,7 +755,7 @@
 	.name	= "tc6393xb",
 	.id	= -1,
 	.dev	= {
-		.platform_data	= &tosa_tc6393xb_setup,
+		.platform_data	= &tosa_tc6393xb_data,
 	},
 	.num_resources	= ARRAY_SIZE(tc6393xb_resources),
 	.resource	= tc6393xb_resources,
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index a13dbf3..a72e3ad 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -399,7 +399,7 @@
 	/* Switch mode */
 	if (mode & IR_SIRMODE)
 		trizeps_conxs_ircr &= ~ConXS_IRCR_MODE;	/* Slow mode */
-	else if (mode & IR_FIRMODE) {
+	else if (mode & IR_FIRMODE)
 		trizeps_conxs_ircr |= ConXS_IRCR_MODE;	/* Fast mode */
 
 	/* Switch power */
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 2f60bf6..f854e73 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -1033,8 +1033,7 @@
 
 	fclk = pll / s3c2443_fclk_div(clkdiv0);
 	hclk = s3c2443_prediv_getrate(&clk_prediv);
-	hclk = hclk / s3c2443_get_hdiv(clkdiv0);
-	hclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_HCLK) ? 2 : 1);
+	hclk /= s3c2443_get_hdiv(clkdiv0);
  	pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1);
 
 	s3c24xx_setup_clocks(xtal, fclk, hclk, pclk);
diff --git a/arch/arm/mach-sa1100/include/mach/ide.h b/arch/arm/mach-sa1100/include/mach/ide.h
deleted file mode 100644
index 4c99c8f..0000000
--- a/arch/arm/mach-sa1100/include/mach/ide.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/ide.h
- *
- * Copyright (c) 1998 Hugo Fiennes & Nicolas Pitre
- *
- * 18-aug-2000: Cleanup by Erik Mouw (J.A.K.Mouw@its.tudelft.nl)
- *              Get rid of the special ide_init_hwif_ports() functions
- *              and make a generalised function that can be used by all
- *              architectures.
- */
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
-#error "This code is broken and needs update to match with current ide support"
-
-
-/*
- * Set up a hw structure for a specified data port, control port and IRQ.
- * This should follow whatever the default interface uses.
- */
-static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
-				       unsigned long ctrl_port, int *irq)
-{
-	unsigned long reg = data_port;
-	int i;
-	int regincr = 1;
-
-	/* The Empeg board has the first two address lines unused */
-	if (machine_is_empeg())
-		regincr = 1 << 2;
-
-	/* The LART doesn't use A0 for IDE */
-	if (machine_is_lart())
-		regincr = 1 << 1;
-
-	memset(hw, 0, sizeof(*hw));
-
-	for (i = 0; i <= 7; i++) {
-		hw->io_ports_array[i] = reg;
-		reg += regincr;
-	}
-
-	hw->io_ports.ctl_addr = ctrl_port;
-
-	if (irq)
-		*irq = 0;
-}
-
-/*
- * This registers the standard ports for this architecture with the IDE
- * driver.
- */
-static __inline__ void
-ide_init_default_hwifs(void)
-{
-    if (machine_is_lart()) {
-#ifdef CONFIG_SA1100_LART
-        hw_regs_t hw;
-
-        /* Enable GPIO as interrupt line */
-        GPDR &= ~LART_GPIO_IDE;
-	set_irq_type(LART_IRQ_IDE, IRQ_TYPE_EDGE_RISING);
-
-        /* set PCMCIA interface timing */
-        MECR = 0x00060006;
-
-        /* init the interface */
-	ide_init_hwif_ports(&hw, PCMCIA_IO_0_BASE + 0x0000, PCMCIA_IO_0_BASE + 0x1000, NULL);
-        hw.irq = LART_IRQ_IDE;
-        ide_register_hw(&hw);
-#endif
-    }
-}
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 33926c9..5786adf1 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -29,7 +29,7 @@
  *	Clean and invalidate the entire cache.
  */
 ENTRY(v4_flush_kern_cache_all)
-#ifdef CPU_CP15
+#ifdef CONFIG_CPU_CP15
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 	mov	pc, lr
@@ -48,7 +48,7 @@
  *	- flags	- vma_area_struct flags describing address space
  */
 ENTRY(v4_flush_user_cache_range)
-#ifdef CPU_CP15
+#ifdef CONFIG_CPU_CP15
 	mov	ip, #0
 	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
 	mov	pc, lr
@@ -116,7 +116,7 @@
  *	- end	 - virtual end address
  */
 ENTRY(v4_dma_flush_range)
-#ifdef CPU_CP15
+#ifdef CONFIG_CPU_CP15
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 #endif
diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h
new file mode 100644
index 0000000..2b972df
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#ifndef __ASM_ARCH_NAND_H
+#define __ASM_ARCH_NAND_H
+
+struct mxc_nand_platform_data {
+	int width;	/* data bus width in bytes */
+	int hw_ecc;	/* 0 if supress hardware ECC */
+};
+#endif /* __ASM_ARCH_NAND_H */
diff --git a/arch/arm/plat-omap/include/mach/onenand.h b/arch/arm/plat-omap/include/mach/onenand.h
index d57f202..4649d30 100644
--- a/arch/arm/plat-omap/include/mach/onenand.h
+++ b/arch/arm/plat-omap/include/mach/onenand.h
@@ -16,6 +16,10 @@
 	int			gpio_irq;
 	struct mtd_partition	*parts;
 	int			nr_parts;
-	int                     (*onenand_setup)(void __iomem *);
+	int                     (*onenand_setup)(void __iomem *, int freq);
 	int			dma_channel;
 };
+
+int omap2_onenand_rephase(void);
+
+#define ONENAND_MAX_PARTITIONS 8
diff --git a/arch/arm/plat-s3c24xx/pwm-clock.c b/arch/arm/plat-s3c24xx/pwm-clock.c
index b8e854f..3fad68a 100644
--- a/arch/arm/plat-s3c24xx/pwm-clock.c
+++ b/arch/arm/plat-s3c24xx/pwm-clock.c
@@ -315,7 +315,7 @@
 	if (parent == s3c24xx_pwmclk_tclk(id))
 		bits = S3C2410_TCFG1_MUX_TCLK << shift;
 	else if (parent == s3c24xx_pwmclk_tdiv(id))
-		bits = clk_pwm_tdiv_bits(to_tdiv(clk)) << shift;
+		bits = clk_pwm_tdiv_bits(to_tdiv(parent)) << shift;
 	else
 		return -EINVAL;
 
diff --git a/arch/arm/plat-s3c24xx/pwm.c b/arch/arm/plat-s3c24xx/pwm.c
index feb770f..ec56b88 100644
--- a/arch/arm/plat-s3c24xx/pwm.c
+++ b/arch/arm/plat-s3c24xx/pwm.c
@@ -56,7 +56,7 @@
 		}					\
 	}
 
-#define DEFINE_TIMER(_tmr_no, _irq)			\
+#define DEFINE_S3C_TIMER(_tmr_no, _irq)			\
 	.name		= "s3c24xx-pwm",		\
 	.id		= _tmr_no,			\
 	.num_resources	= TIMER_RESOURCE_SIZE,		\
@@ -67,11 +67,11 @@
  */
 
 struct platform_device s3c_device_timer[] = {
-	[0] = { DEFINE_TIMER(0, IRQ_TIMER0) },
-	[1] = { DEFINE_TIMER(1, IRQ_TIMER1) },
-	[2] = { DEFINE_TIMER(2, IRQ_TIMER2) },
-	[3] = { DEFINE_TIMER(3, IRQ_TIMER3) },
-	[4] = { DEFINE_TIMER(4, IRQ_TIMER4) },
+	[0] = { DEFINE_S3C_TIMER(0, IRQ_TIMER0) },
+	[1] = { DEFINE_S3C_TIMER(1, IRQ_TIMER1) },
+	[2] = { DEFINE_S3C_TIMER(2, IRQ_TIMER2) },
+	[3] = { DEFINE_S3C_TIMER(3, IRQ_TIMER3) },
+	[4] = { DEFINE_S3C_TIMER(4, IRQ_TIMER4) },
 };
 
 static inline int pwm_is_tdiv(struct pwm_device *pwm)
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 7c239a9..33a5b29 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -72,6 +72,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "System Type and features"
 
 source "kernel/time/Kconfig"
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 294b25f..4442f8d 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -96,6 +96,7 @@
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index c36a6d5..310477b 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -191,7 +191,7 @@
 	struct eic *eic;
 	struct resource *regs;
 	unsigned int i;
-	unsigned int nr_irqs;
+	unsigned int nr_of_irqs;
 	unsigned int int_irq;
 	int ret;
 	u32 pattern;
@@ -224,7 +224,7 @@
 	eic_writel(eic, IDR, ~0UL);
 	eic_writel(eic, MODE, ~0UL);
 	pattern = eic_readl(eic, MODE);
-	nr_irqs = fls(pattern);
+	nr_of_irqs = fls(pattern);
 
 	/* Trigger on low level unless overridden by driver */
 	eic_writel(eic, EDGE, 0UL);
@@ -232,7 +232,7 @@
 
 	eic->chip = &eic_chip;
 
-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip_and_handler(eic->first_irq + i, &eic_chip,
 					 handle_level_irq);
 		set_irq_chip_data(eic->first_irq + i, eic);
@@ -256,7 +256,7 @@
 		 eic->regs, int_irq);
 	dev_info(&pdev->dev,
 		 "Handling %u external IRQs, starting with IRQ %u\n",
-		 nr_irqs, eic->first_irq);
+		 nr_of_irqs, eic->first_irq);
 
 	return 0;
 
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8102c79..29e71ed 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -64,8 +64,11 @@
 	depends on OPROFILE
 
 source "init/Kconfig"
+
 source "kernel/Kconfig.preempt"
 
+source "kernel/Kconfig.freezer"
+
 menu "Blackfin Processor Options"
 
 comment "Processor and Board Settings"
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 9389d38..b17aeea 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -62,6 +62,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "General setup"
 
 source "fs/Kconfig.binfmt"
@@ -677,6 +679,8 @@
 
 source "drivers/usb/Kconfig"
 
+source "drivers/uwb/Kconfig"
+
 source "arch/cris/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
index c9aa390..3bdfaf4 100644
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ b/arch/cris/arch-v10/drivers/ds1302.c
@@ -215,12 +215,12 @@
 
 	local_irq_restore(flags);
 	
-	BCD_TO_BIN(rtc_tm->tm_sec);
-	BCD_TO_BIN(rtc_tm->tm_min);
-	BCD_TO_BIN(rtc_tm->tm_hour);
-	BCD_TO_BIN(rtc_tm->tm_mday);
-	BCD_TO_BIN(rtc_tm->tm_mon);
-	BCD_TO_BIN(rtc_tm->tm_year);
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
 
 	/*
 	 * Account for differences between how the RTC uses the values
@@ -295,12 +295,12 @@
 			else
 				yrs -= 1900;	/* RTC (70, 71, ... 99) */
 
-			BIN_TO_BCD(sec);
-			BIN_TO_BCD(min);
-			BIN_TO_BCD(hrs);
-			BIN_TO_BCD(day);
-			BIN_TO_BCD(mon);
-			BIN_TO_BCD(yrs);
+			sec = bin2bcd(sec);
+			min = bin2bcd(min);
+			hrs = bin2bcd(hrs);
+			day = bin2bcd(day);
+			mon = bin2bcd(mon);
+			yrs = bin2bcd(yrs);
 
 			local_irq_save(flags);
 			CMOS_WRITE(yrs, RTC_YEAR);
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
index 8769dc91..1e90c1a 100644
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ b/arch/cris/arch-v10/drivers/pcf8563.c
@@ -122,7 +122,7 @@
 		       "information is no longer guaranteed!\n", PCF8563_NAME);
 	}
 
-	tm->tm_year  = BCD_TO_BIN(tm->tm_year) +
+	tm->tm_year  = bcd2bin(tm->tm_year) +
 		       ((tm->tm_mon & 0x80) ? 100 : 0);
 	tm->tm_sec  &= 0x7F;
 	tm->tm_min  &= 0x7F;
@@ -131,11 +131,11 @@
 	tm->tm_wday &= 0x07; /* Not coded in BCD. */
 	tm->tm_mon  &= 0x1F;
 
-	BCD_TO_BIN(tm->tm_sec);
-	BCD_TO_BIN(tm->tm_min);
-	BCD_TO_BIN(tm->tm_hour);
-	BCD_TO_BIN(tm->tm_mday);
-	BCD_TO_BIN(tm->tm_mon);
+	tm->tm_sec = bcd2bin(tm->tm_sec);
+	tm->tm_min = bcd2bin(tm->tm_min);
+	tm->tm_hour = bcd2bin(tm->tm_hour);
+	tm->tm_mday = bcd2bin(tm->tm_mday);
+	tm->tm_mon = bcd2bin(tm->tm_mon);
 	tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
 }
 
@@ -282,12 +282,12 @@
 		century = (tm.tm_year >= 2000) ? 0x80 : 0;
 		tm.tm_year = tm.tm_year % 100;
 
-		BIN_TO_BCD(tm.tm_year);
-		BIN_TO_BCD(tm.tm_mon);
-		BIN_TO_BCD(tm.tm_mday);
-		BIN_TO_BCD(tm.tm_hour);
-		BIN_TO_BCD(tm.tm_min);
-		BIN_TO_BCD(tm.tm_sec);
+		tm.tm_year = bin2bcd(tm.tm_year);
+		tm.tm_mon = bin2bcd(tm.tm_mon);
+		tm.tm_mday = bin2bcd(tm.tm_mday);
+		tm.tm_hour = bin2bcd(tm.tm_hour);
+		tm.tm_min = bin2bcd(tm.tm_min);
+		tm.tm_sec = bin2bcd(tm.tm_sec);
 		tm.tm_mon |= century;
 
 		mutex_lock(&rtc_lock);
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
index f263ab5..f447850 100644
--- a/arch/cris/arch-v32/drivers/pcf8563.c
+++ b/arch/cris/arch-v32/drivers/pcf8563.c
@@ -118,7 +118,7 @@
 		       "information is no longer guaranteed!\n", PCF8563_NAME);
 	}
 
-	tm->tm_year  = BCD_TO_BIN(tm->tm_year) +
+	tm->tm_year  = bcd2bin(tm->tm_year) +
 		       ((tm->tm_mon & 0x80) ? 100 : 0);
 	tm->tm_sec  &= 0x7F;
 	tm->tm_min  &= 0x7F;
@@ -127,11 +127,11 @@
 	tm->tm_wday &= 0x07; /* Not coded in BCD. */
 	tm->tm_mon  &= 0x1F;
 
-	BCD_TO_BIN(tm->tm_sec);
-	BCD_TO_BIN(tm->tm_min);
-	BCD_TO_BIN(tm->tm_hour);
-	BCD_TO_BIN(tm->tm_mday);
-	BCD_TO_BIN(tm->tm_mon);
+	tm->tm_sec = bcd2bin(tm->tm_sec);
+	tm->tm_min = bcd2bin(tm->tm_min);
+	tm->tm_hour = bcd2bin(tm->tm_hour);
+	tm->tm_mday = bcd2bin(tm->tm_mday);
+	tm->tm_mon = bcd2bin(tm->tm_mon);
 	tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
 }
 
@@ -279,12 +279,12 @@
 		century = (tm.tm_year >= 2000) ? 0x80 : 0;
 		tm.tm_year = tm.tm_year % 100;
 
-		BIN_TO_BCD(tm.tm_year);
-		BIN_TO_BCD(tm.tm_mon);
-		BIN_TO_BCD(tm.tm_mday);
-		BIN_TO_BCD(tm.tm_hour);
-		BIN_TO_BCD(tm.tm_min);
-		BIN_TO_BCD(tm.tm_sec);
+		tm.tm_year = bin2bcd(tm.tm_year);
+		tm.tm_mon = bin2bcd(tm.tm_mon);
+		tm.tm_mday = bin2bcd(tm.tm_mday);
+		tm.tm_hour = bin2bcd(tm.tm_hour);
+		tm.tm_min = bin2bcd(tm.tm_min);
+		tm.tm_sec = bin2bcd(tm.tm_sec);
 		tm.tm_mon |= century;
 
 		mutex_lock(&rtc_lock);
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index ff4c6aa..074fe7d 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -127,7 +127,7 @@
 		return 0;
 
 	cmos_minutes = CMOS_READ(RTC_MINUTES);
-	BCD_TO_BIN(cmos_minutes);
+	cmos_minutes = bcd2bin(cmos_minutes);
 
 	/*
 	 * since we're only adjusting minutes and seconds,
@@ -142,8 +142,8 @@
 	real_minutes %= 60;
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
-		BIN_TO_BCD(real_seconds);
-		BIN_TO_BCD(real_minutes);
+		real_seconds = bin2bcd(real_seconds);
+		real_minutes = bin2bcd(real_minutes);
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
@@ -170,12 +170,12 @@
 	mon = CMOS_READ(RTC_MONTH);
 	year = CMOS_READ(RTC_YEAR);
 
-	BCD_TO_BIN(sec);
-	BCD_TO_BIN(min);
-	BCD_TO_BIN(hour);
-	BCD_TO_BIN(day);
-	BCD_TO_BIN(mon);
-	BCD_TO_BIN(year);
+	sec = bcd2bin(sec);
+	min = bcd2bin(min);
+	hour = bcd2bin(hour);
+	day = bcd2bin(day);
+	mon = bcd2bin(mon);
+	year = bcd2bin(year);
 
 	if ((year += 1900) < 1970)
 		year += 100;
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index a5aac1b..9d1552a 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -66,6 +66,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 
 menu "Fujitsu FR-V system setup"
 
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index c796674..28f06fd 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -90,6 +90,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 source "arch/h8300/Kconfig.cpu"
 
 menu "Executable file formats"
@@ -214,6 +216,8 @@
 
 source "drivers/usb/Kconfig"
 
+source "drivers/uwb/Kconfig"
+
 endmenu
 
 source "fs/Kconfig"
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index aafd4d3..700014d 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -89,6 +89,7 @@
 					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
+#define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -96,6 +97,7 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3b7aa38..27eec714 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -7,6 +7,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Processor type and features"
 
 config IA64
@@ -21,6 +23,7 @@
 	select HAVE_KRETPROBES
 	select HAVE_DMA_ATTRS
 	select HAVE_KVM
+	select HAVE_ARCH_TRACEHOOK
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -108,6 +111,31 @@
 	bool
 	default y
 
+menuconfig PARAVIRT_GUEST
+	bool "Paravirtualized guest support"
+	help
+	  Say Y here to get to see options related to running Linux under
+	  various hypervisors.  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if PARAVIRT_GUEST
+
+config PARAVIRT
+	bool "Enable paravirtualization code"
+	depends on PARAVIRT_GUEST
+	default y
+	help
+	  This changes the kernel so it can modify itself when it is run
+	  under a hypervisor, potentially improving performance significantly
+	  over full virtualization.  However, when run without a hypervisor
+	  the kernel is theoretically slower and slightly larger.
+
+
+source "arch/ia64/xen/Kconfig"
+
+endif
+
 choice
 	prompt "System type"
 	default IA64_GENERIC
@@ -117,6 +147,7 @@
 	select NUMA
 	select ACPI_NUMA
 	select SWIOTLB
+	select PCI_MSI
 	help
 	  This selects the system type of your hardware.  A "generic" kernel
 	  will run on any supported IA-64 system.  However, if you configure
@@ -124,11 +155,13 @@
 
 	  generic		For any supported IA-64 system
 	  DIG-compliant		For DIG ("Developer's Interface Guide") compliant systems
+	  DIG+Intel+IOMMU	For DIG systems with Intel IOMMU
 	  HP-zx1/sx1000		For HP systems
 	  HP-zx1/sx1000+swiotlb	For HP systems with (broken) DMA-constrained devices.
 	  SGI-SN2		For SGI Altix systems
 	  SGI-UV		For SGI UV systems
 	  Ski-simulator		For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
+	  Xen-domU		For xen domU system
 
 	  If you don't know what to do, choose "generic".
 
@@ -136,6 +169,11 @@
 	bool "DIG-compliant"
 	select SWIOTLB
 
+config IA64_DIG_VTD
+	bool "DIG+Intel+IOMMU"
+	select DMAR
+	select PCI_MSI
+
 config IA64_HP_ZX1
 	bool "HP-zx1/sx1000"
 	help
@@ -179,6 +217,10 @@
 	bool "Ski-simulator"
 	select SWIOTLB
 
+config IA64_XEN_GUEST
+	bool "Xen guest"
+	depends on XEN
+
 endchoice
 
 choice
@@ -581,6 +623,16 @@
 
 source "drivers/pcmcia/Kconfig"
 
+config DMAR
+	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
+	depends on IA64_GENERIC && ACPI && EXPERIMENTAL
+	help
+	  DMA remapping (DMAR) devices support enables independent address
+	  translations for Direct Memory Access (DMA) from devices.
+	  These DMA remapping devices are reported via ACPI tables
+	  and include PCI device scope covered by these DMA
+	  remapping devices.
+
 endmenu
 
 endif
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 905d25b..58a7e46a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -53,12 +53,15 @@
 core-y				+= arch/ia64/kernel/ arch/ia64/mm/
 core-$(CONFIG_IA32_SUPPORT)	+= arch/ia64/ia32/
 core-$(CONFIG_IA64_DIG) 	+= arch/ia64/dig/
+core-$(CONFIG_IA64_DIG_VTD) 	+= arch/ia64/dig/
 core-$(CONFIG_IA64_GENERIC) 	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
+core-$(CONFIG_IA64_XEN_GUEST)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
 core-$(CONFIG_IA64_SGI_UV)	+= arch/ia64/uv/
 core-$(CONFIG_KVM) 		+= arch/ia64/kvm/
+core-$(CONFIG_XEN)		+= arch/ia64/xen/
 
 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 9f48397..e05f9e1 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -233,6 +233,8 @@
 CONFIG_BINFMT_ELF=y
 CONFIG_BINFMT_MISC=m
 
+# CONFIG_DMAR is not set
+
 #
 # Power management and ACPI
 #
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 797acf9..c522edf 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -172,6 +172,8 @@
 CONFIG_BINFMT_ELF=y
 CONFIG_BINFMT_MISC=m
 
+# CONFIG_DMAR is not set
+
 #
 # Power management and ACPI
 #
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 971cd78..5c02838 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -6,4 +6,9 @@
 #
 
 obj-y := setup.o
+ifeq ($(CONFIG_DMAR), y)
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
+endif
+obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
new file mode 100644
index 0000000..1c8a079
--- /dev/null
+++ b/arch/ia64/dig/dig_vtd_iommu.c
@@ -0,0 +1,59 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/intel-iommu.h>
+
+void *
+vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		 gfp_t flags)
+{
+	return intel_alloc_coherent(dev, size, dma_handle, flags);
+}
+EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
+
+void
+vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
+		 dma_addr_t dma_handle)
+{
+	intel_free_coherent(dev, size, vaddr, dma_handle);
+}
+EXPORT_SYMBOL_GPL(vtd_free_coherent);
+
+dma_addr_t
+vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
+		     int dir, struct dma_attrs *attrs)
+{
+	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
+}
+EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
+
+void
+vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+		       int dir, struct dma_attrs *attrs)
+{
+	intel_unmap_single(dev, iova, size, dir);
+}
+EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
+
+int
+vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		 int dir, struct dma_attrs *attrs)
+{
+	return intel_map_sg(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
+
+void
+vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+		   int nents, int dir, struct dma_attrs *attrs)
+{
+	intel_unmap_sg(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
+
+int
+vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
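
Illustrative aside (not part of the patch): the functions above are thin adapters that let the ia64 machine-vector DMA hooks call into the generic Intel IOMMU (VT-d) code.  Drivers never call them by name; a hedged sketch of the call path, with an invented helper:

	#include <linux/dma-mapping.h>

	/* illustration only: ordinary driver code, unchanged by this patch */
	static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
	{
		/* On a dig_vtd machvec this resolves, roughly, to
		 * platform_dma_map_single_attrs() -> vtd_map_single_attrs()
		 *   -> intel_map_single(dev, (phys_addr_t)buf, len, dir).
		 */
		return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	}

Note that vtd_dma_mapping_error() always reports success for now, so mapping failures from the VT-d layer are not yet visible to drivers on ia64.
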
diff --git a/arch/ia64/dig/machvec_vtd.c b/arch/ia64/dig/machvec_vtd.c
new file mode 100644
index 0000000..7cd3eb4
--- /dev/null
+++ b/arch/ia64/dig/machvec_vtd.c
@@ -0,0 +1,3 @@
+#define MACHVEC_PLATFORM_NAME		dig_vtd
+#define MACHVEC_PLATFORM_HEADER		<asm/machvec_dig_vtd.h>
+#include <asm/machvec_init.h>
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 4956be4..d98f0f4 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2070,14 +2070,13 @@
 	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
 		return 0;
 
-#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \
-        defined(CONFIG_PROC_FS)
+#if defined(CONFIG_IA64_GENERIC)
 	/* If we are booting a kdump kernel, the sba_iommu will
 	 * cause devices that were not shutdown properly to MCA
 	 * as soon as they are turned back on.  Our only option for
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
-	if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
+	if (is_kdump_kernel()) {
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 				  " Try machvec=dig boot option");
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 53505bb..a8cf199 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -108,6 +108,11 @@
 	;;
 	st8 [r2]=r3				// initialize return code to -ENOSYS
 	br.call.sptk.few rp=syscall_trace_enter	// give parent a chance to catch syscall args
+	cmp.lt p6,p0=r8,r0			// check tracehook
+	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp	// r2 = &pt_regs.r8
+	;;
+(p6)	st8.spill [r2]=r8			// store return value in slot for r8
+(p6)	br.spnt.few .ret4
 .ret2:	// Need to reload arguments (they may be changed by the tracing process)
 	adds r2=IA64_PT_REGS_R1_OFFSET+16,sp	// r2 = &pt_regs.r1
 	adds r3=IA64_PT_REGS_R13_OFFSET+16,sp	// r3 = &pt_regs.r13
@@ -199,10 +204,10 @@
 	data8 sys_setuid	/* 16-bit version */
 	data8 sys_getuid	/* 16-bit version */
 	data8 compat_sys_stime    /* 25 */
-	data8 sys32_ptrace
+	data8 compat_sys_ptrace
 	data8 sys32_alarm
 	data8 sys_ni_syscall
-	data8 sys32_pause
+	data8 sys_pause
 	data8 compat_sys_utime	  /* 30 */
 	data8 sys_ni_syscall	  /* old stty syscall holder */
 	data8 sys_ni_syscall	  /* old gtty syscall holder */
@@ -215,7 +220,7 @@
 	data8 sys_mkdir
 	data8 sys_rmdir		  /* 40 */
 	data8 sys_dup
-	data8 sys32_pipe
+	data8 sys_pipe
 	data8 compat_sys_times
 	data8 sys_ni_syscall	  /* old prof syscall holder */
 	data8 sys32_brk		  /* 45 */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index f4430bb..5e92ae0 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1098,21 +1098,6 @@
 	return ret;
 }
 
-asmlinkage long
-sys32_pipe (int __user *fd)
-{
-	int retval;
-	int fds[2];
-
-	retval = do_pipe_flags(fds, 0);
-	if (retval)
-		goto out;
-	if (copy_to_user(fd, fds, sizeof(fds)))
-		retval = -EFAULT;
-  out:
-	return retval;
-}
-
 asmlinkage unsigned long
 sys32_alarm (unsigned int seconds)
 {
@@ -1209,25 +1194,6 @@
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
-static unsigned int
-ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
-{
-	size_t copied;
-	unsigned int ret;
-
-	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
-	return (copied != sizeof(ret)) ? -EIO : 0;
-}
-
-static unsigned int
-ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
-{
-
-	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
-		return -EIO;
-	return 0;
-}
-
 /*
  *  The order in which registers are stored in the ptrace regs structure
  */
@@ -1525,49 +1491,15 @@
 	return 0;
 }
 
-asmlinkage long
-sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+	compat_ulong_t caddr, compat_ulong_t cdata)
 {
-	struct task_struct *child;
-	unsigned int value, tmp;
+	unsigned long addr = caddr;
+	unsigned long data = cdata;
+	unsigned int tmp;
 	long i, ret;
 
-	lock_kernel();
-	if (request == PTRACE_TRACEME) {
-		ret = ptrace_traceme();
-		goto out;
-	}
-
-	child = ptrace_get_task_struct(pid);
-	if (IS_ERR(child)) {
-		ret = PTR_ERR(child);
-		goto out;
-	}
-
-	if (request == PTRACE_ATTACH) {
-		ret = sys_ptrace(request, pid, addr, data);
-		goto out_tsk;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		goto out_tsk;
-
 	switch (request) {
-	      case PTRACE_PEEKTEXT:
-	      case PTRACE_PEEKDATA:	/* read word at location addr */
-		ret = ia32_peek(child, addr, &value);
-		if (ret == 0)
-			ret = put_user(value, (unsigned int __user *) compat_ptr(data));
-		else
-			ret = -EIO;
-		goto out_tsk;
-
-	      case PTRACE_POKETEXT:
-	      case PTRACE_POKEDATA:	/* write the word at location addr */
-		ret = ia32_poke(child, addr, data);
-		goto out_tsk;
-
 	      case PTRACE_PEEKUSR:	/* read word at addr in USER area */
 		ret = -EIO;
 		if ((addr & 3) || addr > 17*sizeof(int))
@@ -1632,27 +1564,9 @@
 					    compat_ptr(data));
 		break;
 
-	      case PTRACE_GETEVENTMSG:   
-		ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
-		break;
-
-	      case PTRACE_SYSCALL:	/* continue, stop after next syscall */
-	      case PTRACE_CONT:		/* restart after signal. */
-	      case PTRACE_KILL:
-	      case PTRACE_SINGLESTEP:	/* execute chile for one instruction */
-	      case PTRACE_DETACH:	/* detach a process */
-		ret = sys_ptrace(request, pid, addr, data);
-		break;
-
 	      default:
-		ret = ptrace_request(child, request, addr, data);
-		break;
-
+		return compat_ptrace_request(child, request, caddr, cdata);
 	}
-  out_tsk:
-	put_task_struct(child);
-  out:
-	unlock_kernel();
 	return ret;
 }
 
@@ -1704,14 +1618,6 @@
 }
 
 asmlinkage int
-sys32_pause (void)
-{
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	return -ERESTARTNOHAND;
-}
-
-asmlinkage int
 sys32_msync (unsigned int start, unsigned int len, int flags)
 {
 	unsigned int addr;
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h
index f034020..e90c40e 100644
--- a/arch/ia64/include/asm/break.h
+++ b/arch/ia64/include/asm/break.h
@@ -20,4 +20,13 @@
  */
 #define __IA64_BREAK_SYSCALL		0x100000
 
+/*
+ * Xen specific break numbers:
+ */
+#define __IA64_XEN_HYPERCALL		0x1000
+/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
+   for xen hyperprivops */
+#define __IA64_XEN_HYPERPRIVOP_START	0x1
+#define __IA64_XEN_HYPERPRIVOP_MAX	0x1a
+
 #endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index afcfbda..c8ce271 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -34,6 +34,8 @@
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void clflush_cache_range(void *addr, int size);
+
 
 #define flush_icache_user_range(vma, page, user_addr, len)					\
 do {												\
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 3db6daf..41ab85d 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -10,6 +10,9 @@
 #ifdef CONFIG_ACPI
 	void	*acpi_handle;
 #endif
+#ifdef CONFIG_DMAR
+	void *iommu; /* hook for IOMMU specific extension */
+#endif
 };
 
 #endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 06ff1ba..bbab7e2 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -7,6 +7,49 @@
  */
 #include <asm/machvec.h>
 #include <linux/scatterlist.h>
+#include <asm/swiotlb.h>
+
+struct dma_mapping_ops {
+	int             (*mapping_error)(struct device *dev,
+					 dma_addr_t dma_addr);
+	void*           (*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp);
+	void            (*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t      (*map_single)(struct device *hwdev, unsigned long ptr,
+				size_t size, int direction);
+	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
+				size_t size, int direction);
+	void            (*sync_single_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void            (*sync_single_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void            (*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void            (*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void            (*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	void            (*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+				int nents, int direction);
+	void            (*unmap_sg)(struct device *hwdev,
+				struct scatterlist *sg, int nents,
+				int direction);
+	int             (*dma_supported_op)(struct device *hwdev, u64 mask);
+	int		is_phys;
+};
+
+extern struct dma_mapping_ops *dma_ops;
+extern struct ia64_machine_vector ia64_mv;
+extern void set_iommu_machvec(void);
 
 #define dma_alloc_coherent(dev, size, handle, gfp)	\
 	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
@@ -96,4 +139,11 @@
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+	return dma_ops;
+}
+
+
+
 #endif /* _ASM_IA64_DMA_MAPPING_H */
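
Illustrative aside (not part of the patch): struct dma_mapping_ops mirrors the x86 ops table so that swiotlb and the VT-d code can be chosen at run time through the single global dma_ops pointer returned by get_dma_ops().  A minimal sketch of how a generic helper would dispatch through it (the helper name is invented):

	static inline dma_addr_t
	example_map_single(struct device *dev, void *ptr, size_t size, int dir)
	{
		struct dma_mapping_ops *ops = get_dma_ops(dev);

		return ops->map_single(dev, (unsigned long)ptr, size, dir);
	}

get_dma_ops() ignores its dev argument here, so the indirection is per platform rather than per device.
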
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
new file mode 100644
index 0000000..5fb2bb9
--- /dev/null
+++ b/arch/ia64/include/asm/iommu.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_IA64_IOMMU_H
+#define _ASM_IA64_IOMMU_H 1
+
+#define cpu_has_x2apic 0
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+
+extern void pci_iommu_shutdown(void);
+extern void no_iommu_init(void);
+extern int force_iommu, no_iommu;
+extern int iommu_detected;
+extern void iommu_dma_init(void);
+extern void machvec_init(const char *name);
+extern int forbid_dac;
+
+#endif
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
index aefcdfe..39e65f6 100644
--- a/arch/ia64/include/asm/kregs.h
+++ b/arch/ia64/include/asm/kregs.h
@@ -32,7 +32,7 @@
 #define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */
 
 #define IA64_TR_ALLOC_BASE	2 	/* itr&dtr: Base of dynamic TR resource*/
-#define IA64_TR_ALLOC_MAX	32 	/* Max number for dynamic use*/
+#define IA64_TR_ALLOC_MAX	64 	/* Max number for dynamic use*/
 
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT		1
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2b850cc..1ea28bc 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -120,6 +120,8 @@
 #  include <asm/machvec_hpsim.h>
 # elif defined (CONFIG_IA64_DIG)
 #  include <asm/machvec_dig.h>
+# elif defined(CONFIG_IA64_DIG_VTD)
+#  include <asm/machvec_dig_vtd.h>
 # elif defined (CONFIG_IA64_HP_ZX1)
 #  include <asm/machvec_hpzx1.h>
 # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
@@ -128,6 +130,8 @@
 #  include <asm/machvec_sn2.h>
 # elif defined (CONFIG_IA64_SGI_UV)
 #  include <asm/machvec_uv.h>
+# elif defined (CONFIG_IA64_XEN_GUEST)
+#  include <asm/machvec_xen.h>
 # elif defined (CONFIG_IA64_GENERIC)
 
 # ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
new file mode 100644
index 0000000..3400b56
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
+#define _ASM_IA64_MACHVEC_DIG_VTD_h
+
+extern ia64_mv_setup_t			dig_setup;
+extern ia64_mv_dma_alloc_coherent	vtd_alloc_coherent;
+extern ia64_mv_dma_free_coherent	vtd_free_coherent;
+extern ia64_mv_dma_map_single_attrs	vtd_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	vtd_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		vtd_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	vtd_unmap_sg_attrs;
+extern ia64_mv_dma_supported		iommu_dma_supported;
+extern ia64_mv_dma_mapping_error	vtd_dma_mapping_error;
+extern ia64_mv_dma_init			pci_iommu_alloc;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name				"dig_vtd"
+#define platform_setup				dig_setup
+#define platform_dma_init			pci_iommu_alloc
+#define platform_dma_alloc_coherent		vtd_alloc_coherent
+#define platform_dma_free_coherent		vtd_free_coherent
+#define platform_dma_map_single_attrs		vtd_map_single_attrs
+#define platform_dma_unmap_single_attrs		vtd_unmap_single_attrs
+#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
+#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
+#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device	machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
+#define platform_dma_supported			iommu_dma_supported
+#define platform_dma_mapping_error		vtd_dma_mapping_error
+
+#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 7f21249..ef964b2 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -1,3 +1,4 @@
+#include <asm/iommu.h>
 #include <asm/machvec.h>
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
new file mode 100644
index 0000000..55f9228
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_xen.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_MACHVEC_XEN_h
+#define _ASM_IA64_MACHVEC_XEN_h
+
+extern ia64_mv_setup_t			dig_setup;
+extern ia64_mv_cpu_init_t		xen_cpu_init;
+extern ia64_mv_irq_init_t		xen_irq_init;
+extern ia64_mv_send_ipi_t		xen_platform_send_ipi;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name				"xen"
+#define platform_setup				dig_setup
+#define platform_cpu_init			xen_cpu_init
+#define platform_irq_init			xen_irq_init
+#define platform_send_ipi			xen_platform_send_ipi
+
+#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 7245a57..6bc96ee 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,10 +18,11 @@
  * 	- crash dumping code reserved region
  * 	- Kernel memory map built from EFI memory map
  * 	- ELF core header
+ *	- xen start info if CONFIG_XEN
  *
  * More could be added if necessary
  */
-#define IA64_MAX_RSVD_REGIONS 8
+#define IA64_MAX_RSVD_REGIONS 9
 
 struct rsvd_region {
 	unsigned long start;	/* virtual address of beginning of element */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index c8efbf7..0a1026c 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -36,8 +36,13 @@
 	;;					\
 	movl clob = PARAVIRT_POISON;		\
 	;;
+# define CLOBBER_PRED(pred_clob)		\
+	;;					\
+	cmp.eq pred_clob, p0 = r0, r0		\
+	;;
 #else
-# define CLOBBER(clob)		/* nothing */
+# define CLOBBER(clob)			/* nothing */
+# define CLOBBER_PRED(pred_clob)	/* nothing */
 #endif
 
 #define MOV_FROM_IFA(reg)	\
@@ -136,7 +141,8 @@
 
 #define SSM_PSR_I(pred, pred_clob, clob)	\
 (pred)	ssm psr.i				\
-	CLOBBER(clob)
+	CLOBBER(clob)				\
+	CLOBBER_PRED(pred_clob)
 
 #define RSM_PSR_I(pred, clob0, clob1)	\
 (pred)	rsm psr.i			\
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
new file mode 100644
index 0000000..b8e6eb1
--- /dev/null
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -0,0 +1,263 @@
+#ifndef _ASM_NATIVE_PVCHK_INST_H
+#define _ASM_NATIVE_PVCHK_INST_H
+
+/******************************************************************************
+ * arch/ia64/include/asm/native/pvchk_inst.h
+ * Checker for paravirtualizations of privileged operations.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *      Dan Magenheimer <dan.magenheimer@hp.com>
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+/**********************************************
+ * Instructions paravirtualized for correctness
+ **********************************************/
+
+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
+ *  may have different semantics depending on whether they are executed
+ *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
+ *  be allowed to execute directly, lest incorrect semantics result.
+ */
+
+#define fc	.error "fc should not be used directly."
+#define thash	.error "thash should not be used directly."
+
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ * and the semantics of cover only change if psr.ic is off which is very
+ * rare (and currently non-existent outside of assembly code).
+ */
+#define ttag	.error "ttag should not be used directly."
+#define cover	.error "cover should not be used directly."
+
+/* There are also privilege-sensitive registers.  These registers are
+ * readable at any privilege level but only writable at PL0.
+ */
+#define cpuid	.error "cpuid should not be used directly."
+#define pmd	.error "pmd should not be used directly."
+
+/*
+ * mov ar.eflag =
+ * mov = ar.eflag
+ */
+
+/**********************************************
+ * Instructions paravirtualized for performance
+ **********************************************/
+/*
+ * Those instructions include '.', which cpp can't handle at all
+ * or at least can't handle easily.
+ * They are handled by sed instead of cpp.
+ */
+
+/* for .S
+ * itc.i
+ * itc.d
+ *
+ * bsw.0
+ * bsw.1
+ *
+ * ssm psr.ic | PSR_DEFAULT_BITS
+ * ssm psr.ic
+ * rsm psr.ic
+ * ssm psr.i
+ * rsm psr.i
+ * rsm psr.i | psr.ic
+ * rsm psr.dt
+ * ssm psr.dt
+ *
+ * mov = cr.ifa
+ * mov = cr.itir
+ * mov = cr.isr
+ * mov = cr.iha
+ * mov = cr.ipsr
+ * mov = cr.iim
+ * mov = cr.iip
+ * mov = cr.ivr
+ * mov = psr
+ *
+ * mov cr.ifa =
+ * mov cr.itir =
+ * mov cr.iha =
+ * mov cr.ipsr =
+ * mov cr.ifs =
+ * mov cr.iip =
+ * mov cr.kr =
+ */
+
+/* for intrinsics
+ * ssm psr.i
+ * rsm psr.i
+ * mov = psr
+ * mov = ivr
+ * mov = tpr
+ * mov cr.itm =
+ * mov eoi =
+ * mov rr[] =
+ * mov = rr[]
+ * mov = kr
+ * mov kr =
+ * ptc.ga
+ */
+
+/*************************************************************
+ * Define paravirtualized instruction macros as nops to ignore them,
+ * and check whether their arguments are appropriate.
+ *************************************************************/
+
+/* check whether reg is a regular register */
+.macro is_rreg_in reg
+	.ifc "\reg", "r0"
+		nop 0
+		.exitm
+	.endif
+	;;
+	mov \reg = r0
+	;;
+.endm
+#define IS_RREG_IN(reg)	is_rreg_in reg ;
+
+#define IS_RREG_OUT(reg)			\
+	;;					\
+	mov reg = r0				\
+	;;
+
+#define IS_RREG_CLOB(reg)	IS_RREG_OUT(reg)
+
+/* check whether pred is a predicate register */
+#define IS_PRED_IN(pred)			\
+	;;					\
+	(pred)	nop 0				\
+	;;
+
+#define IS_PRED_OUT(pred)			\
+	;;					\
+	cmp.eq pred, p0 = r0, r0		\
+	;;
+
+#define IS_PRED_CLOB(pred)	IS_PRED_OUT(pred)
+
+
+#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND)	\
+	nop 0
+#define MOV_FROM_IFA(reg)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_ITIR(reg)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_ISR(reg)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_IHA(reg)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_IPSR(pred, reg)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_IIM(reg)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_IIP(reg)			\
+	IS_RREG_OUT(reg)
+#define MOV_FROM_IVR(reg, clob)			\
+	IS_RREG_OUT(reg)			\
+	IS_RREG_CLOB(clob)
+#define MOV_FROM_PSR(pred, reg, clob)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_OUT(reg)			\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_IFA(reg, clob)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_ITIR(pred, reg, clob)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_IHA(pred, reg, clob)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_IPSR(pred, reg, clob)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_IFS(pred, reg, clob)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_IIP(reg, clob)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define MOV_TO_KR(kr, reg, clob0, clob1)	\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)
+#define ITC_I(pred, reg, clob)			\
+	IS_PRED_IN(pred)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define ITC_D(pred, reg, clob)			\
+	IS_PRED_IN(pred)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob)	\
+	IS_PRED_IN(pred_i)			\
+	IS_PRED_IN(pred_d)			\
+	IS_RREG_IN(reg)				\
+	IS_RREG_CLOB(clob)
+#define THASH(pred, reg0, reg1, clob)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_OUT(reg0)			\
+	IS_RREG_IN(reg1)			\
+	IS_RREG_CLOB(clob)
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
+	IS_RREG_CLOB(clob0)					\
+	IS_RREG_CLOB(clob1)
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)
+#define RSM_PSR_IC(clob)			\
+	IS_RREG_CLOB(clob)
+#define SSM_PSR_I(pred, pred_clob, clob)	\
+	IS_PRED_IN(pred)			\
+	IS_PRED_CLOB(pred_clob)			\
+	IS_RREG_CLOB(clob)
+#define RSM_PSR_I(pred, clob0, clob1)		\
+	IS_PRED_IN(pred)			\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)
+#define RSM_PSR_I_IC(clob0, clob1, clob2)	\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)			\
+	IS_RREG_CLOB(clob2)
+#define RSM_PSR_DT				\
+	nop 0
+#define SSM_PSR_DT_AND_SRLZ_I			\
+	nop 0
+#define BSW_0(clob0, clob1, clob2)		\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)			\
+	IS_RREG_CLOB(clob2)
+#define BSW_1(clob0, clob1)			\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)
+#define COVER					\
+	nop 0
+#define RFI					\
+	br.ret.sptk.many rp /* defining nop causes dependency error */
+
+#endif /* _ASM_NATIVE_PVCHK_INST_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 660cab0..2bf3636 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -117,7 +117,7 @@
 struct pv_iosapic_ops {
 	void (*pcat_compat_init)(void);
 
-	struct irq_chip *(*get_irq_chip)(unsigned long trigger);
+	struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
 
 	unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
 	void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
@@ -135,7 +135,7 @@
 static inline struct irq_chip*
 iosapic_get_irq_chip(unsigned long trigger)
 {
-	return pv_iosapic_ops.get_irq_chip(trigger);
+	return pv_iosapic_ops.__get_irq_chip(trigger);
 }
 
 static inline unsigned int
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 0149097..1d660d8 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -95,16 +95,8 @@
 				enum pci_mmap_state mmap_state, int write_combine);
 #define HAVE_PCI_LEGACY
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
-				      struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj,
-				  struct bin_attribute *bin_attr,
-				  char *buf, loff_t off, size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj,
-				   struct bin_attribute *bin_attr,
-				   char *buf, loff_t off, size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
-			       struct bin_attribute *attr,
-			       struct vm_area_struct *vma);
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
 
 #define pci_get_legacy_mem platform_pci_get_legacy_mem
 #define pci_legacy_read platform_pci_legacy_read
@@ -164,4 +156,7 @@
 	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
 }
 
+#ifdef CONFIG_DMAR
+extern void pci_iommu_alloc(void);
+#endif
 #endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 15f8dcf..6417c1e 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -240,6 +240,12 @@
  */
 # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	/* FIXME: should this be bspstore + nr_dirty regs? */
+	return regs->ar_bspstore;
+}
+
 #define regs_return_value(regs) ((regs)->r8)
 
 /* Conserve space in histogram by encoding slot bits in address
@@ -319,6 +325,8 @@
   #define arch_has_block_step()   (1)
   extern void user_enable_block_step(struct task_struct *);
 
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
 #endif /* !__KERNEL__ */
 
 /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
new file mode 100644
index 0000000..44ef9ef
--- /dev/null
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -0,0 +1,48 @@
+/*
+ * Same structure as x86's.
+ * Hopefully asm-x86/pvclock-abi.h will be moved somewhere more generic;
+ * for now, duplicate the same definitions here.
+ */
+
+#ifndef _ASM_IA64__PVCLOCK_ABI_H
+#define _ASM_IA64__PVCLOCK_ABI_H
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time.  There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done.  Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+	u32   version;
+	u32   pad0;
+	u64   tsc_timestamp;
+	u64   system_time;
+	u32   tsc_to_system_mul;
+	s8    tsc_shift;
+	u8    pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+	u32   version;
+	u32   sec;
+	u32   nsec;
+} __attribute__((__packed__));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64__PVCLOCK_ABI_H */
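
Illustrative aside (not part of the patch): the even/odd version protocol described in the comment implies a retry loop on the guest side.  A hedged sketch of such a reader, with an invented function name and memory barriers elided for brevity:

	static void example_read_time(const struct pvclock_vcpu_time_info *src,
				      struct pvclock_vcpu_time_info *dst)
	{
		u32 version;

		do {
			version = src->version;	/* odd while the hypervisor updates */
			*dst = *src;
		} while ((version & 1) || version != src->version);
	}

If the version sampled before the copy is odd, or differs from the version seen afterwards, the snapshot straddled an update and is retried.
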
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
new file mode 100644
index 0000000..fb79423
--- /dev/null
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -0,0 +1,56 @@
+#ifndef ASM_IA64__SWIOTLB_H
+#define ASM_IA64__SWIOTLB_H
+
+#include <linux/dma-mapping.h>
+
+/* SWIOTLB interface */
+
+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
+				     size_t size, int dir);
+extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flags);
+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+				 size_t size, int dir);
+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
+					dma_addr_t dev_addr,
+					size_t size, int dir);
+extern void swiotlb_sync_single_for_device(struct device *hwdev,
+					   dma_addr_t dev_addr,
+					   size_t size, int dir);
+extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
+					      dma_addr_t dev_addr,
+					      unsigned long offset,
+					      size_t size, int dir);
+extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
+						 dma_addr_t dev_addr,
+						 unsigned long offset,
+						 size_t size, int dir);
+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
+				    struct scatterlist *sg, int nelems,
+				    int dir);
+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
+				       struct scatterlist *sg, int nelems,
+				       int dir);
+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
+			  int nents, int direction);
+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+			     int nents, int direction);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
+extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle);
+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern void swiotlb_init(void);
+
+extern int swiotlb_force;
+
+#ifdef CONFIG_SWIOTLB
+extern int swiotlb;
+extern void pci_swiotlb_init(void);
+#else
+#define swiotlb 0
+static inline void pci_swiotlb_init(void)
+{
+}
+#endif
+
+#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
new file mode 100644
index 0000000..593c12e
--- /dev/null
+++ b/arch/ia64/include/asm/sync_bitops.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_SYNC_BITOPS_H
+#define _ASM_IA64_SYNC_BITOPS_H
+
+/*
+ * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *
+ * Based on synch_bitops.h which Dan Magenheimer wrote.
+ *
+ * bit operations which provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+static inline void sync_set_bit(int nr, volatile void *addr)
+{
+	set_bit(nr, addr);
+}
+
+static inline void sync_clear_bit(int nr, volatile void *addr)
+{
+	clear_bit(nr, addr);
+}
+
+static inline void sync_change_bit(int nr, volatile void *addr)
+{
+	change_bit(nr, addr);
+}
+
+static inline int sync_test_and_set_bit(int nr, volatile void *addr)
+{
+	return test_and_set_bit(nr, addr);
+}
+
+static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
+{
+	return test_and_clear_bit(nr, addr);
+}
+
+static inline int sync_test_and_change_bit(int nr, volatile void *addr)
+{
+	return test_and_change_bit(nr, addr);
+}
+
+static inline int sync_test_bit(int nr, const volatile void *addr)
+{
+	return test_bit(nr, addr);
+}
+
+#define sync_cmpxchg(ptr, old, new)				\
+	((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
+
+#endif /* _ASM_IA64_SYNC_BITOPS_H */
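
Illustrative aside (not part of the patch): on ia64 the ordinary bitops are already strong enough for memory shared with the hypervisor, so the sync_* variants are plain wrappers; on x86 the equivalents force a LOCK prefix instead.  A small usage sketch in the style of an event-channel consumer (the bitmap and port are made up):

	static int example_consume_event(volatile void *pending, int port)
	{
		/* non-zero iff this CPU is the one that cleared the bit */
		return sync_test_and_clear_bit(port, pending);
	}

sync_cmpxchg() maps to cmpxchg_acq(), so the exchange carries acquire semantics on the shared field.
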
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
new file mode 100644
index 0000000..2f758a4
--- /dev/null
+++ b/arch/ia64/include/asm/syscall.h
@@ -0,0 +1,163 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Intel Corp.  Shaohua Li <shaohua.li@intel.com>
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	if ((long)regs->cr_ifs < 0) /* Not a syscall */
+		return -1;
+
+#ifdef CONFIG_IA32_SUPPORT
+	if (IS_IA32_PROCESS(regs))
+		return regs->r1;
+#endif
+
+	return regs->r15;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	if (IS_IA32_PROCESS(regs))
+		regs->r8 = regs->r1;
+#endif
+
+	/* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	if (IS_IA32_PROCESS(regs))
+		return regs->r8;
+#endif
+
+	return regs->r10 == -1 ? regs->r8 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->r8;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	if (IS_IA32_PROCESS(regs)) {
+		regs->r8 = (long) error ? error : val;
+		return;
+	}
+#endif
+
+	if (error) {
+		/* error < 0, but ia64 uses > 0 return value */
+		regs->r8 = -error;
+		regs->r10 = -1;
+	} else {
+		regs->r8 = val;
+		regs->r10 = 0;
+	}
+}
+
+extern void ia64_syscall_get_set_arguments(struct task_struct *task,
+	struct pt_regs *regs, unsigned int i, unsigned int n,
+	unsigned long *args, int rw);
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+#ifdef CONFIG_IA32_SUPPORT
+	if (IS_IA32_PROCESS(regs)) {
+		switch (i + n) {
+		case 6:
+			if (!n--) break;
+			*args++ = regs->r13;
+		case 5:
+			if (!n--) break;
+			*args++ = regs->r15;
+		case 4:
+			if (!n--) break;
+			*args++ = regs->r14;
+		case 3:
+			if (!n--) break;
+			*args++ = regs->r10;
+		case 2:
+			if (!n--) break;
+			*args++ = regs->r9;
+		case 1:
+			if (!n--) break;
+			*args++ = regs->r11;
+		case 0:
+			if (!n--) break;
+		default:
+			BUG();
+			break;
+		}
+
+		return;
+	}
+#endif
+	ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+#ifdef CONFIG_IA32_SUPPORT
+	if (IS_IA32_PROCESS(regs)) {
+		switch (i + n) {
+		case 6:
+			if (!n--) break;
+			regs->r13 = *args++;
+		case 5:
+			if (!n--) break;
+			regs->r15 = *args++;
+		case 4:
+			if (!n--) break;
+			regs->r14 = *args++;
+		case 3:
+			if (!n--) break;
+			regs->r10 = *args++;
+		case 2:
+			if (!n--) break;
+			regs->r9 = *args++;
+		case 1:
+			if (!n--) break;
+			regs->r11 = *args++;
+		case 0:
+			if (!n--) break;
+		}
+
+		return;
+	}
+#endif
+	ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+}
+#endif	/* _ASM_SYSCALL_H */
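
Illustrative aside (not part of the patch): these accessors exist for the tracehook layer enabled by the HAVE_ARCH_TRACEHOOK select earlier in this series.  A hedged sketch of a caller, with an invented reporting function:

	static void example_report_entry(struct task_struct *task,
					 struct pt_regs *regs)
	{
		unsigned long args[6];
		long nr = syscall_get_nr(task, regs);

		if (nr == -1)
			return;		/* not inside a system call */

		syscall_get_arguments(task, regs, 0, 6, args);
		printk(KERN_DEBUG "sys_%ld(%lx, %lx, %lx, ...)\n",
		       nr, args[0], args[1], args[2]);
	}

The descending switch statements above rely on deliberate case fall-through: execution enters at case i+n and keeps copying until n reaches zero, so only the requested slice of arguments is touched.
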
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 7c60fcd..ae69226 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -87,9 +87,6 @@
 #define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
-#define tsk_set_notify_resume(tsk) \
-	set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME)
-extern void tsk_clear_notify_resume(struct task_struct *tsk);
 #endif /* !__ASSEMBLY */
 
 /*
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 05a6baf..4e03cfe 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -39,4 +39,6 @@
 	return ret;
 }
 
+extern void ia64_cpu_local_tick (void);
+
 #endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index d535833..f791576 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -337,6 +337,7 @@
 # define __ARCH_WANT_SYS_NICE
 # define __ARCH_WANT_SYS_OLD_GETRLIMIT
 # define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_PAUSE
 # define __ARCH_WANT_SYS_SIGPENDING
 # define __ARCH_WANT_SYS_SIGPROCMASK
 # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
new file mode 100644
index 0000000..7324878
--- /dev/null
+++ b/arch/ia64/include/asm/xen/events.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/events.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef _ASM_IA64_XEN_EVENTS_H
+#define _ASM_IA64_XEN_EVENTS_H
+
+enum ipi_vector {
+	XEN_RESCHEDULE_VECTOR,
+	XEN_IPI_VECTOR,
+	XEN_CMCP_VECTOR,
+	XEN_CPEP_VECTOR,
+
+	XEN_NR_IPIS,
+};
+
+static inline int xen_irqs_disabled(struct pt_regs *regs)
+{
+	return !(ia64_psr(regs)->i);
+}
+
+static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+	__do_IRQ(irq);
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+#define irq_ctx_init(cpu)	do { } while (0)
+
+#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/grant_table.h b/arch/ia64/include/asm/xen/grant_table.h
new file mode 100644
index 0000000..2b1fae0
--- /dev/null
+++ b/arch/ia64/include/asm/xen/grant_table.h
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/grant_table.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _ASM_IA64_XEN_GRANT_TABLE_H
+#define _ASM_IA64_XEN_GRANT_TABLE_H
+
+struct vm_struct *xen_alloc_vm_area(unsigned long size);
+void xen_free_vm_area(struct vm_struct *area);
+
+#endif /* _ASM_IA64_XEN_GRANT_TABLE_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
new file mode 100644
index 0000000..96fc623
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -0,0 +1,265 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_IA64_XEN_HYPERCALL_H
+#define _ASM_IA64_XEN_HYPERCALL_H
+
+#include <xen/interface/xen.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/sched.h>
+#include <asm/xen/xcom_hcall.h>
+struct xencomm_handle;
+extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
+				 unsigned long a3, unsigned long a4,
+				 unsigned long a5, unsigned long cmd);
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+#define _hypercall0(type, name)					\
+({								\
+	long __res;						\
+	__res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
+	(type)__res;						\
+})
+
+#define _hypercall1(type, name, a1)				\
+({								\
+	long __res;						\
+	__res = __hypercall((unsigned long)a1,			\
+			     0, 0, 0, 0, __HYPERVISOR_##name);	\
+	(type)__res;						\
+})
+
+#define _hypercall2(type, name, a1, a2)				\
+({								\
+	long __res;						\
+	__res = __hypercall((unsigned long)a1,			\
+			    (unsigned long)a2,			\
+			    0, 0, 0, __HYPERVISOR_##name);	\
+	(type)__res;						\
+})
+
+#define _hypercall3(type, name, a1, a2, a3)			\
+({								\
+	long __res;						\
+	__res = __hypercall((unsigned long)a1,			\
+			    (unsigned long)a2,			\
+			    (unsigned long)a3,			\
+			    0, 0, __HYPERVISOR_##name);		\
+	(type)__res;						\
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4)			\
+({								\
+	long __res;						\
+	__res = __hypercall((unsigned long)a1,			\
+			    (unsigned long)a2,			\
+			    (unsigned long)a3,			\
+			    (unsigned long)a4,			\
+			    0, __HYPERVISOR_##name);		\
+	(type)__res;						\
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
+({								\
+	long __res;						\
+	__res = __hypercall((unsigned long)a1,			\
+			    (unsigned long)a2,			\
+			    (unsigned long)a3,			\
+			    (unsigned long)a4,			\
+			    (unsigned long)a5,			\
+			    __HYPERVISOR_##name);		\
+	(type)__res;						\
+})
+
+
+static inline int
+xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
+{
+	return _hypercall2(int, sched_op_new, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+	unsigned long timeout_hi = (unsigned long)(timeout >> 32);
+	unsigned long timeout_lo = (unsigned long)timeout;
+	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
+				 int nr_calls)
+{
+	return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
+{
+	return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
+{
+	return _hypercall2(int, event_channel_op, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
+{
+	return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_console_io(int cmd, int count,
+				  struct xencomm_handle *str)
+{
+	return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
+{
+	return _hypercall2(int, physdev_op, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
+				      struct xencomm_handle *uop,
+				      unsigned int count)
+{
+	return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
+
+extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
+
+static inline int
+xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
+{
+	return _hypercall2(int, callback_op, cmd, arg);
+}
+
+static inline long
+xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
+{
+	return _hypercall3(long, vcpu_op, cmd, cpu, arg);
+}
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+	switch (cmd) {
+	case PHYSDEVOP_eoi:
+		return _hypercall1(int, ia64_fast_eoi,
+				   ((struct physdev_eoi *)arg)->irq);
+	default:
+		return xencomm_hypercall_physdev_op(cmd, arg);
+	}
+}
+
+static inline long
+xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
+{
+	return _hypercall1(long, opt_feature, arg);
+}
+
+/* for balloon driver */
+#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
+
+/* Use xencomm to do hypercalls.  */
+#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
+#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
+#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
+#define HYPERVISOR_multicall xencomm_hypercall_multicall
+#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
+#define HYPERVISOR_console_io xencomm_hypercall_console_io
+#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
+#define HYPERVISOR_suspend xencomm_hypercall_suspend
+#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
+#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
+
+/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
+#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
+
+static inline int
+HYPERVISOR_shutdown(
+	unsigned int reason)
+{
+	struct sched_shutdown sched_shutdown = {
+		.reason = reason
+	};
+
+	int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
+
+	return rc;
+}
+
+/* for netfront.c, netback.c */
+#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
+
+static inline void
+MULTI_update_va_mapping(
+	struct multicall_entry *mcl, unsigned long va,
+	pte_t new_val, unsigned long flags)
+{
+	mcl->op = __HYPERVISOR_update_va_mapping;
+	mcl->result = 0;
+}
+
+static inline void
+MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
+	void *uop, unsigned int count)
+{
+	mcl->op = __HYPERVISOR_grant_table_op;
+	mcl->args[0] = cmd;
+	mcl->args[1] = (unsigned long)uop;
+	mcl->args[2] = count;
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+		 int count, int *success_count, domid_t domid)
+{
+	mcl->op = __HYPERVISOR_mmu_update;
+	mcl->args[0] = (unsigned long)req;
+	mcl->args[1] = count;
+	mcl->args[2] = (unsigned long)success_count;
+	mcl->args[3] = domid;
+}
+
+#endif /* _ASM_IA64_XEN_HYPERCALL_H */
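
Editor's note: the wrappers above funnel every hypercall through a single __hypercall stub, and HYPERVISOR_set_timer_op() shows the calling convention of passing a 64-bit timeout as separate low/high words. Below is a minimal user-space sketch of that split-and-rejoin convention; fake_set_timer_op() and the sample timeout are invented stand-ins for the hypervisor side, so this is illustration only, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the hypervisor side: rejoin the two words. */
static long fake_set_timer_op(unsigned long timeout_lo, unsigned long timeout_hi)
{
	uint64_t timeout = ((uint64_t)timeout_hi << 32) | (uint32_t)timeout_lo;
	printf("timer armed for %llu ticks\n", (unsigned long long)timeout);
	return 0;
}

/* Mirrors the HYPERVISOR_set_timer_op() convention: pass a u64 as two words. */
static long set_timer_op(uint64_t timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout >> 32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return fake_set_timer_op(timeout_lo, timeout_hi);
}

int main(void)
{
	return (int)set_timer_op(0x123456789abcdefULL);
}
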
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
new file mode 100644
index 0000000..7a804e8
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_IA64_XEN_HYPERVISOR_H
+#define _ASM_IA64_XEN_HYPERVISOR_H
+
+#ifdef CONFIG_XEN
+
+#include <linux/init.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>	/* to compile feature.c */
+#include <xen/features.h>		/* to compile xen-netfront.c */
+#include <asm/xen/hypercall.h>
+
+/* xen_domain_type is set before executing any C code by early_xen_setup */
+enum xen_domain_type {
+	XEN_NATIVE,
+	XEN_PV_DOMAIN,
+	XEN_HVM_DOMAIN,
+};
+
+extern enum xen_domain_type xen_domain_type;
+
+#define xen_domain()		(xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
+#define xen_initial_domain()	(xen_pv_domain() && \
+				 (xen_start_info->flags & SIF_INITDOMAIN))
+#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
+
+/* deprecated. remove this */
+#define is_running_on_xen()	(xen_domain_type == XEN_PV_DOMAIN)
+
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+
+void __init xen_setup_vcpu_info_placement(void);
+void force_evtchn_callback(void);
+
+/* for drivers/xen/balloon/balloon.c */
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p, _n) ((void)0)
+#endif
+
+/* For setup_arch() in arch/ia64/kernel/setup.c */
+void xen_ia64_enable_opt_feature(void);
+
+#else /* CONFIG_XEN */
+
+#define xen_domain()		(0)
+#define xen_pv_domain()		(0)
+#define xen_initial_domain()	(0)
+#define xen_hvm_domain()	(0)
+#define is_running_on_xen()	(0)	/* deprecated. remove this */
+#endif
+
+#define is_initial_xendomain()	(0)	/* deprecated. remove this */
+
+#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
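
Editor's note: xen_domain(), xen_pv_domain() and xen_hvm_domain() are plain comparisons against the xen_domain_type value that early_xen_setup fills in before any C code runs. The sketch below models how callers typically branch on them; setup_platform() and the hard-wired domain type are assumptions made for the example, not part of the patch.

#include <stdio.h>

enum xen_domain_type { XEN_NATIVE, XEN_PV_DOMAIN, XEN_HVM_DOMAIN };

/* Normally set by early boot code; hard-wired here for illustration. */
static enum xen_domain_type xen_domain_type = XEN_PV_DOMAIN;

#define xen_domain()	 (xen_domain_type != XEN_NATIVE)
#define xen_pv_domain()	 (xen_domain_type == XEN_PV_DOMAIN)
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)

static void setup_platform(void)
{
	if (!xen_domain()) {
		puts("bare metal: use native privileged ops");
		return;
	}
	if (xen_pv_domain())
		puts("PV guest: route privileged ops through hyperprivops");
	else if (xen_hvm_domain())
		puts("HVM guest: hardware-assisted, mostly native path");
}

int main(void)
{
	setup_platform();
	return 0;
}
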
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
new file mode 100644
index 0000000..19c2ae1
--- /dev/null
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -0,0 +1,458 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <asm/xen/privop.h>
+
+#define ia64_ivt				xen_ivt
+#define DO_SAVE_MIN				XEN_DO_SAVE_MIN
+
+#define __paravirt_switch_to			xen_switch_to
+#define __paravirt_leave_syscall		xen_leave_syscall
+#define __paravirt_work_processed_syscall	xen_work_processed_syscall
+#define __paravirt_leave_kernel			xen_leave_kernel
+#define __paravirt_pending_syscall_end		xen_work_pending_syscall_end
+#define __paravirt_work_processed_syscall_target \
+						xen_work_processed_syscall
+
+#define MOV_FROM_IFA(reg)	\
+	movl reg = XSI_IFA;	\
+	;;			\
+	ld8 reg = [reg]
+
+#define MOV_FROM_ITIR(reg)	\
+	movl reg = XSI_ITIR;	\
+	;;			\
+	ld8 reg = [reg]
+
+#define MOV_FROM_ISR(reg)	\
+	movl reg = XSI_ISR;	\
+	;;			\
+	ld8 reg = [reg]
+
+#define MOV_FROM_IHA(reg)	\
+	movl reg = XSI_IHA;	\
+	;;			\
+	ld8 reg = [reg]
+
+#define MOV_FROM_IPSR(pred, reg)	\
+(pred)	movl reg = XSI_IPSR;		\
+	;;				\
+(pred)	ld8 reg = [reg]
+
+#define MOV_FROM_IIM(reg)	\
+	movl reg = XSI_IIM;	\
+	;;			\
+	ld8 reg = [reg]
+
+#define MOV_FROM_IIP(reg)	\
+	movl reg = XSI_IIP;	\
+	;;			\
+	ld8 reg = [reg]
+
+.macro __MOV_FROM_IVR reg, clob
+	.ifc "\reg", "r8"
+		XEN_HYPER_GET_IVR
+		.exitm
+	.endif
+	.ifc "\clob", "r8"
+		XEN_HYPER_GET_IVR
+		;;
+		mov \reg = r8
+		.exitm
+	.endif
+
+	mov \clob = r8
+	;;
+	XEN_HYPER_GET_IVR
+	;;
+	mov \reg = r8
+	;;
+	mov r8 = \clob
+.endm
+#define MOV_FROM_IVR(reg, clob)	__MOV_FROM_IVR reg, clob
+
+.macro __MOV_FROM_PSR pred, reg, clob
+	.ifc "\reg", "r8"
+		(\pred)	XEN_HYPER_GET_PSR;
+		.exitm
+	.endif
+	.ifc "\clob", "r8"
+		(\pred)	XEN_HYPER_GET_PSR
+		;;
+		(\pred)	mov \reg = r8
+		.exitm
+	.endif
+
+	(\pred)	mov \clob = r8
+	(\pred)	XEN_HYPER_GET_PSR
+	;;
+	(\pred)	mov \reg = r8
+	(\pred)	mov r8 = \clob
+.endm
+#define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob
+
+
+#define MOV_TO_IFA(reg, clob)	\
+	movl clob = XSI_IFA;	\
+	;;			\
+	st8 [clob] = reg	\
+
+#define MOV_TO_ITIR(pred, reg, clob)	\
+(pred)	movl clob = XSI_ITIR;		\
+	;;				\
+(pred)	st8 [clob] = reg
+
+#define MOV_TO_IHA(pred, reg, clob)	\
+(pred)	movl clob = XSI_IHA;		\
+	;;				\
+(pred)	st8 [clob] = reg
+
+#define MOV_TO_IPSR(pred, reg, clob)	\
+(pred)	movl clob = XSI_IPSR;		\
+	;;				\
+(pred)	st8 [clob] = reg;		\
+	;;
+
+#define MOV_TO_IFS(pred, reg, clob)	\
+(pred)	movl clob = XSI_IFS;		\
+	;;				\
+(pred)	st8 [clob] = reg;		\
+	;;
+
+#define MOV_TO_IIP(reg, clob)	\
+	movl clob = XSI_IIP;	\
+	;;			\
+	st8 [clob] = reg
+
+.macro ____MOV_TO_KR kr, reg, clob0, clob1
+	.ifc "\clob0", "r9"
+		.error "clob0 \clob0 must not be r9"
+	.endif
+	.ifc "\clob1", "r8"
+		.error "clob1 \clob1 must not be r8"
+	.endif
+
+	.ifnc "\reg", "r9"
+		.ifnc "\clob1", "r9"
+			mov \clob1 = r9
+		.endif
+		mov r9 = \reg
+	.endif
+	.ifnc "\clob0", "r8"
+		mov \clob0 = r8
+	.endif
+	mov r8 = \kr
+	;;
+	XEN_HYPER_SET_KR
+
+	.ifnc "\reg", "r9"
+		.ifnc "\clob1", "r9"
+			mov r9 = \clob1
+		.endif
+	.endif
+	.ifnc "\clob0", "r8"
+		mov r8 = \clob0
+	.endif
+.endm
+
+.macro __MOV_TO_KR kr, reg, clob0, clob1
+	.ifc "\clob0", "r9"
+		____MOV_TO_KR \kr, \reg, \clob1, \clob0
+		.exitm
+	.endif
+	.ifc "\clob1", "r8"
+		____MOV_TO_KR \kr, \reg, \clob1, \clob0
+		.exitm
+	.endif
+
+	____MOV_TO_KR \kr, \reg, \clob0, \clob1
+.endm
+
+#define MOV_TO_KR(kr, reg, clob0, clob1) \
+	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
+
+
+.macro __ITC_I pred, reg, clob
+	.ifc "\reg", "r8"
+		(\pred)	XEN_HYPER_ITC_I
+		.exitm
+	.endif
+	.ifc "\clob", "r8"
+		(\pred)	mov r8 = \reg
+		;;
+		(\pred)	XEN_HYPER_ITC_I
+		.exitm
+	.endif
+
+	(\pred)	mov \clob = r8
+	(\pred)	mov r8 = \reg
+	;;
+	(\pred)	XEN_HYPER_ITC_I
+	;;
+	(\pred)	mov r8 = \clob
+	;;
+.endm
+#define ITC_I(pred, reg, clob)	__ITC_I pred, reg, clob
+
+.macro __ITC_D pred, reg, clob
+	.ifc "\reg", "r8"
+		(\pred)	XEN_HYPER_ITC_D
+		;;
+		.exitm
+	.endif
+	.ifc "\clob", "r8"
+		(\pred)	mov r8 = \reg
+		;;
+		(\pred)	XEN_HYPER_ITC_D
+		;;
+		.exitm
+	.endif
+
+	(\pred)	mov \clob = r8
+	(\pred)	mov r8 = \reg
+	;;
+	(\pred)	XEN_HYPER_ITC_D
+	;;
+	(\pred)	mov r8 = \clob
+	;;
+.endm
+#define ITC_D(pred, reg, clob)	__ITC_D pred, reg, clob
+
+.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
+	.ifc "\reg", "r8"
+		(\pred_i)XEN_HYPER_ITC_I
+		;;
+		(\pred_d)XEN_HYPER_ITC_D
+		;;
+		.exitm
+	.endif
+	.ifc "\clob", "r8"
+		mov r8 = \reg
+		;;
+		(\pred_i)XEN_HYPER_ITC_I
+		;;
+		(\pred_d)XEN_HYPER_ITC_D
+		;;
+		.exitm
+	.endif
+
+	mov \clob = r8
+	mov r8 = \reg
+	;;
+	(\pred_i)XEN_HYPER_ITC_I
+	;;
+	(\pred_d)XEN_HYPER_ITC_D
+	;;
+	mov r8 = \clob
+	;;
+.endm
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
+	__ITC_I_AND_D pred_i, pred_d, reg, clob
+
+.macro __THASH pred, reg0, reg1, clob
+	.ifc "\reg0", "r8"
+		(\pred)	mov r8 = \reg1
+		(\pred)	XEN_HYPER_THASH
+		.exitm
+	.endif
+	.ifc "\reg1", "r8"
+		(\pred)	XEN_HYPER_THASH
+		;;
+		(\pred)	mov \reg0 = r8
+		;;
+		.exitm
+	.endif
+	.ifc "\clob", "r8"
+		(\pred)	mov r8 = \reg1
+		(\pred)	XEN_HYPER_THASH
+		;;
+		(\pred)	mov \reg0 = r8
+		;;
+		.exitm
+	.endif
+
+	(\pred)	mov \clob = r8
+	(\pred)	mov r8 = \reg1
+	(\pred)	XEN_HYPER_THASH
+	;;
+	(\pred)	mov \reg0 = r8
+	(\pred)	mov r8 = \clob
+	;;
+.endm
+#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
+	mov clob0 = 1;						\
+	movl clob1 = XSI_PSR_IC;				\
+	;;							\
+	st4 [clob1] = clob0					\
+	;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
+	;;					\
+	srlz.d;					\
+	mov clob1 = 1;				\
+	movl clob0 = XSI_PSR_IC;		\
+	;;					\
+	st4 [clob0] = clob1
+
+#define RSM_PSR_IC(clob)	\
+	movl clob = XSI_PSR_IC;	\
+	;;			\
+	st4 [clob] = r0;	\
+	;;
+
+/* pred will be clobbered */
+#define MASK_TO_PEND_OFS    (-1)
+#define SSM_PSR_I(pred, pred_clob, clob)				\
+(pred)	movl clob = XSI_PSR_I_ADDR					\
+	;;								\
+(pred)	ld8 clob = [clob]						\
+	;;								\
+	/* if (pred) vpsr.i = 1 */					\
+	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */		\
+(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS				\
+	;;								\
+	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */		\
+(pred)	ld1 clob = [clob]						\
+	;;								\
+(pred)	cmp.ne.unc pred_clob, p0 = clob, r0				\
+	;;								\
+(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
+
+#define RSM_PSR_I(pred, clob0, clob1)	\
+	movl clob0 = XSI_PSR_I_ADDR;	\
+	mov clob1 = 1;			\
+	;;				\
+	ld8 clob0 = [clob0];		\
+	;;				\
+(pred)	st1 [clob0] = clob1
+
+#define RSM_PSR_I_IC(clob0, clob1, clob2)		\
+	movl clob0 = XSI_PSR_I_ADDR;			\
+	movl clob1 = XSI_PSR_IC;			\
+	;;						\
+	ld8 clob0 = [clob0];				\
+	mov clob2 = 1;					\
+	;;						\
+	/* note: clears both vpsr.i and vpsr.ic! */	\
+	st1 [clob0] = clob2;				\
+	st4 [clob1] = r0;				\
+	;;
+
+#define RSM_PSR_DT		\
+	XEN_HYPER_RSM_PSR_DT
+
+#define SSM_PSR_DT_AND_SRLZ_I	\
+	XEN_HYPER_SSM_PSR_DT
+
+#define BSW_0(clob0, clob1, clob2)			\
+	;;						\
+	/* r16-r31 all now hold bank1 values */		\
+	mov clob2 = ar.unat;				\
+	movl clob0 = XSI_BANK1_R16;			\
+	movl clob1 = XSI_BANK1_R16 + 8;			\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r16, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r17, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r18, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r19, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r20, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r21, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r22, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r23, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r24, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r25, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r26, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r27, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r28, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r29, 16;		\
+	;;						\
+.mem.offset 0, 0; st8.spill [clob0] = r30, 16;		\
+.mem.offset 8, 0; st8.spill [clob1] = r31, 16;		\
+	;;						\
+	mov clob1 = ar.unat;				\
+	movl clob0 = XSI_B1NAT;				\
+	;;						\
+	st8 [clob0] = clob1;				\
+	mov ar.unat = clob2;				\
+	movl clob0 = XSI_BANKNUM;			\
+	;;						\
+	st4 [clob0] = r0
+
+
+	/* FIXME: THIS CODE IS NOT NaT SAFE! */
+#define XEN_BSW_1(clob)			\
+	mov clob = ar.unat;		\
+	movl r30 = XSI_B1NAT;		\
+	;;				\
+	ld8 r30 = [r30];		\
+	mov r31 = 1;			\
+	;;				\
+	mov ar.unat = r30;		\
+	movl r30 = XSI_BANKNUM;		\
+	;;				\
+	st4 [r30] = r31;		\
+	movl r30 = XSI_BANK1_R16;	\
+	movl r31 = XSI_BANK1_R16+8;	\
+	;;				\
+	ld8.fill r16 = [r30], 16;	\
+	ld8.fill r17 = [r31], 16;	\
+	;;				\
+	ld8.fill r18 = [r30], 16;	\
+	ld8.fill r19 = [r31], 16;	\
+	;;				\
+	ld8.fill r20 = [r30], 16;	\
+	ld8.fill r21 = [r31], 16;	\
+	;;				\
+	ld8.fill r22 = [r30], 16;	\
+	ld8.fill r23 = [r31], 16;	\
+	;;				\
+	ld8.fill r24 = [r30], 16;	\
+	ld8.fill r25 = [r31], 16;	\
+	;;				\
+	ld8.fill r26 = [r30], 16;	\
+	ld8.fill r27 = [r31], 16;	\
+	;;				\
+	ld8.fill r28 = [r30], 16;	\
+	ld8.fill r29 = [r31], 16;	\
+	;;				\
+	ld8.fill r30 = [r30];		\
+	ld8.fill r31 = [r31];		\
+	;;				\
+	mov ar.unat = clob
+
+#define BSW_1(clob0, clob1)	XEN_BSW_1(clob1)
+
+
+#define COVER	\
+	XEN_HYPER_COVER
+
+#define RFI			\
+	XEN_HYPER_RFI;		\
+	dv_serialize_data
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
new file mode 100644
index 0000000..f00fab40
--- /dev/null
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -0,0 +1,346 @@
+/******************************************************************************
+ * arch-ia64/hypervisor-if.h
+ *
+ * Guest OS interface to IA64 Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright by those who contributed. (in alphabetical order)
+ *
+ * Anthony Xu <anthony.xu@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ * Fred Yang <fred.yang@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ * Alex Williamson <alex.williamson@hp.com>
+ * Chris Wright <chrisw@sous-sol.org>
+ * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
+ * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ * Isaku Yamahata <yamahata@valinux.co.jp>
+ * Jan Beulich <jbeulich@novell.com>
+ * John Levon <john.levon@sun.com>
+ * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
+ * Keir Fraser <keir.fraser@citrix.com>
+ * Kouya Shimura <kouya@jp.fujitsu.com>
+ * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
+ * Matt Chapman <matthewc@hp.com>
+ * Matthew Chapman <matthewc@hp.com>
+ * Samuel Thibault <samuel.thibault@eu.citrix.com>
+ * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
+ * Tristan Gingold <tgingold@free.fr>
+ * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
+ * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
+ * Zhang Xin <xing.z.zhang@intel.com>
+ * Zhang xiantao <xiantao.zhang@intel.com>
+ * dan.magenheimer@hp.com
+ * ian.pratt@cl.cam.ac.uk
+ * michael.fetterman@cl.cam.ac.uk
+ */
+
+#ifndef _ASM_IA64_XEN_INTERFACE_H
+#define _ASM_IA64_XEN_INTERFACE_H
+
+#define __DEFINE_GUEST_HANDLE(name, type)	\
+	typedef struct { type *p; } __guest_handle_ ## name
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name)	\
+	__DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name)	__DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name)		__guest_handle_ ## name
+#define GUEST_HANDLE_64(name)		GUEST_HANDLE(name)
+#define set_xen_guest_handle(hnd, val)	do { (hnd).p = val; } while (0)
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+__DEFINE_GUEST_HANDLE(u64, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+
+typedef unsigned long xen_pfn_t;
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+#define PRI_xen_pfn	"lx"
+#endif
+
+/* Arch specific VIRQs definition */
+#define VIRQ_ITC	VIRQ_ARCH_0	/* V. Virtual itc timer */
+#define VIRQ_MCA_CMC	VIRQ_ARCH_1	/* MCA cmc interrupt */
+#define VIRQ_MCA_CPE	VIRQ_ARCH_2	/* MCA cpe interrupt */
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+/* keep sizeof(struct shared_page) <= PAGE_SIZE.
+ * this is checked in arch/ia64/xen/hypervisor.c. */
+#define MAX_VIRT_CPUS	64
+
+#ifndef __ASSEMBLY__
+
+#define INVALID_MFN	(~0UL)
+
+union vac {
+	unsigned long value;
+	struct {
+		int a_int:1;
+		int a_from_int_cr:1;
+		int a_to_int_cr:1;
+		int a_from_psr:1;
+		int a_from_cpuid:1;
+		int a_cover:1;
+		int a_bsw:1;
+		long reserved:57;
+	};
+};
+
+union vdc {
+	unsigned long value;
+	struct {
+		int d_vmsw:1;
+		int d_extint:1;
+		int d_ibr_dbr:1;
+		int d_pmc:1;
+		int d_to_pmd:1;
+		int d_itm:1;
+		long reserved:58;
+	};
+};
+
+struct mapped_regs {
+	union vac vac;
+	union vdc vdc;
+	unsigned long virt_env_vaddr;
+	unsigned long reserved1[29];
+	unsigned long vhpi;
+	unsigned long reserved2[95];
+	union {
+		unsigned long vgr[16];
+		unsigned long bank1_regs[16];	/* bank1 regs (r16-r31)
+						   when bank0 active */
+	};
+	union {
+		unsigned long vbgr[16];
+		unsigned long bank0_regs[16];	/* bank0 regs (r16-r31)
+						   when bank1 active */
+	};
+	unsigned long vnat;
+	unsigned long vbnat;
+	unsigned long vcpuid[5];
+	unsigned long reserved3[11];
+	unsigned long vpsr;
+	unsigned long vpr;
+	unsigned long reserved4[76];
+	union {
+		unsigned long vcr[128];
+		struct {
+			unsigned long dcr;	/* CR0 */
+			unsigned long itm;
+			unsigned long iva;
+			unsigned long rsv1[5];
+			unsigned long pta;	/* CR8 */
+			unsigned long rsv2[7];
+			unsigned long ipsr;	/* CR16 */
+			unsigned long isr;
+			unsigned long rsv3;
+			unsigned long iip;
+			unsigned long ifa;
+			unsigned long itir;
+			unsigned long iipa;
+			unsigned long ifs;
+			unsigned long iim;	/* CR24 */
+			unsigned long iha;
+			unsigned long rsv4[38];
+			unsigned long lid;	/* CR64 */
+			unsigned long ivr;
+			unsigned long tpr;
+			unsigned long eoi;
+			unsigned long irr[4];
+			unsigned long itv;	/* CR72 */
+			unsigned long pmv;
+			unsigned long cmcv;
+			unsigned long rsv5[5];
+			unsigned long lrr0;	/* CR80 */
+			unsigned long lrr1;
+			unsigned long rsv6[46];
+		};
+	};
+	union {
+		unsigned long reserved5[128];
+		struct {
+			unsigned long precover_ifs;
+			unsigned long unat;	/* not sure if this is needed
+						   until NaT arch is done */
+			int interrupt_collection_enabled; /* virtual psr.ic */
+
+			/* virtual interrupt deliverable flag is
+			 * evtchn_upcall_mask in shared info area now.
+			 * interrupt_mask_addr is the address
+			 * of evtchn_upcall_mask for current vcpu
+			 */
+			unsigned char *interrupt_mask_addr;
+			int pending_interruption;
+			unsigned char vpsr_pp;
+			unsigned char vpsr_dfh;
+			unsigned char hpsr_dfh;
+			unsigned char hpsr_mfh;
+			unsigned long reserved5_1[4];
+			int metaphysical_mode;	/* 1 = use metaphys mapping
+						   0 = use virtual */
+			int banknum;		/* 0 or 1, which virtual
+						   register bank is active */
+			unsigned long rrs[8];	/* region registers */
+			unsigned long krs[8];	/* kernel registers */
+			unsigned long tmp[16];	/* temp registers
+						   (e.g. for hyperprivops) */
+		};
+	};
+};
+
+struct arch_vcpu_info {
+	/* nothing */
+};
+
+/*
+ * This structure is used for magic page in domain pseudo physical address
+ * space and the result of XENMEM_machine_memory_map.
+ * As the XENMEM_machine_memory_map result,
+ * xen_memory_map::nr_entries indicates the size in bytes
+ * including struct xen_ia64_memmap_info. Not the number of entries.
+ */
+struct xen_ia64_memmap_info {
+	uint64_t efi_memmap_size;	/* size of EFI memory map */
+	uint64_t efi_memdesc_size;	/* size of an EFI memory map
+					 * descriptor */
+	uint32_t efi_memdesc_version;	/* memory descriptor version */
+	void *memdesc[0];		/* array of efi_memory_desc_t */
+};
+
+struct arch_shared_info {
+	/* PFN of the start_info page.	*/
+	unsigned long start_info_pfn;
+
+	/* Interrupt vector for event channel.	*/
+	int evtchn_vector;
+
+	/* PFN of memmap_info page */
+	unsigned int memmap_info_num_pages;	/* currently only the == 1 case
+						   is supported. */
+	unsigned long memmap_info_pfn;
+
+	uint64_t pad[31];
+};
+
+struct xen_callback {
+	unsigned long ip;
+};
+typedef struct xen_callback xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/* Size of the shared_info area (this is not related to page size).  */
+#define XSI_SHIFT			14
+#define XSI_SIZE			(1 << XSI_SHIFT)
+/* Log size of mapped_regs area (64 KB - only 4KB is used).  */
+#define XMAPPEDREGS_SHIFT		12
+#define XMAPPEDREGS_SIZE		(1 << XMAPPEDREGS_SHIFT)
+/* Offset of XASI (Xen arch shared info) wrt XSI_BASE.	*/
+#define XMAPPEDREGS_OFS			XSI_SIZE
+
+/* Hyperprivops.  */
+#define HYPERPRIVOP_START		0x1
+#define HYPERPRIVOP_RFI			(HYPERPRIVOP_START + 0x0)
+#define HYPERPRIVOP_RSM_DT		(HYPERPRIVOP_START + 0x1)
+#define HYPERPRIVOP_SSM_DT		(HYPERPRIVOP_START + 0x2)
+#define HYPERPRIVOP_COVER		(HYPERPRIVOP_START + 0x3)
+#define HYPERPRIVOP_ITC_D		(HYPERPRIVOP_START + 0x4)
+#define HYPERPRIVOP_ITC_I		(HYPERPRIVOP_START + 0x5)
+#define HYPERPRIVOP_SSM_I		(HYPERPRIVOP_START + 0x6)
+#define HYPERPRIVOP_GET_IVR		(HYPERPRIVOP_START + 0x7)
+#define HYPERPRIVOP_GET_TPR		(HYPERPRIVOP_START + 0x8)
+#define HYPERPRIVOP_SET_TPR		(HYPERPRIVOP_START + 0x9)
+#define HYPERPRIVOP_EOI			(HYPERPRIVOP_START + 0xa)
+#define HYPERPRIVOP_SET_ITM		(HYPERPRIVOP_START + 0xb)
+#define HYPERPRIVOP_THASH		(HYPERPRIVOP_START + 0xc)
+#define HYPERPRIVOP_PTC_GA		(HYPERPRIVOP_START + 0xd)
+#define HYPERPRIVOP_ITR_D		(HYPERPRIVOP_START + 0xe)
+#define HYPERPRIVOP_GET_RR		(HYPERPRIVOP_START + 0xf)
+#define HYPERPRIVOP_SET_RR		(HYPERPRIVOP_START + 0x10)
+#define HYPERPRIVOP_SET_KR		(HYPERPRIVOP_START + 0x11)
+#define HYPERPRIVOP_FC			(HYPERPRIVOP_START + 0x12)
+#define HYPERPRIVOP_GET_CPUID		(HYPERPRIVOP_START + 0x13)
+#define HYPERPRIVOP_GET_PMD		(HYPERPRIVOP_START + 0x14)
+#define HYPERPRIVOP_GET_EFLAG		(HYPERPRIVOP_START + 0x15)
+#define HYPERPRIVOP_SET_EFLAG		(HYPERPRIVOP_START + 0x16)
+#define HYPERPRIVOP_RSM_BE		(HYPERPRIVOP_START + 0x17)
+#define HYPERPRIVOP_GET_PSR		(HYPERPRIVOP_START + 0x18)
+#define HYPERPRIVOP_SET_RR0_TO_RR4	(HYPERPRIVOP_START + 0x19)
+#define HYPERPRIVOP_MAX			(0x1a)
+
+/* Fast and light hypercalls.  */
+#define __HYPERVISOR_ia64_fast_eoi	__HYPERVISOR_arch_1
+
+/* Xencomm macros.  */
+#define XENCOMM_INLINE_MASK		0xf800000000000000UL
+#define XENCOMM_INLINE_FLAG		0x8000000000000000UL
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Optimization features.
+ * The hypervisor may do some special optimizations for guests. This hypercall
+ * can be used to switch on/off these special optimizations.
+ */
+#define __HYPERVISOR_opt_feature	0x700UL
+
+#define XEN_IA64_OPTF_OFF		0x0
+#define XEN_IA64_OPTF_ON		0x1
+
+/*
+ * If this feature is switched on, the hypervisor inserts the
+ * tlb entries without calling the guests traphandler.
+ * This is useful in guests using region 7 for identity mapping
+ * like the linux kernel does.
+ */
+#define XEN_IA64_OPTF_IDENT_MAP_REG7	1
+
+/* Identity mapping of region 4 addresses in HVM. */
+#define XEN_IA64_OPTF_IDENT_MAP_REG4	2
+
+/* Identity mapping of region 5 addresses in HVM. */
+#define XEN_IA64_OPTF_IDENT_MAP_REG5	3
+
+#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET	 (0)
+
+struct xen_ia64_opt_feature {
+	unsigned long cmd;	/* Which feature */
+	unsigned char on;	/* Switch feature on/off */
+	union {
+		struct {
+			/* The page protection bit mask of the pte.
+			 * This will be or'ed with the pte. */
+			unsigned long pgprot;
+			unsigned long key;	/* A protection key for itir.*/
+		};
+	};
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_XEN_INTERFACE_H */
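
Editor's note: the guest-handle macros wrap a raw pointer in a one-member struct per type, so hypercall argument blocks stay type-distinct and every assignment goes through set_xen_guest_handle(). Below is a stand-alone mock-up of the same pattern; struct fake_memory_op_arg is invented for illustration, and only the three macros mirror the header above.

#include <stdio.h>

#define __DEFINE_GUEST_HANDLE(name, type) \
	typedef struct { type *p; } __guest_handle_ ## name

#define GUEST_HANDLE(name)		__guest_handle_ ## name
#define set_xen_guest_handle(hnd, val)	do { (hnd).p = val; } while (0)

__DEFINE_GUEST_HANDLE(ulong, unsigned long);

/* A pretend hypercall argument block carrying a handle to an array. */
struct fake_memory_op_arg {
	GUEST_HANDLE(ulong) extent_start;
	unsigned long nr_extents;
};

int main(void)
{
	unsigned long pfns[4] = { 1, 2, 3, 4 };
	struct fake_memory_op_arg arg;

	set_xen_guest_handle(arg.extent_start, pfns);
	arg.nr_extents = 4;
	printf("first pfn via handle: %lu\n", arg.extent_start.p[0]);
	return 0;
}
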
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
new file mode 100644
index 0000000..a904509
--- /dev/null
+++ b/arch/ia64/include/asm/xen/irq.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/irq.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _ASM_IA64_XEN_IRQ_H
+#define _ASM_IA64_XEN_IRQ_H
+
+/*
+ * The flat IRQ space is divided into two regions:
+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
+ *     if we have physical device-access privilege. This region is at the
+ *     start of the IRQ space so that existing device drivers do not need
+ *     to be modified to translate physical IRQ numbers into our IRQ space.
+ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ *     are bound using the provided bind/unbind functions.
+ */
+
+#define XEN_PIRQ_BASE		0
+#define XEN_NR_PIRQS		256
+
+#define XEN_DYNIRQ_BASE		(XEN_PIRQ_BASE + XEN_NR_PIRQS)
+#define XEN_NR_DYNIRQS		(NR_CPUS * 8)
+
+#define XEN_NR_IRQS		(XEN_NR_PIRQS + XEN_NR_DYNIRQS)
+
+#endif /* _ASM_IA64_XEN_IRQ_H */
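
Editor's note: the IRQ space simply places the 256 physical IRQs first and the dynamically bound event-channel IRQs after them, with the dynamic pool sized from NR_CPUS. A quick compile-and-run check of that layout, with NR_CPUS picked arbitrarily for the example:

#include <stdio.h>

#define NR_CPUS			4	/* example value, not the kernel config */

#define XEN_PIRQ_BASE		0
#define XEN_NR_PIRQS		256

#define XEN_DYNIRQ_BASE		(XEN_PIRQ_BASE + XEN_NR_PIRQS)
#define XEN_NR_DYNIRQS		(NR_CPUS * 8)

#define XEN_NR_IRQS		(XEN_NR_PIRQS + XEN_NR_DYNIRQS)

int main(void)
{
	printf("physical IRQs: %d..%d\n",
	       XEN_PIRQ_BASE, XEN_PIRQ_BASE + XEN_NR_PIRQS - 1);
	printf("dynamic  IRQs: %d..%d\n",
	       XEN_DYNIRQ_BASE, XEN_DYNIRQ_BASE + XEN_NR_DYNIRQS - 1);
	printf("total:         %d\n", XEN_NR_IRQS);
	return 0;
}
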
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
new file mode 100644
index 0000000..4d92d9b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -0,0 +1,134 @@
+/*
+ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ *	psr.ic: off
+ *	r31:	contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ *	psr.ic: off
+ *	 r2 = points to &pt_regs.r16
+ *	 r8 = contents of ar.ccv
+ *	 r9 = contents of ar.csd
+ *	r10 = contents of ar.ssd
+ *	r11 = FPSR_DEFAULT
+ *	r12 = kernel sp (kernel virtual address)
+ *	r13 = points to current task_struct (kernel virtual address)
+ *	p15 = TRUE if psr.i is set in cr.ipsr
+ *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
+ *		preserved
+ * CONFIG_XEN note: p6/p7 are not preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro.  This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND)					\
+	mov r16=IA64_KR(CURRENT);	/* M */							\
+	mov r27=ar.rsc;			/* M */							\
+	mov r20=r1;			/* A */							\
+	mov r25=ar.unat;		/* M */							\
+	MOV_FROM_IPSR(p0,r29);		/* M */							\
+	MOV_FROM_IIP(r28);		/* M */							\
+	mov r21=ar.fpsr;		/* M */							\
+	mov r26=ar.pfs;			/* I */							\
+	__COVER;			/* B;; (or nothing) */					\
+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;						\
+	;;											\
+	ld1 r17=[r16];				/* load current->thread.on_ustack flag */	\
+	st1 [r16]=r0;				/* clear current->thread.on_ustack flag */	\
+	adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16						\
+	/* switch from user to kernel RBS: */							\
+	;;											\
+	invala;				/* M */							\
+	/* SAVE_IFS;*/ /* see xen special handling below */					\
+	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
+	;;											\
+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
+	;;											\
+(pUStk)	mov.m r24=ar.rnat;									\
+(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
+(pKStk) mov r1=sp;					/* get sp  */				\
+	;;											\
+(pUStk) lfetch.fault.excl.nt1 [r22];								\
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
+(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
+	;;											\
+(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */		\
+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
+	;;											\
+(pUStk)	mov r18=ar.bsp;										\
+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
+	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
+	adds r16=PT(CR_IPSR),r1;								\
+	;;											\
+	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;						\
+	st8 [r16]=r29;		/* save cr.ipsr */						\
+	;;											\
+	lfetch.fault.excl.nt1 [r17];								\
+	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;							\
+	mov r29=b0										\
+	;;											\
+	WORKAROUND;										\
+	adds r16=PT(R8),r1;	/* initialize first base pointer */				\
+	adds r17=PT(R9),r1;	/* initialize second base pointer */				\
+(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */					\
+	;;											\
+.mem.offset 0,0; st8.spill [r16]=r8,16;								\
+.mem.offset 8,0; st8.spill [r17]=r9,16;								\
+        ;;											\
+.mem.offset 0,0; st8.spill [r16]=r10,24;							\
+	movl r8=XSI_PRECOVER_IFS;								\
+.mem.offset 8,0; st8.spill [r17]=r11,24;							\
+        ;;											\
+	/* xen special handling for possibly lazy cover */					\
+	/* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */				\
+	ld8 r30=[r8];										\
+(pUStk)	sub r18=r18,r22;	/* r18=RSE.ndirty*8 */						\
+	st8 [r16]=r28,16;	/* save cr.iip */						\
+	;;											\
+	st8 [r17]=r30,16;	/* save cr.ifs */						\
+	mov r8=ar.ccv;										\
+	mov r9=ar.csd;										\
+	mov r10=ar.ssd;										\
+	movl r11=FPSR_DEFAULT;   /* L-unit */							\
+	;;											\
+	st8 [r16]=r25,16;	/* save ar.unat */						\
+	st8 [r17]=r26,16;	/* save ar.pfs */						\
+	shl r18=r18,16;		/* compute ar.rsc to be used for "loadrs" */			\
+	;;											\
+	st8 [r16]=r27,16;	/* save ar.rsc */						\
+(pUStk)	st8 [r17]=r24,16;	/* save ar.rnat */						\
+(pKStk)	adds r17=16,r17;	/* skip over ar_rnat field */					\
+	;;			/* avoid RAW on r16 & r17 */					\
+(pUStk)	st8 [r16]=r23,16;	/* save ar.bspstore */						\
+	st8 [r17]=r31,16;	/* save predicates */						\
+(pKStk)	adds r16=16,r16;	/* skip over ar_bspstore field */				\
+	;;											\
+	st8 [r16]=r29,16;	/* save b0 */							\
+	st8 [r17]=r18,16;	/* save ar.rsc value for "loadrs" */				\
+	cmp.eq pNonSys,pSys=r0,r0	/* initialize pSys=0, pNonSys=1 */			\
+	;;											\
+.mem.offset 0,0; st8.spill [r16]=r20,16;	/* save original r1 */				\
+.mem.offset 8,0; st8.spill [r17]=r12,16;							\
+	adds r12=-16,r1;	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
+	;;											\
+.mem.offset 0,0; st8.spill [r16]=r13,16;							\
+.mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
+	mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
+	;;											\
+.mem.offset 0,0; st8.spill [r16]=r15,16;							\
+.mem.offset 8,0; st8.spill [r17]=r14,16;							\
+	;;											\
+.mem.offset 0,0; st8.spill [r16]=r2,16;								\
+.mem.offset 8,0; st8.spill [r17]=r3,16;								\
+	ACCOUNT_GET_STAMP									\
+	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
+	;;											\
+	EXTRA;											\
+	movl r1=__gp;		/* establish kernel global pointer */				\
+	;;											\
+	ACCOUNT_SYS_ENTER									\
+	BSW_1(r3,r14);	/* switch back to bank 1 (must be last in insn group) */		\
+	;;
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
new file mode 100644
index 0000000..03441a78
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/page.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _ASM_IA64_XEN_PAGE_H
+#define _ASM_IA64_XEN_PAGE_H
+
+#define INVALID_P2M_ENTRY	(~0UL)
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	return mfn;
+}
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	return pfn;
+}
+
+#define phys_to_machine_mapping_valid(_x)	(1)
+
+static inline void *mfn_to_virt(unsigned long mfn)
+{
+	return __va(mfn << PAGE_SHIFT);
+}
+
+static inline unsigned long virt_to_mfn(void *virt)
+{
+	return __pa(virt) >> PAGE_SHIFT;
+}
+
+/* for tpmfront.c */
+static inline unsigned long virt_to_machine(void *virt)
+{
+	return __pa(virt);
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	/* nothing */
+}
+
+#define pte_mfn(_x)	pte_pfn(_x)
+#define mfn_pte(_x, _y)	__pte_ma(0)		/* unmodified use */
+#define __pte_ma(_x)	((pte_t) {(_x)})        /* unmodified use */
+
+#endif /* _ASM_IA64_XEN_PAGE_H */
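
Editor's note: because the ia64 pseudo-physical and machine address spaces coincide, the p2m helpers are identity functions and virt/machine conversion is just __pa()/__va() plus a shift. The sketch below reproduces those relationships in user space; PAGE_SHIFT, FAKE_PAGE_OFFSET and the __pa/__va stand-ins are assumptions for the example (64-bit host), not the kernel's definitions.

#include <stdio.h>

#define PAGE_SHIFT	14UL	/* 16 KB pages, a common ia64 configuration */

/* Stand-ins for the kernel's __pa()/__va(): a fixed offset for illustration. */
#define FAKE_PAGE_OFFSET	0xe000000000000000UL
#define __pa(v)	((unsigned long)(v) - FAKE_PAGE_OFFSET)
#define __va(p)	((void *)((unsigned long)(p) + FAKE_PAGE_OFFSET))

static unsigned long pfn_to_mfn(unsigned long pfn) { return pfn; }
static unsigned long mfn_to_pfn(unsigned long mfn) { return mfn; }

static void *mfn_to_virt(unsigned long mfn) { return __va(mfn << PAGE_SHIFT); }
static unsigned long virt_to_mfn(void *virt) { return __pa(virt) >> PAGE_SHIFT; }

int main(void)
{
	unsigned long pfn = 0x1234;
	void *virt = mfn_to_virt(pfn_to_mfn(pfn));

	printf("pfn 0x%lx -> virt %p -> mfn 0x%lx\n",
	       pfn, virt, virt_to_mfn(virt));
	return mfn_to_pfn(pfn) != pfn;	/* identity by construction */
}
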
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
new file mode 100644
index 0000000..71ec754
--- /dev/null
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -0,0 +1,129 @@
+#ifndef _ASM_IA64_XEN_PRIVOP_H
+#define _ASM_IA64_XEN_PRIVOP_H
+
+/*
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *	Dan Magenheimer <dan.magenheimer@hp.com>
+ *
+ * Paravirtualizations of privileged operations for Xen/ia64
+ *
+ *
+ * inline privop and paravirt_alt support
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>		/* arch-ia64.h requires uint64_t */
+#endif
+#include <asm/xen/interface.h>
+
+/* At 1 MB, before per-cpu space but still addressable using addl instead
+   of movl. */
+#define XSI_BASE			0xfffffffffff00000
+
+/* Address of mapped regs.  */
+#define XMAPPEDREGS_BASE		(XSI_BASE + XSI_SIZE)
+
+#ifdef __ASSEMBLY__
+#define XEN_HYPER_RFI			break HYPERPRIVOP_RFI
+#define XEN_HYPER_RSM_PSR_DT		break HYPERPRIVOP_RSM_DT
+#define XEN_HYPER_SSM_PSR_DT		break HYPERPRIVOP_SSM_DT
+#define XEN_HYPER_COVER			break HYPERPRIVOP_COVER
+#define XEN_HYPER_ITC_D			break HYPERPRIVOP_ITC_D
+#define XEN_HYPER_ITC_I			break HYPERPRIVOP_ITC_I
+#define XEN_HYPER_SSM_I			break HYPERPRIVOP_SSM_I
+#define XEN_HYPER_GET_IVR		break HYPERPRIVOP_GET_IVR
+#define XEN_HYPER_THASH			break HYPERPRIVOP_THASH
+#define XEN_HYPER_ITR_D			break HYPERPRIVOP_ITR_D
+#define XEN_HYPER_SET_KR		break HYPERPRIVOP_SET_KR
+#define XEN_HYPER_GET_PSR		break HYPERPRIVOP_GET_PSR
+#define XEN_HYPER_SET_RR0_TO_RR4	break HYPERPRIVOP_SET_RR0_TO_RR4
+
+#define XSI_IFS				(XSI_BASE + XSI_IFS_OFS)
+#define XSI_PRECOVER_IFS		(XSI_BASE + XSI_PRECOVER_IFS_OFS)
+#define XSI_IFA				(XSI_BASE + XSI_IFA_OFS)
+#define XSI_ISR				(XSI_BASE + XSI_ISR_OFS)
+#define XSI_IIM				(XSI_BASE + XSI_IIM_OFS)
+#define XSI_ITIR			(XSI_BASE + XSI_ITIR_OFS)
+#define XSI_PSR_I_ADDR			(XSI_BASE + XSI_PSR_I_ADDR_OFS)
+#define XSI_PSR_IC			(XSI_BASE + XSI_PSR_IC_OFS)
+#define XSI_IPSR			(XSI_BASE + XSI_IPSR_OFS)
+#define XSI_IIP				(XSI_BASE + XSI_IIP_OFS)
+#define XSI_B1NAT			(XSI_BASE + XSI_B1NATS_OFS)
+#define XSI_BANK1_R16			(XSI_BASE + XSI_BANK1_R16_OFS)
+#define XSI_BANKNUM			(XSI_BASE + XSI_BANKNUM_OFS)
+#define XSI_IHA				(XSI_BASE + XSI_IHA_OFS)
+#endif
+
+#ifndef __ASSEMBLY__
+
+/************************************************/
+/* Instructions paravirtualized for correctness */
+/************************************************/
+
+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
+ *  may have different semantics depending on whether they are executed
+ *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
+ *  be allowed to execute directly, lest incorrect semantics result. */
+extern void xen_fc(unsigned long addr);
+extern unsigned long xen_thash(unsigned long addr);
+
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ * and the semantics of cover only change if psr.ic is off which is very
+ * rare (and currently non-existent outside of assembly code). */
+
+/* There are also privilege-sensitive registers.  These registers are
+ * readable at any privilege level but only writable at PL0. */
+extern unsigned long xen_get_cpuid(int index);
+extern unsigned long xen_get_pmd(int index);
+
+extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
+extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+
+/* Xen uses memory-mapped virtual privileged registers for access to many
+ * performance-sensitive privileged registers.  Some, like the processor
+ * status register (psr), are broken up into multiple memory locations.
+ * Others, like "pend", are abstractions based on privileged registers.
+ * "Pend" is guaranteed to be set if reading cr.ivr would return a
+ * (non-spurious) interrupt. */
+#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
+
+#define XSI_PSR_I			\
+	(*XEN_MAPPEDREGS->interrupt_mask_addr)
+#define xen_get_virtual_psr_i()		\
+	(!XSI_PSR_I)
+#define xen_set_virtual_psr_i(_val)	\
+	({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
+#define xen_set_virtual_psr_ic(_val)	\
+	({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
+#define xen_get_virtual_pend()		\
+	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
+
+/* Although all privileged operations can be left to trap and will
+ * be properly handled by Xen, some are frequent enough that we use
+ * hyperprivops for performance. */
+extern unsigned long xen_get_psr(void);
+extern unsigned long xen_get_ivr(void);
+extern unsigned long xen_get_tpr(void);
+extern void xen_hyper_ssm_i(void);
+extern void xen_set_itm(unsigned long);
+extern void xen_set_tpr(unsigned long);
+extern void xen_eoi(unsigned long);
+extern unsigned long xen_get_rr(unsigned long index);
+extern void xen_set_rr(unsigned long index, unsigned long val);
+extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+			       unsigned long val2, unsigned long val3,
+			       unsigned long val4);
+extern void xen_set_kr(unsigned long index, unsigned long val);
+extern void xen_ptcga(unsigned long addr, unsigned long size);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_XEN_PRIVOP_H */
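
Editor's note: the virtual psr.i accessors above work on the evtchn_upcall_mask byte with inverted sense: mask set means virtual interrupts are disabled. Here is a small stand-alone model of that inversion; struct fake_vcpu_info is a stub invented for the example, not the real vcpu_info/mapped_regs layout.

#include <stdint.h>
#include <stdio.h>

/* Stub with just the field the psr.i accessors touch. */
struct fake_vcpu_info {
	uint8_t evtchn_upcall_mask;	/* 1 = events masked (virtual psr.i off) */
};

static struct fake_vcpu_info vcpu;
static uint8_t *interrupt_mask_addr = &vcpu.evtchn_upcall_mask;

/* Mirrors the xen_get_virtual_psr_i()/xen_set_virtual_psr_i() semantics. */
static int get_virtual_psr_i(void)        { return !*interrupt_mask_addr; }
static void set_virtual_psr_i(int enable) { *interrupt_mask_addr = enable ? 0 : 1; }

int main(void)
{
	set_virtual_psr_i(0);
	printf("psr.i after disable: %d\n", get_virtual_psr_i());
	set_virtual_psr_i(1);
	printf("psr.i after enable:  %d\n", get_virtual_psr_i());
	return 0;
}
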
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
new file mode 100644
index 0000000..20b2950
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xcom_hcall.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
+#define _ASM_IA64_XEN_XCOM_HCALL_H
+
+/* These functions create an inline or mini descriptor for the parameters and
+   call the corresponding xencomm_arch_hypercall_X.
+   Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
+   they want to use their own wrapper.  */
+extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
+
+extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
+
+extern int xencomm_hypercall_xen_version(int cmd, void *arg);
+
+extern int xencomm_hypercall_physdev_op(int cmd, void *op);
+
+extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
+					    unsigned int count);
+
+extern int xencomm_hypercall_sched_op(int cmd, void *arg);
+
+extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
+
+extern int xencomm_hypercall_callback_op(int cmd, void *arg);
+
+extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
+
+extern int xencomm_hypercall_suspend(unsigned long srec);
+
+extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
+
+extern long xencomm_hypercall_opt_feature(void *arg);
+
+#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
new file mode 100644
index 0000000..cded677
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xencomm.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_IA64_XEN_XENCOMM_H
+#define _ASM_IA64_XEN_XENCOMM_H
+
+#include <xen/xencomm.h>
+#include <asm/pgtable.h>
+
+/* Must be called before any hypercall.  */
+extern void xencomm_initialize(void);
+extern int xencomm_is_initialized(void);
+
+/* Check whether virtual contiguity implies physical contiguity for the
+ * passed virtual address.  On ia64 this holds for the identity-mapped
+ * area in region 7 and for the piece of region 5 that is mapped by
+ * itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL].
+ */
+static inline int xencomm_is_phys_contiguous(unsigned long addr)
+{
+	return (PAGE_OFFSET <= addr &&
+		addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
+		(KERNEL_START <= addr &&
+		 addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
+}
+
+#endif /* _ASM_IA64_XEN_XENCOMM_H */
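
Editor's note: xencomm_is_phys_contiguous() accepts only addresses whose virtual contiguity guarantees physical contiguity, i.e. the region-7 identity map and the chunk of region 5 covered by the kernel translation registers. The following sketch repeats that two-interval test with boundary constants invented for illustration; the real values come from the ia64 headers.

#include <stdio.h>

/* Illustrative values only; the real ones come from the ia64 headers. */
#define PAGE_OFFSET		0xe000000000000000UL
#define IA64_MAX_PHYS_BITS	50
#define KERNEL_START		0xa000000100000000UL
#define KERNEL_TR_PAGE_SIZE	(64UL << 20)	/* 64 MB kernel TR page */

static int is_phys_contiguous(unsigned long addr)
{
	return (PAGE_OFFSET <= addr &&
		addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
	       (KERNEL_START <= addr &&
		addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
}

int main(void)
{
	printf("%d %d %d\n",
	       is_phys_contiguous(PAGE_OFFSET + 0x1000),   /* identity map: yes */
	       is_phys_contiguous(KERNEL_START + 0x2000),  /* kernel TR map: yes */
	       is_phys_contiguous(0x2000000000000000UL));  /* neither: no */
	return 0;
}
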
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 87fea11..c381ea9 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -42,6 +42,10 @@
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
 endif
+obj-$(CONFIG_DMAR)		+= pci-dma.o
+ifeq ($(CONFIG_DMAR), y)
+obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb.o
+endif
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
@@ -112,5 +116,23 @@
 ASM_PARAVIRT_OBJS = ivt.o entry.o
 define paravirtualized_native
 AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
+AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
+extra-y += pvchk-$(1)
 endef
 $(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
+
+#
+# Checker for paravirtualizations of privileged operations.
+#
+quiet_cmd_pv_check_sed = PVCHK   $@
+define cmd_pv_check_sed
+	sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
+endef
+
+$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
+	$(call if_changed_dep,as_s_S)
+$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
+	$(call if_changed,pv_check_sed)
+$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
+	$(call if_changed,as_o_S)
+.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5d1eb7e..0635015 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -52,6 +52,7 @@
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/cyclone.h>
+#include <asm/xen/hypervisor.h>
 
 #define BAD_MADT_ENTRY(entry, end) (                                        \
 		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
@@ -91,6 +92,9 @@
 	struct acpi_table_rsdp *rsdp;
 	struct acpi_table_xsdt *xsdt;
 	struct acpi_table_header *hdr;
+#ifdef CONFIG_DMAR
+	u64 i, nentries;
+#endif
 
 	rsdp_phys = acpi_find_rsdp();
 	if (!rsdp_phys) {
@@ -121,8 +125,22 @@
 			return "uv";
 		else
 			return "sn2";
+	} else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
+		return "xen";
 	}
 
+#ifdef CONFIG_DMAR
+	/* Look for Intel IOMMU */
+	nentries = (hdr->length - sizeof(*hdr)) /
+			 sizeof(xsdt->table_offset_entry[0]);
+	for (i = 0; i < nentries; i++) {
+		hdr = __va(xsdt->table_offset_entry[i]);
+		if (strncmp(hdr->signature, ACPI_SIG_DMAR,
+			sizeof(ACPI_SIG_DMAR) - 1) == 0)
+			return "dig_vtd";
+	}
+#endif
+
 	return "dig";
 #else
 # if defined (CONFIG_IA64_HP_SIM)
@@ -137,6 +155,10 @@
 	return "uv";
 # elif defined (CONFIG_IA64_DIG)
 	return "dig";
+# elif defined (CONFIG_IA64_XEN_GUEST)
+	return "xen";
+# elif defined(CONFIG_IA64_DIG_VTD)
+	return "dig_vtd";
 # else
 #	error Unknown platform.  Fix acpi.c.
 # endif
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 94c44b1..742dbb1 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,6 +16,9 @@
 #include <asm/sigcontext.h>
 #include <asm/mca.h>
 
+#include <asm/xen/interface.h>
+#include <asm/xen/hypervisor.h>
+
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
@@ -286,4 +289,32 @@
 		offsetof (struct itc_jitter_data_t, itc_jitter));
 	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
 		offsetof (struct itc_jitter_data_t, itc_lastcycle));
+
+#ifdef CONFIG_XEN
+	BLANK();
+
+	DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
+	DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
+
+#define DEFINE_MAPPED_REG_OFS(sym, field) \
+	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
+
+	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
+	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
+	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
+	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
+	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
+	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
+	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
+	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
+	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
+	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
+	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
+	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
+	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
+	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
+	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
+	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
+#endif /* CONFIG_XEN */
 }
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index da60e90..23e9129 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -8,10 +8,14 @@
 
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/crash_dump.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
 
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 51b75ce..efaff15 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1335,7 +1335,7 @@
 }
 #endif
 
-#ifdef CONFIG_PROC_VMCORE
+#ifdef CONFIG_CRASH_DUMP
 /* locate the size of the descriptor at a certain address */
 unsigned long __init
 vmcore_find_descriptor_size (unsigned long address)
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0dd6c14..7ef0c59 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -534,6 +534,11 @@
  	stf.spill [r16]=f10
  	stf.spill [r17]=f11
 	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
+	cmp.lt p6,p0=r8,r0			// check tracehook
+	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
+	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
+	mov r10=0
+(p6)	br.cond.sptk strace_error		// syscall failed ->
 	adds r16=PT(F6)+16,sp
 	adds r17=PT(F7)+16,sp
 	;;
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 416a952..f675d8e3 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -580,7 +580,7 @@
 	mov b0=r29				// restore b0
 	;;
 	st8 [r17]=r18				// store back updated PTE
-	itc.d r18				// install updated PTE
+	ITC_D(p0, r18, r16)			// install updated PTE
 #endif
 	mov pr=r31,-1				// restore pr
 	RFI
@@ -646,7 +646,7 @@
 	mov b0=r29				// restore b0
 	;;
 	st8 [r17]=r18				// store back updated PTE
-	itc.i r18				// install updated PTE
+	ITC_I(p0, r18, r16)			// install updated PTE
 #endif /* !CONFIG_SMP */
 	mov pr=r31,-1
 	RFI
@@ -698,7 +698,7 @@
 	or r18=_PAGE_A,r18			// set the accessed bit
 	;;
 	st8 [r17]=r18				// store back updated PTE
-	itc.d r18				// install updated PTE
+	ITC_D(p0, r18, r16)			// install updated PTE
 #endif
 	mov b0=r29				// restore b0
 	mov pr=r31,-1
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 60c6ef6..702a09c 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
+#include <linux/dmar.h>
 #include <asm/smp.h>
 
 /*
@@ -162,3 +163,82 @@
 
 	return ia64_teardown_msi_irq(irq);
 }
+
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+	struct irq_cfg *cfg = irq_cfg + irq;
+	struct msi_msg msg;
+	int cpu = first_cpu(mask);
+
+
+	if (!cpu_online(cpu))
+		return;
+
+	if (irq_prepare_move(irq, cpu))
+		return;
+
+	dmar_msi_read(irq, &msg);
+
+	msg.data &= ~MSI_DATA_VECTOR_MASK;
+	msg.data |= MSI_DATA_VECTOR(cfg->vector);
+	msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
+	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+
+	dmar_msi_write(irq, &msg);
+	irq_desc[irq].affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+	.name = "DMAR_MSI",
+	.unmask = dmar_msi_unmask,
+	.mask = dmar_msi_mask,
+	.ack = ia64_ack_msi_irq,
+#ifdef CONFIG_SMP
+	.set_affinity = dmar_msi_set_affinity,
+#endif
+	.retrigger = ia64_msi_retrigger_irq,
+};
+
+static int
+msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+{
+	struct irq_cfg *cfg = irq_cfg + irq;
+	unsigned dest;
+	cpumask_t mask;
+
+	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	dest = cpu_physical_id(first_cpu(mask));
+
+	msg->address_hi = 0;
+	msg->address_lo =
+		MSI_ADDR_HEADER |
+		MSI_ADDR_DESTMODE_PHYS |
+		MSI_ADDR_REDIRECTION_CPU |
+		MSI_ADDR_DESTID_CPU(dest);
+
+	msg->data =
+		MSI_DATA_TRIGGER_EDGE |
+		MSI_DATA_LEVEL_ASSERT |
+		MSI_DATA_DELIVERY_FIXED |
+		MSI_DATA_VECTOR(cfg->vector);
+	return 0;
+}
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+	int ret;
+	struct msi_msg msg;
+
+	ret = msi_compose_msg(NULL, irq, &msg);
+	if (ret < 0)
+		return ret;
+	dmar_msi_write(irq, &msg);
+	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+		"edge");
+	return 0;
+}
+#endif /* CONFIG_DMAR */
+
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index 8273afc..ee56457 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,6 +10,7 @@
 #include <linux/kbuild.h>
 #include <linux/threads.h>
 #include <asm/native/irq.h>
+#include <asm/xen/irq.h>
 
 void foo(void)
 {
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index afaf5b9..de35d8e 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -332,7 +332,7 @@
 
 struct pv_iosapic_ops pv_iosapic_ops = {
 	.pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
-	.get_irq_chip = ia64_native_iosapic_get_irq_chip,
+	.__get_irq_chip = ia64_native_iosapic_get_irq_chip,
 
 	.__read = ia64_native_iosapic_read,
 	.__write = ia64_native_iosapic_write,
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 5cad6fb..64d6d81 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -20,7 +20,9 @@
  *
  */
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
+#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
+#include <asm/native/pvchk_inst.h>
+#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
 #include <asm/xen/inst.h>
 #include <asm/xen/minstate.h>
 #else
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
new file mode 100644
index 0000000..10a75b5
--- /dev/null
+++ b/arch/ia64/kernel/pci-dma.c
@@ -0,0 +1,129 @@
+/*
+ * Dynamic DMA mapping support.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/dmar.h>
+#include <asm/iommu.h>
+#include <asm/machvec.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/machvec.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_DMAR
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/iommu.h>
+
+dma_addr_t bad_dma_address __read_mostly;
+EXPORT_SYMBOL(bad_dma_address);
+
+static int iommu_sac_force __read_mostly;
+
+int no_iommu __read_mostly;
+#ifdef CONFIG_IOMMU_DEBUG
+int force_iommu __read_mostly = 1;
+#else
+int force_iommu __read_mostly;
+#endif
+
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly;
+
+/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
+   mask would probably be better, but this is bug-to-bug compatible
+   with i386. */
+struct device fallback_dev = {
+	.bus_id = "fallback device",
+	.coherent_dma_mask = DMA_32BIT_MASK,
+	.dma_mask = &fallback_dev.coherent_dma_mask,
+};
+
+void __init pci_iommu_alloc(void)
+{
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
+static int __init pci_iommu_init(void)
+{
+	if (iommu_detected)
+		intel_iommu_init();
+
+	return 0;
+}
+
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
+
+void pci_iommu_shutdown(void)
+{
+	return;
+}
+
+void __init
+iommu_dma_init(void)
+{
+	return;
+}
+
+struct dma_mapping_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+int iommu_dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+#ifdef CONFIG_PCI
+	if (mask > 0xffffffff && forbid_dac > 0) {
+		dev_info(dev, "Disallowing DAC for device\n");
+		return 0;
+	}
+#endif
+
+	if (ops->dma_supported_op)
+		return ops->dma_supported_op(dev, mask);
+
+	/* Copied from i386. Doesn't make much sense, because it will
+	   only work for pci_alloc_coherent.
+	   The caller just has to use GFP_DMA in this case. */
+	if (mask < DMA_24BIT_MASK)
+		return 0;
+
+	/* Tell the device to use SAC when IOMMU force is on.  This
+	   allows the driver to use cheaper accesses in some cases.
+
+	   The problem with this is that if we overflow the IOMMU area
+	   and return DAC as the fallback address, the device may not
+	   handle it correctly.
+
+	   As a special case some controllers have a 39bit address
+	   mode that is as efficient as 32bit (aic79xx). Don't force
+	   SAC for these.  Assume all masks <= 40 bits are of this
+	   type. Normally this doesn't make any difference, but gives
+	   more gentle handling of IOMMU overflow. */
+	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
+		dev_info(dev, "Force SAC with mask %lx\n", mask);
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(iommu_dma_supported);
+
+#endif
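
The SAC/DAC policy in iommu_dma_supported() above is easiest to see with concrete masks. The snippet below is only an illustrative sketch (the helper name is hypothetical, and it assumes iommu_sac_force is set and DAC is not otherwise forbidden); the mask constants come from linux/dma-mapping.h.

#include <linux/device.h>
#include <linux/dma-mapping.h>

extern int iommu_dma_supported(struct device *dev, u64 mask);

/* Illustrative only: expected results for a few common masks. */
static void dma_mask_policy_sketch(struct device *dev)
{
	/* 32-bit mask: plain SAC, always acceptable -> returns 1 */
	iommu_dma_supported(dev, DMA_32BIT_MASK);

	/* 39-bit mask (aic79xx-style): below the 40-bit cutoff -> returns 1 */
	iommu_dma_supported(dev, DMA_39BIT_MASK);

	/* 64-bit mask: >= DMA_40BIT_MASK, so SAC is forced -> returns 0 */
	iommu_dma_supported(dev, DMA_64BIT_MASK);
}
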
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
new file mode 100644
index 0000000..16c5051
--- /dev/null
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -0,0 +1,46 @@
+/* Glue code to lib/swiotlb.c */
+
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+#include <asm/iommu.h>
+#include <asm/machvec.h>
+
+int swiotlb __read_mostly;
+EXPORT_SYMBOL(swiotlb);
+
+struct dma_mapping_ops swiotlb_dma_ops = {
+	.mapping_error = swiotlb_dma_mapping_error,
+	.alloc_coherent = swiotlb_alloc_coherent,
+	.free_coherent = swiotlb_free_coherent,
+	.map_single = swiotlb_map_single,
+	.unmap_single = swiotlb_unmap_single,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = swiotlb_sync_sg_for_device,
+	.map_sg = swiotlb_map_sg,
+	.unmap_sg = swiotlb_unmap_sg,
+	.dma_supported_op = swiotlb_dma_supported,
+};
+
+void __init pci_swiotlb_init(void)
+{
+	if (!iommu_detected) {
+#ifdef CONFIG_IA64_GENERIC
+		swiotlb = 1;
+		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
+		machvec_init("dig");
+		swiotlb_init();
+		dma_ops = &swiotlb_dma_ops;
+#else
+		panic("Unable to find Intel IOMMU");
+#endif
+	}
+}
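
For context, here is a hedged sketch of the driver-side counterpart: a PCI driver negotiates its DMA mask, and on a generic ia64 kernel where no Intel IOMMU was detected the swiotlb_dma_ops installed above end up servicing it. The probe function and its flow are hypothetical.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe: prefer 64-bit DMA, fall back to 32-bit (bounced
 * through swiotlb when the device cannot reach the buffer directly). */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
		return 0;
	return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}
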
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index fc8f350..ada4605 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,7 @@
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
 #include <linux/completion.h>
+#include <linux/tracehook.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -3684,7 +3685,7 @@
 
 		PFM_SET_WORK_PENDING(task, 1);
 
-		tsk_set_notify_resume(task);
+		set_notify_resume(task);
 
 		/*
 		 * XXX: send reschedule if task runs on another CPU
@@ -5044,8 +5045,6 @@
 
 	PFM_SET_WORK_PENDING(current, 0);
 
-	tsk_clear_notify_resume(current);
-
 	regs = task_pt_regs(current);
 
 	/*
@@ -5414,7 +5413,7 @@
 			 * when coming from ctxsw, current still points to the
 			 * previous task, therefore we must work with task and not current.
 			 */
-			tsk_set_notify_resume(task);
+			set_notify_resume(task);
 		}
 		/*
 		 * defer until state is changed (shorten spin window). the context is locked
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 3ab8373..c571627 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/kdebug.h>
 #include <linux/utsname.h>
+#include <linux/tracehook.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -160,21 +161,6 @@
 		show_stack(NULL, NULL);
 }
 
-void tsk_clear_notify_resume(struct task_struct *tsk)
-{
-#ifdef CONFIG_PERFMON
-	if (tsk->thread.pfm_needs_checking)
-		return;
-#endif
-	if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE))
-		return;
-	clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
-}
-
-/*
- * do_notify_resume_user():
- *	Called from notify_resume_user at entry.S, with interrupts disabled.
- */
 void
 do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
@@ -203,6 +189,11 @@
 		ia64_do_signal(scr, in_syscall);
 	}
 
+	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(&scr->pt);
+	}
+
 	/* copy user rbs to kernel rbs */
 	if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
 		local_irq_enable();	/* force interrupt enable */
@@ -251,7 +242,6 @@
 /* We don't actually take CPU down, just spin without interrupts. */
 static inline void play_dead(void)
 {
-	extern void ia64_cpu_local_tick (void);
 	unsigned int this_cpu = smp_processor_id();
 
 	/* Ack it */
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 2a9943b..92c9689 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/signal.h>
 #include <linux/regset.h>
 #include <linux/elf.h>
+#include <linux/tracehook.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -603,7 +604,7 @@
 {
 	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
 		return;
-	tsk_set_notify_resume(current);
+	set_notify_resume(current);
 	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
 }
 
@@ -613,7 +614,6 @@
 void ia64_sync_krbs(void)
 {
 	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
-	tsk_clear_notify_resume(current);
 
 	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 }
@@ -644,7 +644,7 @@
 		spin_lock_irq(&child->sighand->siglock);
 		if (child->state == TASK_STOPPED &&
 		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
-			tsk_set_notify_resume(child);
+			set_notify_resume(child);
 
 			child->state = TASK_TRACED;
 			stopped = 1;
@@ -1232,37 +1232,16 @@
 }
 
 
-static void
-syscall_trace (void)
-{
-	/*
-	 * The 0x80 provides a way for the tracing parent to
-	 * distinguish between a syscall stop and SIGTRAP delivery.
-	 */
-	ptrace_notify(SIGTRAP
-		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
-
-	/*
-	 * This isn't the same as continuing with a signal, but it
-	 * will do for normal use.  strace only continues with a
-	 * signal if the stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
-}
-
 /* "asmlinkage" so the input arguments are preserved... */
 
-asmlinkage void
+asmlinkage long
 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 		     long arg4, long arg5, long arg6, long arg7,
 		     struct pt_regs regs)
 {
-	if (test_thread_flag(TIF_SYSCALL_TRACE) 
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		if (tracehook_report_syscall_entry(&regs))
+			return -ENOSYS;
 
 	/* copy user rbs to kernel rbs */
 	if (test_thread_flag(TIF_RESTORE_RSE))
@@ -1283,6 +1262,7 @@
 		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
 	}
 
+	return 0;
 }
 
 /* "asmlinkage" so the input arguments are preserved... */
@@ -1292,6 +1272,8 @@
 		     long arg4, long arg5, long arg6, long arg7,
 		     struct pt_regs regs)
 {
+	int step;
+
 	if (unlikely(current->audit_context)) {
 		int success = AUDITSC_RESULT(regs.r10);
 		long result = regs.r8;
@@ -1301,10 +1283,9 @@
 		audit_syscall_exit(success, result);
 	}
 
-	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-	    || test_thread_flag(TIF_SINGLESTEP))
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
+	step = test_thread_flag(TIF_SINGLESTEP);
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(&regs, step);
 
 	/* copy user rbs to kernel rbs */
 	if (test_thread_flag(TIF_RESTORE_RSE))
@@ -1940,7 +1921,7 @@
 {
 	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
 		return 0;
-	tsk_set_notify_resume(target);
+	set_notify_resume(target);
 	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
 		NULL, NULL);
 }
@@ -2199,3 +2180,68 @@
 #endif
 	return &user_ia64_view;
 }
+
+struct syscall_get_set_args {
+	unsigned int i;
+	unsigned int n;
+	unsigned long *args;
+	struct pt_regs *regs;
+	int rw;
+};
+
+static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
+{
+	struct syscall_get_set_args *args = data;
+	struct pt_regs *pt = args->regs;
+	unsigned long *krbs, cfm, ndirty;
+	int i, count;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	cfm = pt->cr_ifs;
+	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
+	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
+
+	count = 0;
+	if (in_syscall(pt))
+		count = min_t(int, args->n, cfm & 0x7f);
+
+	for (i = 0; i < count; i++) {
+		if (args->rw)
+			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
+				args->args[i];
+		else
+			args->args[i] = *ia64_rse_skip_regs(krbs,
+				ndirty + i + args->i);
+	}
+
+	if (!args->rw) {
+		while (i < args->n) {
+			args->args[i] = 0;
+			i++;
+		}
+	}
+}
+
+void ia64_syscall_get_set_arguments(struct task_struct *task,
+	struct pt_regs *regs, unsigned int i, unsigned int n,
+	unsigned long *args, int rw)
+{
+	struct syscall_get_set_args data = {
+		.i = i,
+		.n = n,
+		.args = args,
+		.regs = regs,
+		.rw = rw,
+	};
+
+	if (task == current)
+		unw_init_running(syscall_get_set_args_cb, &data);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, task);
+		syscall_get_set_args_cb(&ufi, &data);
+	}
+}
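
ia64_syscall_get_set_arguments() is the backend for the generic syscall argument accessors used by tracehook; the wrapper itself is not part of this hunk, but it presumably looks roughly like the sketch below (header placement and the exact prototype are assumptions).

/* Sketch of an asm/syscall.h-style wrapper (assumed, not from this hunk). */
static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	BUG_ON(i + n > 6);
	ia64_syscall_get_set_arguments(task, regs, i, n, args, 0 /* read */);
}
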
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index de636b2..ae79117 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -116,6 +116,13 @@
  */
 #define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
 unsigned long ia64_i_cache_stride_shift = ~0;
+/*
+ * "clflush_cache_range()" needs to know what processor-dependent stride size
+ * to use when it flushes cache lines that include both d-cache and i-cache.
+ */
+/* Safest way to go: 32 bytes by 32 bytes */
+#define	CACHE_STRIDE_SHIFT	5
+unsigned long ia64_cache_stride_shift = ~0;
 
 /*
  * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
@@ -352,7 +359,7 @@
 	}
 #endif
 
-#ifdef CONFIG_PROC_VMCORE
+#ifdef CONFIG_CRASH_DUMP
 	if (reserve_elfcorehdr(&rsvd_region[n].start,
 			       &rsvd_region[n].end) == 0)
 		n++;
@@ -478,7 +485,12 @@
 }
 early_param("nomca", setup_nomca);
 
-#ifdef CONFIG_PROC_VMCORE
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
+#ifdef CONFIG_CRASH_DUMP
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel.
  */
@@ -502,11 +514,11 @@
 	 * to work properly.
 	 */
 
-	if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+	if (!is_vmcore_usable())
 		return -EINVAL;
 
 	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
-		elfcorehdr_addr = ELFCORE_ADDR_MAX;
+		vmcore_unusable();
 		return -EINVAL;
 	}
 
@@ -847,13 +859,14 @@
 }
 
 /*
- * Calculate the max. cache line size.
+ * Do the following calculations:
  *
- * In addition, the minimum of the i-cache stride sizes is calculated for
- * "flush_icache_range()".
+ * 1. the max. cache line size.
+ * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
+ * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
  */
 static void __cpuinit
-get_max_cacheline_size (void)
+get_cache_info(void)
 {
 	unsigned long line_size, max = 1;
 	u64 l, levels, unique_caches;
@@ -867,12 +880,14 @@
                 max = SMP_CACHE_BYTES;
 		/* Safest setup for "flush_icache_range()" */
 		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
+		/* Safest setup for "clflush_cache_range()" */
+		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
 		goto out;
         }
 
 	for (l = 0; l < levels; ++l) {
-		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
-						    &cci);
+		/* cache_type (data_or_unified)=2 */
+		status = ia64_pal_cache_config_info(l, 2, &cci);
 		if (status != 0) {
 			printk(KERN_ERR
 			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
@@ -880,15 +895,21 @@
 			max = SMP_CACHE_BYTES;
 			/* The safest setup for "flush_icache_range()" */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
+			/* The safest setup for "clflush_cache_range()" */
+			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
 			cci.pcci_unified = 1;
+		} else {
+			if (cci.pcci_stride < ia64_cache_stride_shift)
+				ia64_cache_stride_shift = cci.pcci_stride;
+
+			line_size = 1 << cci.pcci_line_size;
+			if (line_size > max)
+				max = line_size;
 		}
-		line_size = 1 << cci.pcci_line_size;
-		if (line_size > max)
-			max = line_size;
+
 		if (!cci.pcci_unified) {
-			status = ia64_pal_cache_config_info(l,
-						    /* cache_type (instruction)= */ 1,
-						    &cci);
+			/* cache_type (instruction)=1 */
+			status = ia64_pal_cache_config_info(l, 1, &cci);
 			if (status != 0) {
 				printk(KERN_ERR
 				"%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
@@ -942,7 +963,7 @@
 	}
 #endif
 
-	get_max_cacheline_size();
+	get_cache_info();
 
 	/*
 	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 19c5a78..e12500a 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
@@ -439,6 +440,13 @@
 		sigaddset(&current->blocked, sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	/*
+	 * Let tracing know that we've done the handler setup.
+	 */
+	tracehook_signal_handler(sig, info, ka, &scr->pt,
+				 test_thread_flag(TIF_SINGLESTEP));
+
 	return 1;
 }
 
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 2a0d27f..1d8c888 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -60,3 +60,58 @@
 	mov	ar.lc=r3		// restore ar.lc
 	br.ret.sptk.many rp
 END(flush_icache_range)
+
+	/*
+	 * clflush_cache_range(start,size)
+	 *
+	 *	Flush cache lines from start to start+size-1.
+	 *
+	 *	Must deal with range from start to start+size-1 but nothing else
+	 *	(need to be careful not to touch addresses that may be
+	 *	unmapped).
+	 *
+	 *	Note: "in0" and "in1" are preserved for debugging purposes.
+	 */
+	.section .kprobes.text,"ax"
+GLOBAL_ENTRY(clflush_cache_range)
+
+	.prologue
+	alloc	r2=ar.pfs,2,0,0,0
+	movl	r3=ia64_cache_stride_shift
+	mov	r21=1
+	add     r22=in1,in0
+	;;
+	ld8	r20=[r3]		// r20: stride shift
+	sub	r22=r22,r0,1		// last byte address
+	;;
+	shr.u	r23=in0,r20		// start / (stride size)
+	shr.u	r22=r22,r20		// (last byte address) / (stride size)
+	shl	r21=r21,r20		// r21: stride size of the i-cache(s)
+	;;
+	sub	r8=r22,r23		// number of strides - 1
+	shl	r24=r23,r20		// r24: addresses for "fc" =
+					//	"start" rounded down to stride
+					//	boundary
+	.save	ar.lc,r3
+	mov	r3=ar.lc		// save ar.lc
+	;;
+
+	.body
+	mov	ar.lc=r8
+	;;
+	/*
+	 * 32 byte aligned loop, even number of (actually 2) bundles
+	 */
+.Loop_fc:
+	fc	r24		// issuable on M0 only
+	add	r24=r21,r24	// we flush "stride size" bytes per iteration
+	nop.i	0
+	br.cloop.sptk.few .Loop_fc
+	;;
+	sync.i
+	;;
+	srlz.i
+	;;
+	mov	ar.lc=r3		// restore ar.lc
+	br.ret.sptk.many rp
+END(clflush_cache_range)
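
For readers less fluent in ia64 assembly, the loop above is roughly equivalent to the C sketch below; it assumes the ia64_fc()/ia64_sync_i()/ia64_srlz_i() intrinsics and the ia64_cache_stride_shift variable introduced in setup.c, and is not meant to replace the hand-written stub.

#include <asm/intrinsics.h>

extern unsigned long ia64_cache_stride_shift;

/* Rough C equivalent of clflush_cache_range() above (sketch only). */
static void clflush_cache_range_sketch(void *start, int size)
{
	unsigned long stride = 1UL << ia64_cache_stride_shift;
	unsigned long addr = (unsigned long)start & ~(stride - 1);
	unsigned long last = (unsigned long)start + size - 1;

	for (; addr <= last; addr += stride)
		ia64_fc((void *)addr);	/* flush one d/i-cache line */
	ia64_sync_i();			/* wait for the flushes to complete */
	ia64_srlz_i();			/* serialize the instruction stream */
}
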
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index f482a90..054bcd9 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -700,23 +700,6 @@
 
 	return ret;
 }
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
-	unsigned long start_pfn, end_pfn;
-	unsigned long timeout = 120 * HZ;
-	int ret;
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = start_pfn + (size >> PAGE_SHIFT);
-	ret = offline_pages(start_pfn, end_pfn, timeout);
-	if (ret)
-		goto out;
-	/* we can free mem_map at this point */
-out:
-	return ret;
-}
-EXPORT_SYMBOL_GPL(remove_memory);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif
 
 /*
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 8caf424..bd9818a 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -362,9 +362,13 @@
 		per_cpu(ia64_tr_num, cpu) =
 				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
 	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
+		static int justonce = 1;
 		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
-		printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX!"
-			"IA64_TR_ALLOC_MAX should be extended\n");
+		if (justonce) {
+			justonce = 0;
+			printk(KERN_DEBUG "TR register number exceeds "
+			       "IA64_TR_ALLOC_MAX!\n");
+		}
 	}
 }
 
diff --git a/arch/ia64/oprofile/init.c b/arch/ia64/oprofile/init.c
index 125a602..31b545c 100644
--- a/arch/ia64/oprofile/init.c
+++ b/arch/ia64/oprofile/init.c
@@ -12,11 +12,11 @@
 #include <linux/init.h>
 #include <linux/errno.h>
  
-extern int perfmon_init(struct oprofile_operations * ops);
+extern int perfmon_init(struct oprofile_operations *ops);
 extern void perfmon_exit(void);
 extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	int ret = -ENODEV;
 
diff --git a/arch/ia64/oprofile/perfmon.c b/arch/ia64/oprofile/perfmon.c
index bc41dd3..192d3e8 100644
--- a/arch/ia64/oprofile/perfmon.c
+++ b/arch/ia64/oprofile/perfmon.c
@@ -56,7 +56,7 @@
 };
 
 
-static char * get_cpu_type(void)
+static char *get_cpu_type(void)
 {
 	__u8 family = local_cpu_data->family;
 
@@ -75,7 +75,7 @@
 
 static int using_perfmon;
 
-int perfmon_init(struct oprofile_operations * ops)
+int perfmon_init(struct oprofile_operations *ops)
 {
 	int ret = pfm_register_buffer_fmt(&oprofile_fmt);
 	if (ret)
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7545037..211fcfd 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -614,12 +614,17 @@
  * vector to get the base address.
  */
 int
-pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
+pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
+			   enum pci_mmap_state mmap_state)
 {
 	unsigned long size = vma->vm_end - vma->vm_start;
 	pgprot_t prot;
 	char *addr;
 
+	/* We only support mmap'ing of legacy memory space */
+	if (mmap_state != pci_mmap_mem)
+		return -ENOSYS;
+
 	/*
 	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
 	 * for more details.
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed
new file mode 100644
index 0000000..ba66ac2
--- /dev/null
+++ b/arch/ia64/scripts/pvcheck.sed
@@ -0,0 +1,32 @@
+#
+# Checker for paravirtualizations of privileged operations.
+#
+s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g
+s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g
+s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g
+s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g
+s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g
+s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g
+s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not be used directly\"/g
+s/mov.*=.*cr\.itir/.warning \"cr.itir should not be used directly\"/g
+s/mov.*=.*cr\.isr/.warning \"cr.isr should not be used directly\"/g
+s/mov.*=.*cr\.iha/.warning \"cr.iha should not be used directly\"/g
+s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not be used directly\"/g
+s/mov.*=.*cr\.iim/.warning \"cr.iim should not be used directly\"/g
+s/mov.*=.*cr\.iip/.warning \"cr.iip should not be used directly\"/g
+s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not be used directly\"/g
+s/mov.*=[^\.]*psr/.warning \"psr should not be used directly\"/g	# avoid ar.fpsr
+s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not be used directly\"/g
+s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not be used directly\"/g
+s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not be used directly\"/g
+s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not be used directly\"/g
+s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not be used directly\"/g
+s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not be used directly\"/g
+s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not be used directly\"/g
+s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not be used directly\"/g
+s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not be used directly\"/g
+s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g
+s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g
+s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g
+s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g
+s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
new file mode 100644
index 0000000..f1683a2
--- /dev/null
+++ b/arch/ia64/xen/Kconfig
@@ -0,0 +1,26 @@
+#
+# This Kconfig describes xen/ia64 options
+#
+
+config XEN
+	bool "Xen hypervisor support"
+	default y
+	depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL
+	select XEN_XENCOMM
+	select NO_IDLE_HZ
+
+	# these are required for save/restore.
+	select ARCH_SUSPEND_POSSIBLE
+	select SUSPEND
+	select PM_SLEEP
+	help
+	  Enable Xen hypervisor support.  Resulting kernel runs
+	  both as a guest OS on Xen and natively on hardware.
+
+config XEN_XENCOMM
+	depends on XEN
+	bool
+
+config NO_IDLE_HZ
+	depends on XEN
+	bool
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
new file mode 100644
index 0000000..0ad0224
--- /dev/null
+++ b/arch/ia64/xen/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for Xen components
+#
+
+obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
+	 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o
+
+obj-$(CONFIG_IA64_GENERIC) += machvec.o
+
+AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
+
+# xen multi compile
+ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S
+ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
+obj-y += $(ASM_PARAVIRT_OBJS)
+define paravirtualized_xen
+AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
+endef
+$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
+
+$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
+	$(call if_changed_dep,as_o_S)
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c
new file mode 100644
index 0000000..777dd9a
--- /dev/null
+++ b/arch/ia64/xen/grant-table.c
@@ -0,0 +1,155 @@
+/******************************************************************************
+ * arch/ia64/xen/grant-table.c
+ *
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/grant_table.h>
+
+#include <asm/xen/hypervisor.h>
+
+struct vm_struct *xen_alloc_vm_area(unsigned long size)
+{
+	int order;
+	unsigned long virt;
+	unsigned long nr_pages;
+	struct vm_struct *area;
+
+	order = get_order(size);
+	virt = __get_free_pages(GFP_KERNEL, order);
+	if (virt == 0)
+		goto err0;
+	nr_pages = 1 << order;
+	scrub_pages(virt, nr_pages);
+
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	if (area == NULL)
+		goto err1;
+
+	area->flags = VM_IOREMAP;
+	area->addr = (void *)virt;
+	area->size = size;
+	area->pages = NULL;
+	area->nr_pages = nr_pages;
+	area->phys_addr = 0;	/* xenbus_map_ring_valloc uses this field!  */
+
+	return area;
+
+err1:
+	free_pages(virt, order);
+err0:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(xen_alloc_vm_area);
+
+void xen_free_vm_area(struct vm_struct *area)
+{
+	unsigned int order = get_order(area->size);
+	unsigned long i;
+	unsigned long phys_addr = __pa(area->addr);
+
+	/* This area is used for foreign page mapping, so the
+	 * underlying machine page may not be assigned. */
+	for (i = 0; i < (1 << order); i++) {
+		unsigned long ret;
+		unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
+		struct xen_memory_reservation reservation = {
+			.nr_extents   = 1,
+			.address_bits = 0,
+			.extent_order = 0,
+			.domid        = DOMID_SELF
+		};
+		set_xen_guest_handle(reservation.extent_start, &gpfn);
+		ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
+					   &reservation);
+		BUG_ON(ret != 1);
+	}
+	free_pages((unsigned long)area->addr, order);
+	kfree(area);
+}
+EXPORT_SYMBOL_GPL(xen_free_vm_area);
+
+
+/****************************************************************************
+ * grant table hack
+ * cmd: GNTTABOP_xxx
+ */
+
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+			   unsigned long max_nr_gframes,
+			   struct grant_entry **__shared)
+{
+	*__shared = __va(frames[0] << PAGE_SHIFT);
+	return 0;
+}
+
+void arch_gnttab_unmap_shared(struct grant_entry *shared,
+			      unsigned long nr_gframes)
+{
+	/* nothing */
+}
+
+static void
+gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
+{
+	uint32_t flags;
+
+	flags = uop->flags;
+
+	if (flags & GNTMAP_host_map) {
+		if (flags & GNTMAP_application_map) {
+			printk(KERN_DEBUG
+			       "GNTMAP_application_map is not supported yet: "
+			       "flags 0x%x\n", flags);
+			BUG();
+		}
+		if (flags & GNTMAP_contains_pte) {
+			printk(KERN_DEBUG
+			       "GNTMAP_contains_pte is not supported yet: "
+			       "flags 0x%x\n", flags);
+			BUG();
+		}
+	} else if (flags & GNTMAP_device_map) {
+		printk(KERN_DEBUG
+		       "GNTMAP_device_map is not supported yet 0x%x\n", flags);
+		BUG();	/* not yet. actually this flag is not used. */
+	} else {
+		BUG();
+	}
+}
+
+int
+HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+	if (cmd == GNTTABOP_map_grant_ref) {
+		unsigned int i;
+		for (i = 0; i < count; i++) {
+			gnttab_map_grant_ref_pre(
+				(struct gnttab_map_grant_ref *)uop + i);
+		}
+	}
+	return xencomm_hypercall_grant_table_op(cmd, uop, count);
+}
+
+EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
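
A minimal usage sketch of the VM-area helpers above; the caller, its name, and the error handling are hypothetical, but this is the shape of what xenbus-style ring-mapping code relies on.

/* Hypothetical caller: reserve an area for a foreign mapping, then free it. */
static int example_map_foreign_page(void)
{
	struct vm_struct *area;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (area == NULL)
		return -ENOMEM;

	/* ... grant-map a foreign page at area->addr here ... */

	xen_free_vm_area(area);
	return 0;
}
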
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
new file mode 100644
index 0000000..d4ff0b9
--- /dev/null
+++ b/arch/ia64/xen/hypercall.S
@@ -0,0 +1,91 @@
+/*
+ * Support routines for Xen hypercalls
+ *
+ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
+ * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/intrinsics.h>
+#include <asm/xen/privop.h>
+
+/*
+ * Hypercalls without parameter.
+ */
+#define __HCALL0(name,hcall)		\
+	GLOBAL_ENTRY(name);		\
+	break	hcall;			\
+	br.ret.sptk.many rp;		\
+	END(name)
+
+/*
+ * Hypercalls with 1 parameter.
+ */
+#define __HCALL1(name,hcall)		\
+	GLOBAL_ENTRY(name);		\
+	mov r8=r32;			\
+	break	hcall;			\
+	br.ret.sptk.many rp;		\
+	END(name)
+
+/*
+ * Hypercalls with 2 parameters.
+ */
+#define __HCALL2(name,hcall)		\
+	GLOBAL_ENTRY(name);		\
+	mov r8=r32;			\
+	mov r9=r33;			\
+	break	hcall;			\
+	br.ret.sptk.many rp;		\
+	END(name)
+
+__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
+__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
+__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
+__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
+
+__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
+__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
+__HCALL1(xen_thash, HYPERPRIVOP_THASH)
+__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
+__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
+__HCALL1(xen_fc, HYPERPRIVOP_FC)
+__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
+__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
+
+__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
+__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
+__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
+
+#ifdef CONFIG_IA32_SUPPORT
+__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
+__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG)	// refer SDM vol1 3.1.8
+#endif /* CONFIG_IA32_SUPPORT */
+
+GLOBAL_ENTRY(xen_set_rr0_to_rr4)
+	mov r8=r32
+	mov r9=r33
+	mov r10=r34
+	mov r11=r35
+	mov r14=r36
+	XEN_HYPER_SET_RR0_TO_RR4
+	br.ret.sptk.many rp
+	;;
+END(xen_set_rr0_to_rr4)
+
+GLOBAL_ENTRY(xen_send_ipi)
+	mov r14=r32
+	mov r15=r33
+	mov r2=0x400
+	break 0x1000
+	;;
+	br.ret.sptk.many rp
+	;;
+END(xen_send_ipi)
+
+GLOBAL_ENTRY(__hypercall)
+	mov r2=r37
+	break 0x1000
+	br.ret.sptk.many b0
+	;;
+END(__hypercall)
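
The C-side prototypes for these stubs live in a header that is not part of this hunk; the signatures below are inferred from the register usage above (r8 carries both the first argument and the return value) and should be read as an assumption, not the definitive API.

/* Assumed prototypes matching the __HCALL0/__HCALL1 stubs above. */
extern unsigned long xen_get_tpr(void);		/* HYPERPRIVOP_GET_TPR */
extern void xen_set_tpr(unsigned long tpr);	/* HYPERPRIVOP_SET_TPR */

/* Hypothetical helper showing how the stubs compose from C. */
static inline void xen_raise_tpr_sketch(unsigned long new_tpr)
{
	if (new_tpr > xen_get_tpr())
		xen_set_tpr(new_tpr);
}
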
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
new file mode 100644
index 0000000..cac4d97
--- /dev/null
+++ b/arch/ia64/xen/hypervisor.c
@@ -0,0 +1,96 @@
+/******************************************************************************
+ * arch/ia64/xen/hypervisor.c
+ *
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/efi.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/privop.h>
+
+#include "irq_xen.h"
+
+struct shared_info *HYPERVISOR_shared_info __read_mostly =
+	(struct shared_info *)XSI_BASE;
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
+DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+
+struct start_info *xen_start_info;
+EXPORT_SYMBOL(xen_start_info);
+
+EXPORT_SYMBOL(xen_domain_type);
+
+EXPORT_SYMBOL(__hypercall);
+
+/* Stolen from arch/x86/xen/enlighten.c */
+/*
+ * Flag to determine whether vcpu info placement is available on all
+ * VCPUs.  We assume it is to start with, and then set it to zero on
+ * the first failure.  This is because it can succeed on some VCPUs
+ * and not others, since it can involve hypervisor memory allocation,
+ * or because the guest failed to guarantee all the appropriate
+ * constraints on all VCPUs (ie buffer can't cross a page boundary).
+ *
+ * Note that any particular CPU may be using a placed vcpu structure,
+ * but we can only optimise if they all are.
+ *
+ * 0: not available, 1: available
+ */
+
+static void __init xen_vcpu_setup(int cpu)
+{
+	/*
+	 * WARNING:
+	 * before changing MAX_VIRT_CPUS,
+	 * check that shared_info fits on a page
+	 */
+	BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
+	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+}
+
+void __init xen_setup_vcpu_info_placement(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		xen_vcpu_setup(cpu);
+}
+
+void __cpuinit
+xen_cpu_init(void)
+{
+	xen_smp_intr_init();
+}
+
+/**************************************************************************
+ * opt feature
+ */
+void
+xen_ia64_enable_opt_feature(void)
+{
+	/* Enable region 7 identity map optimizations in Xen */
+	struct xen_ia64_opt_feature optf;
+
+	optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
+	optf.on = XEN_IA64_OPTF_ON;
+	optf.pgprot = pgprot_val(PAGE_KERNEL);
+	optf.key = 0;	/* No key on linux. */
+	HYPERVISOR_opt_feature(&optf);
+}
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
new file mode 100644
index 0000000..af93aad
--- /dev/null
+++ b/arch/ia64/xen/irq_xen.c
@@ -0,0 +1,435 @@
+/******************************************************************************
+ * arch/ia64/xen/irq_xen.c
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/cpu.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/callback.h>
+#include <xen/events.h>
+
+#include <asm/xen/privop.h>
+
+#include "irq_xen.h"
+
+/***************************************************************************
+ * pv_irq_ops
+ * irq operations
+ */
+
+static int
+xen_assign_irq_vector(int irq)
+{
+	struct physdev_irq irq_op;
+
+	irq_op.irq = irq;
+	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
+		return -ENOSPC;
+
+	return irq_op.vector;
+}
+
+static void
+xen_free_irq_vector(int vector)
+{
+	struct physdev_irq irq_op;
+
+	if (vector < IA64_FIRST_DEVICE_VECTOR ||
+	    vector > IA64_LAST_DEVICE_VECTOR)
+		return;
+
+	irq_op.vector = vector;
+	if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
+		printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
+		       __func__, vector);
+}
+
+
+static DEFINE_PER_CPU(int, timer_irq) = -1;
+static DEFINE_PER_CPU(int, ipi_irq) = -1;
+static DEFINE_PER_CPU(int, resched_irq) = -1;
+static DEFINE_PER_CPU(int, cmc_irq) = -1;
+static DEFINE_PER_CPU(int, cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, cpep_irq) = -1;
+#define NAME_SIZE	15
+static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+#undef NAME_SIZE
+
+struct saved_irq {
+	unsigned int irq;
+	struct irqaction *action;
+};
+/* 16 should be more than enough, since only a few percpu irqs
+ * are registered early.
+ */
+#define MAX_LATE_IRQ	16
+static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
+static unsigned short late_irq_cnt;
+static unsigned short saved_irq_cnt;
+static int xen_slab_ready;
+
+#ifdef CONFIG_SMP
+/* Dummy stub. Though we could check XEN_RESCHEDULE_VECTOR before __do_IRQ,
+ * that would end up issuing several memory accesses on percpu data and
+ * thus add unnecessary traffic to other paths.
+ */
+static irqreturn_t
+xen_dummy_handler(int irq, void *dev_id)
+{
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction xen_ipi_irqaction = {
+	.handler =	handle_IPI,
+	.flags =	IRQF_DISABLED,
+	.name =		"IPI"
+};
+
+static struct irqaction xen_resched_irqaction = {
+	.handler =	xen_dummy_handler,
+	.flags =	IRQF_DISABLED,
+	.name =		"resched"
+};
+
+static struct irqaction xen_tlb_irqaction = {
+	.handler =	xen_dummy_handler,
+	.flags =	IRQF_DISABLED,
+	.name =		"tlb_flush"
+};
+#endif
+
+/*
+ * This is the xen version of percpu irq registration, which needs to
+ * bind to the xen-specific evtchn sub-system. One trick here is that
+ * the xen evtchn binding interface depends on kmalloc, because the
+ * related port needs to be freed at device/cpu teardown. So we cache
+ * registrations made on the BSP before the slab allocator is ready and
+ * deal with them later. Registrations that happen after slab is ready
+ * are hooked into the xen evtchn machinery immediately.
+ *
+ * FIXME: MCA is not supported so far, and thus the "nomca" boot param
+ * is required.
+ */
+static void
+__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
+			struct irqaction *action, int save)
+{
+	irq_desc_t *desc;
+	int irq = 0;
+
+	if (xen_slab_ready) {
+		switch (vec) {
+		case IA64_TIMER_VECTOR:
+			snprintf(per_cpu(timer_name, cpu),
+				 sizeof(per_cpu(timer_name, cpu)),
+				 "%s%d", action->name, cpu);
+			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
+				action->handler, action->flags,
+				per_cpu(timer_name, cpu), action->dev_id);
+			per_cpu(timer_irq, cpu) = irq;
+			break;
+		case IA64_IPI_RESCHEDULE:
+			snprintf(per_cpu(resched_name, cpu),
+				 sizeof(per_cpu(resched_name, cpu)),
+				 "%s%d", action->name, cpu);
+			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
+				action->handler, action->flags,
+				per_cpu(resched_name, cpu), action->dev_id);
+			per_cpu(resched_irq, cpu) = irq;
+			break;
+		case IA64_IPI_VECTOR:
+			snprintf(per_cpu(ipi_name, cpu),
+				 sizeof(per_cpu(ipi_name, cpu)),
+				 "%s%d", action->name, cpu);
+			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
+				action->handler, action->flags,
+				per_cpu(ipi_name, cpu), action->dev_id);
+			per_cpu(ipi_irq, cpu) = irq;
+			break;
+		case IA64_CMC_VECTOR:
+			snprintf(per_cpu(cmc_name, cpu),
+				 sizeof(per_cpu(cmc_name, cpu)),
+				 "%s%d", action->name, cpu);
+			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
+						      action->handler,
+						      action->flags,
+						      per_cpu(cmc_name, cpu),
+						      action->dev_id);
+			per_cpu(cmc_irq, cpu) = irq;
+			break;
+		case IA64_CMCP_VECTOR:
+			snprintf(per_cpu(cmcp_name, cpu),
+				 sizeof(per_cpu(cmcp_name, cpu)),
+				 "%s%d", action->name, cpu);
+			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
+						     action->handler,
+						     action->flags,
+						     per_cpu(cmcp_name, cpu),
+						     action->dev_id);
+			per_cpu(cmcp_irq, cpu) = irq;
+			break;
+		case IA64_CPEP_VECTOR:
+			snprintf(per_cpu(cpep_name, cpu),
+				 sizeof(per_cpu(cpep_name, cpu)),
+				 "%s%d", action->name, cpu);
+			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
+						     action->handler,
+						     action->flags,
+						     per_cpu(cpep_name, cpu),
+						     action->dev_id);
+			per_cpu(cpep_irq, cpu) = irq;
+			break;
+		case IA64_CPE_VECTOR:
+		case IA64_MCA_RENDEZ_VECTOR:
+		case IA64_PERFMON_VECTOR:
+		case IA64_MCA_WAKEUP_VECTOR:
+		case IA64_SPURIOUS_INT_VECTOR:
+			/* No need to complain, these aren't supported. */
+			break;
+		default:
+			printk(KERN_WARNING "Percpu irq %d is unsupported "
+			       "by xen!\n", vec);
+			break;
+		}
+		BUG_ON(irq < 0);
+
+		if (irq > 0) {
+			/*
+			 * Mark percpu.  Without this, migrate_irqs() will
+			 * mark the interrupt for migrations and trigger it
+			 * on cpu hotplug.
+			 */
+			desc = irq_desc + irq;
+			desc->status |= IRQ_PER_CPU;
+		}
+	}
+
+	/* For BSP, we cache registered percpu irqs, and then re-walk
+	 * them when initializing APs
+	 */
+	if (!cpu && save) {
+		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
+		saved_percpu_irqs[saved_irq_cnt].irq = vec;
+		saved_percpu_irqs[saved_irq_cnt].action = action;
+		saved_irq_cnt++;
+		if (!xen_slab_ready)
+			late_irq_cnt++;
+	}
+}
+
+static void
+xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
+{
+	__xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
+}
+
+static void
+xen_bind_early_percpu_irq(void)
+{
+	int i;
+
+	xen_slab_ready = 1;
+	/* There's no race when accessing this cached array, since only
+	 * the BSP goes through this step, and only early during boot.
+	 */
+	for (i = 0; i < late_irq_cnt; i++)
+		__xen_register_percpu_irq(smp_processor_id(),
+					  saved_percpu_irqs[i].irq,
+					  saved_percpu_irqs[i].action, 0);
+}
+
+/* FIXME: There's no obvious point at which to check whether slab is
+ * ready, so as a workaround we hook into late_time_init here.
+ */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit
+unbind_evtchn_callback(struct notifier_block *nfb,
+		       unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	if (action == CPU_DEAD) {
+		/* Unregister evtchn.  */
+		if (per_cpu(cpep_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
+			per_cpu(cpep_irq, cpu) = -1;
+		}
+		if (per_cpu(cmcp_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
+			per_cpu(cmcp_irq, cpu) = -1;
+		}
+		if (per_cpu(cmc_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
+			per_cpu(cmc_irq, cpu) = -1;
+		}
+		if (per_cpu(ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
+			per_cpu(ipi_irq, cpu) = -1;
+		}
+		if (per_cpu(resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
+						NULL);
+			per_cpu(resched_irq, cpu) = -1;
+		}
+		if (per_cpu(timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
+			per_cpu(timer_irq, cpu) = -1;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block unbind_evtchn_notifier = {
+	.notifier_call = unbind_evtchn_callback,
+	.priority = 0
+};
+#endif
+
+void xen_smp_intr_init_early(unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+	unsigned int i;
+
+	for (i = 0; i < saved_irq_cnt; i++)
+		__xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
+					  saved_percpu_irqs[i].action, 0);
+#endif
+}
+
+void xen_smp_intr_init(void)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	struct callback_register event = {
+		.type = CALLBACKTYPE_event,
+		.address = { .ip = (unsigned long)&xen_event_callback },
+	};
+
+	if (cpu == 0) {
+		/* Initialization was already done for boot cpu.  */
+#ifdef CONFIG_HOTPLUG_CPU
+		/* Register the notifier only once.  */
+		register_cpu_notifier(&unbind_evtchn_notifier);
+#endif
+		return;
+	}
+
+	/* This should piggyback on vcpu guest context setup */
+	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
+#endif /* CONFIG_SMP */
+}
+
+void __init
+xen_irq_init(void)
+{
+	struct callback_register event = {
+		.type = CALLBACKTYPE_event,
+		.address = { .ip = (unsigned long)&xen_event_callback },
+	};
+
+	xen_init_IRQ();
+	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
+	late_time_init = xen_bind_early_percpu_irq;
+}
+
+void
+xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
+{
+#ifdef CONFIG_SMP
+	/* TODO: we need to call vcpu_up here */
+	if (unlikely(vector == ap_wakeup_vector)) {
+		/* XXX
+		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
+		 * like x86, but we don't want to modify smpboot.c,
+		 * so keep it untouched.
+		 */
+		xen_smp_intr_init_early(cpu);
+
+		xen_send_ipi(cpu, vector);
+		/* vcpu_prepare_and_up(cpu); */
+		return;
+	}
+#endif
+
+	switch (vector) {
+	case IA64_IPI_VECTOR:
+		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
+		break;
+	case IA64_IPI_RESCHEDULE:
+		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
+		break;
+	case IA64_CMCP_VECTOR:
+		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
+		break;
+	case IA64_CPEP_VECTOR:
+		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
+		break;
+	case IA64_TIMER_VECTOR: {
+		/* this is used only once by check_sal_cache_flush()
+		   at boot time */
+		static int used = 0;
+		if (!used) {
+			xen_send_ipi(cpu, IA64_TIMER_VECTOR);
+			used = 1;
+			break;
+		}
+		/* fallthrough */
+	}
+	default:
+		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
+		       vector);
+		notify_remote_via_irq(0); /* defaults to 0 irq */
+		break;
+	}
+}
+
+static void __init
+xen_register_ipi(void)
+{
+#ifdef CONFIG_SMP
+	register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
+	register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
+	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
+#endif
+}
+
+static void
+xen_resend_irq(unsigned int vector)
+{
+	(void)resend_irq_on_evtchn(vector);
+}
+
+const struct pv_irq_ops xen_irq_ops __initdata = {
+	.register_ipi = xen_register_ipi,
+
+	.assign_irq_vector = xen_assign_irq_vector,
+	.free_irq_vector = xen_free_irq_vector,
+	.register_percpu_irq = xen_register_percpu_irq,
+
+	.resend_irq = xen_resend_irq,
+};
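
Elsewhere in this series (xen_pv_ops.c, which is not shown in this hunk) this ops table is presumably installed during early Xen setup; a minimal sketch of that step follows, with a hypothetical function name.

/* Sketch: route the ia64 pv irq hooks through Xen (assumed placement). */
static void __init xen_setup_irq_ops_sketch(void)
{
	pv_irq_ops = xen_irq_ops;
}
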
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h
new file mode 100644
index 0000000..26110f3
--- /dev/null
+++ b/arch/ia64/xen/irq_xen.h
@@ -0,0 +1,34 @@
+/******************************************************************************
+ * arch/ia64/xen/irq_xen.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef IRQ_XEN_H
+#define IRQ_XEN_H
+
+extern void (*late_time_init)(void);
+extern char xen_event_callback;
+void __init xen_init_IRQ(void);
+
+extern const struct pv_irq_ops xen_irq_ops __initdata;
+extern void xen_smp_intr_init(void);
+extern void xen_send_ipi(int cpu, int vec);
+
+#endif /* IRQ_XEN_H */
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c
new file mode 100644
index 0000000..4ad588a
--- /dev/null
+++ b/arch/ia64/xen/machvec.c
@@ -0,0 +1,4 @@
+#define MACHVEC_PLATFORM_NAME           xen
+#define MACHVEC_PLATFORM_HEADER         <asm/machvec_xen.h>
+#include <asm/machvec_init.h>
+
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
new file mode 100644
index 0000000..fd66b04
--- /dev/null
+++ b/arch/ia64/xen/suspend.c
@@ -0,0 +1,64 @@
+/******************************************************************************
+ * arch/ia64/xen/suspend.c
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * suspend/resume
+ */
+
+#include <xen/xen-ops.h>
+#include <asm/xen/hypervisor.h>
+#include "time.h"
+
+void
+xen_mm_pin_all(void)
+{
+	/* nothing */
+}
+
+void
+xen_mm_unpin_all(void)
+{
+	/* nothing */
+}
+
+void xen_pre_device_suspend(void)
+{
+	/* nothing */
+}
+
+void
+xen_pre_suspend(void)
+{
+	/* nothing */
+}
+
+void
+xen_post_suspend(int suspend_cancelled)
+{
+	if (suspend_cancelled)
+		return;
+
+	xen_ia64_enable_opt_feature();
+	/* add more if necessary */
+}
+
+void xen_arch_resume(void)
+{
+	xen_timer_resume_on_aps();
+}
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
new file mode 100644
index 0000000..d15a94c
--- /dev/null
+++ b/arch/ia64/xen/time.c
@@ -0,0 +1,213 @@
+/******************************************************************************
+ * arch/ia64/xen/time.c
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/posix-timers.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+
+#include <asm/timex.h>
+
+#include <asm/xen/hypervisor.h>
+
+#include <xen/interface/vcpu.h>
+
+#include "../kernel/fsyscall_gtod_data.h"
+
+DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+DEFINE_PER_CPU(unsigned long, processed_stolen_time);
+DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+
+/* taken from i386/kernel/time-xen.c */
+static void xen_init_missing_ticks_accounting(int cpu)
+{
+	struct vcpu_register_runstate_memory_area area;
+	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+	int rc;
+
+	memset(runstate, 0, sizeof(*runstate));
+
+	area.addr.v = runstate;
+	rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
+				&area);
+	WARN_ON(rc && rc != -ENOSYS);
+
+	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+					    + runstate->time[RUNSTATE_offline];
+}
+
+/*
+ * Runstate accounting
+ */
+/* stolen from arch/x86/xen/time.c */
+static void get_runstate_snapshot(struct vcpu_runstate_info *res)
+{
+	u64 state_time;
+	struct vcpu_runstate_info *state;
+
+	BUG_ON(preemptible());
+
+	state = &__get_cpu_var(runstate);
+
+	/*
+	 * The runstate info is always updated by the hypervisor on
+	 * the current CPU, so there's no need to use anything
+	 * stronger than a compiler barrier when fetching it.
+	 */
+	do {
+		state_time = state->state_entry_time;
+		rmb();
+		*res = *state;
+		rmb();
+	} while (state->state_entry_time != state_time);
+}
+
+#define NS_PER_TICK (1000000000LL/HZ)
+
+static unsigned long
+consider_steal_time(unsigned long new_itm)
+{
+	unsigned long stolen, blocked;
+	unsigned long delta_itm = 0, stolentick = 0;
+	int cpu = smp_processor_id();
+	struct vcpu_runstate_info runstate;
+	struct task_struct *p = current;
+
+	get_runstate_snapshot(&runstate);
+
+	/*
+	 * Check for the vcpu migration effect: in that case the itc
+	 * value goes backwards, which would produce a huge stolen
+	 * value. The code below just detects and rejects that effect.
+	 */
+	blocked = runstate.time[RUNSTATE_blocked] -
+		  per_cpu(processed_blocked_time, cpu);
+	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
+			   per_cpu(processed_blocked_time, cpu)))
+		blocked = 0;
+
+	stolen = runstate.time[RUNSTATE_runnable] +
+		 runstate.time[RUNSTATE_offline] -
+		 per_cpu(processed_stolen_time, cpu);
+	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
+			   runstate.time[RUNSTATE_offline],
+			   per_cpu(processed_stolen_time, cpu)))
+		stolen = 0;
+
+	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
+		stolentick = ia64_get_itc() - new_itm;
+
+	do_div(stolentick, NS_PER_TICK);
+	stolentick++;
+
+	do_div(stolen, NS_PER_TICK);
+
+	if (stolen > stolentick)
+		stolen = stolentick;
+
+	stolentick -= stolen;
+	do_div(blocked, NS_PER_TICK);
+
+	if (blocked > stolentick)
+		blocked = stolentick;
+
+	if (stolen > 0 || blocked > 0) {
+		account_steal_time(NULL, jiffies_to_cputime(stolen));
+		account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
+		run_local_timers();
+
+		if (rcu_pending(cpu))
+			rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
+
+		scheduler_tick();
+		run_posix_cpu_timers(p);
+		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
+
+		if (cpu == time_keeper_id) {
+			write_seqlock(&xtime_lock);
+			do_timer(stolen + blocked);
+			local_cpu_data->itm_next = delta_itm + new_itm;
+			write_sequnlock(&xtime_lock);
+		} else {
+			local_cpu_data->itm_next = delta_itm + new_itm;
+		}
+		per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
+		per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+	}
+	return delta_itm;
+}
+
+static int xen_do_steal_accounting(unsigned long *new_itm)
+{
+	unsigned long delta_itm;
+	delta_itm = consider_steal_time(*new_itm);
+	*new_itm += delta_itm;
+	if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
+		return 1;
+
+	return 0;
+}
+
+static void xen_itc_jitter_data_reset(void)
+{
+	u64 lcycle, ret;
+
+	do {
+		lcycle = itc_jitter_data.itc_lastcycle;
+		ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
+	} while (unlikely(ret != lcycle));
+}
+
+struct pv_time_ops xen_time_ops __initdata = {
+	.init_missing_ticks_accounting	= xen_init_missing_ticks_accounting,
+	.do_steal_accounting		= xen_do_steal_accounting,
+	.clocksource_resume		= xen_itc_jitter_data_reset,
+};
+
+/* Called after suspend, to resume time.  */
+static void xen_local_tick_resume(void)
+{
+	/* Just trigger a tick.  */
+	ia64_cpu_local_tick();
+	touch_softlockup_watchdog();
+}
+
+void
+xen_timer_resume(void)
+{
+	unsigned int cpu;
+
+	xen_local_tick_resume();
+
+	for_each_online_cpu(cpu)
+		xen_init_missing_ticks_accounting(cpu);
+}
+
+static void ia64_cpu_local_tick_fn(void *unused)
+{
+	xen_local_tick_resume();
+	xen_init_missing_ticks_accounting(smp_processor_id());
+}
+
+void
+xen_timer_resume_on_aps(void)
+{
+	smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
+}
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h
new file mode 100644
index 0000000..f98d7e1
--- /dev/null
+++ b/arch/ia64/xen/time.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ * arch/ia64/xen/time.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+extern struct pv_time_ops xen_time_ops __initdata;
+void xen_timer_resume_on_aps(void);
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c
new file mode 100644
index 0000000..ccaf743
--- /dev/null
+++ b/arch/ia64/xen/xcom_hcall.c
@@ -0,0 +1,441 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ *          Tristan Gingold <tristan.gingold@bull.net>
+ *
+ *          Copyright (c) 2007
+ *          Isaku Yamahata <yamahata at valinux co jp>
+ *                          VA Linux Systems Japan K.K.
+ *          consolidate mini and inline version.
+ */
+
+#include <linux/module.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/grant_table.h>
+#include <xen/interface/callback.h>
+#include <xen/interface/vcpu.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/xencomm.h>
+
+/* Xencomm notes:
+ * This file defines hypercalls to be used by xencomm.  The hypercalls simply
+ * create inlines or mini descriptors for pointers and then call the raw arch
+ * hypercall xencomm_arch_hypercall_XXX
+ *
+ * If the arch wants to directly use these hypercalls, simply define macros
+ * in asm/xen/hypercall.h, eg:
+ *  #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
+ *
+ * The arch may also define HYPERVISOR_xxx as a function and do more operations
+ * before/after doing the hypercall.
+ *
+ * Note: because only inline or mini descriptors are created these functions
+ * must only be called with in-kernel memory parameters.
+ */
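As a hedged illustration of the convention described in the note above (the console_io wrapper below is an example, not something this file defines), an architecture header could route hypercalls through these xencomm variants either as a plain macro or as a thin wrapper:

/* Illustrative only, per the note above: route a hypercall straight
 * through its xencomm variant ... */
#define HYPERVISOR_sched_op	xencomm_hypercall_sched_op

/* ... or wrap it to do extra work before/after the hypercall. */
static inline int HYPERVISOR_console_io(int cmd, int count, char *str)
{
	/* tracing, fallbacks, etc. could be added here */
	return xencomm_hypercall_console_io(cmd, count, str);
}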
+
+int
+xencomm_hypercall_console_io(int cmd, int count, char *str)
+{
+	/* xen early printk uses console io hypercall before
+	 * xencomm initialization. In that case, we just ignore it.
+	 */
+	if (!xencomm_is_initialized())
+		return 0;
+
+	return xencomm_arch_hypercall_console_io
+		(cmd, count, xencomm_map_no_alloc(str, count));
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
+
+int
+xencomm_hypercall_event_channel_op(int cmd, void *op)
+{
+	struct xencomm_handle *desc;
+	desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
+	if (desc == NULL)
+		return -EINVAL;
+
+	return xencomm_arch_hypercall_event_channel_op(cmd, desc);
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
+
+int
+xencomm_hypercall_xen_version(int cmd, void *arg)
+{
+	struct xencomm_handle *desc;
+	unsigned int argsize;
+
+	switch (cmd) {
+	case XENVER_version:
+		/* do not actually pass an argument */
+		return xencomm_arch_hypercall_xen_version(cmd, 0);
+	case XENVER_extraversion:
+		argsize = sizeof(struct xen_extraversion);
+		break;
+	case XENVER_compile_info:
+		argsize = sizeof(struct xen_compile_info);
+		break;
+	case XENVER_capabilities:
+		argsize = sizeof(struct xen_capabilities_info);
+		break;
+	case XENVER_changeset:
+		argsize = sizeof(struct xen_changeset_info);
+		break;
+	case XENVER_platform_parameters:
+		argsize = sizeof(struct xen_platform_parameters);
+		break;
+	case XENVER_get_features:
+		argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
+		break;
+
+	default:
+		printk(KERN_DEBUG
+		       "%s: unknown version op %d\n", __func__, cmd);
+		return -ENOSYS;
+	}
+
+	desc = xencomm_map_no_alloc(arg, argsize);
+	if (desc == NULL)
+		return -EINVAL;
+
+	return xencomm_arch_hypercall_xen_version(cmd, desc);
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
+
+int
+xencomm_hypercall_physdev_op(int cmd, void *op)
+{
+	unsigned int argsize;
+
+	switch (cmd) {
+	case PHYSDEVOP_apic_read:
+	case PHYSDEVOP_apic_write:
+		argsize = sizeof(struct physdev_apic);
+		break;
+	case PHYSDEVOP_alloc_irq_vector:
+	case PHYSDEVOP_free_irq_vector:
+		argsize = sizeof(struct physdev_irq);
+		break;
+	case PHYSDEVOP_irq_status_query:
+		argsize = sizeof(struct physdev_irq_status_query);
+		break;
+
+	default:
+		printk(KERN_DEBUG
+		       "%s: unknown physdev op %d\n", __func__, cmd);
+		return -ENOSYS;
+	}
+
+	return xencomm_arch_hypercall_physdev_op
+		(cmd, xencomm_map_no_alloc(op, argsize));
+}
+
+static int
+xencommize_grant_table_op(struct xencomm_mini **xc_area,
+			  unsigned int cmd, void *op, unsigned int count,
+			  struct xencomm_handle **desc)
+{
+	struct xencomm_handle *desc1;
+	unsigned int argsize;
+
+	switch (cmd) {
+	case GNTTABOP_map_grant_ref:
+		argsize = sizeof(struct gnttab_map_grant_ref);
+		break;
+	case GNTTABOP_unmap_grant_ref:
+		argsize = sizeof(struct gnttab_unmap_grant_ref);
+		break;
+	case GNTTABOP_setup_table:
+	{
+		struct gnttab_setup_table *setup = op;
+
+		argsize = sizeof(*setup);
+
+		if (count != 1)
+			return -EINVAL;
+		desc1 = __xencomm_map_no_alloc
+			(xen_guest_handle(setup->frame_list),
+			 setup->nr_frames *
+			 sizeof(*xen_guest_handle(setup->frame_list)),
+			 *xc_area);
+		if (desc1 == NULL)
+			return -EINVAL;
+		(*xc_area)++;
+		set_xen_guest_handle(setup->frame_list, (void *)desc1);
+		break;
+	}
+	case GNTTABOP_dump_table:
+		argsize = sizeof(struct gnttab_dump_table);
+		break;
+	case GNTTABOP_transfer:
+		argsize = sizeof(struct gnttab_transfer);
+		break;
+	case GNTTABOP_copy:
+		argsize = sizeof(struct gnttab_copy);
+		break;
+	case GNTTABOP_query_size:
+		argsize = sizeof(struct gnttab_query_size);
+		break;
+	default:
+		printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
+		       __func__, cmd);
+		BUG();
+	}
+
+	*desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
+	if (*desc == NULL)
+		return -EINVAL;
+	(*xc_area)++;
+
+	return 0;
+}
+
+int
+xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
+				 unsigned int count)
+{
+	int rc;
+	struct xencomm_handle *desc;
+	XENCOMM_MINI_ALIGNED(xc_area, 2);
+
+	rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
+	if (rc)
+		return rc;
+
+	return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
+
+int
+xencomm_hypercall_sched_op(int cmd, void *arg)
+{
+	struct xencomm_handle *desc;
+	unsigned int argsize;
+
+	switch (cmd) {
+	case SCHEDOP_yield:
+	case SCHEDOP_block:
+		argsize = 0;
+		break;
+	case SCHEDOP_shutdown:
+		argsize = sizeof(struct sched_shutdown);
+		break;
+	case SCHEDOP_poll:
+	{
+		struct sched_poll *poll = arg;
+		struct xencomm_handle *ports;
+
+		argsize = sizeof(struct sched_poll);
+		ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
+				     sizeof(*xen_guest_handle(poll->ports)));
+
+		set_xen_guest_handle(poll->ports, (void *)ports);
+		break;
+	}
+	default:
+		printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
+		return -ENOSYS;
+	}
+
+	desc = xencomm_map_no_alloc(arg, argsize);
+	if (desc == NULL)
+		return -EINVAL;
+
+	return xencomm_arch_hypercall_sched_op(cmd, desc);
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
+
+int
+xencomm_hypercall_multicall(void *call_list, int nr_calls)
+{
+	int rc;
+	int i;
+	struct multicall_entry *mce;
+	struct xencomm_handle *desc;
+	XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
+
+	for (i = 0; i < nr_calls; i++) {
+		mce = (struct multicall_entry *)call_list + i;
+
+		switch (mce->op) {
+		case __HYPERVISOR_update_va_mapping:
+		case __HYPERVISOR_mmu_update:
+			/* No-op on ia64.  */
+			break;
+		case __HYPERVISOR_grant_table_op:
+			rc = xencommize_grant_table_op
+				(&xc_area,
+				 mce->args[0], (void *)mce->args[1],
+				 mce->args[2], &desc);
+			if (rc)
+				return rc;
+			mce->args[1] = (unsigned long)desc;
+			break;
+		case __HYPERVISOR_memory_op:
+		default:
+			printk(KERN_DEBUG
+			       "%s: unhandled multicall op entry op %lu\n",
+			       __func__, mce->op);
+			return -ENOSYS;
+		}
+	}
+
+	desc = xencomm_map_no_alloc(call_list,
+				    nr_calls * sizeof(struct multicall_entry));
+	if (desc == NULL)
+		return -EINVAL;
+
+	return xencomm_arch_hypercall_multicall(desc, nr_calls);
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
+
+int
+xencomm_hypercall_callback_op(int cmd, void *arg)
+{
+	unsigned int argsize;
+	switch (cmd) {
+	case CALLBACKOP_register:
+		argsize = sizeof(struct callback_register);
+		break;
+	case CALLBACKOP_unregister:
+		argsize = sizeof(struct callback_unregister);
+		break;
+	default:
+		printk(KERN_DEBUG
+		       "%s: unknown callback op %d\n", __func__, cmd);
+		return -ENOSYS;
+	}
+
+	return xencomm_arch_hypercall_callback_op
+		(cmd, xencomm_map_no_alloc(arg, argsize));
+}
+
+static int
+xencommize_memory_reservation(struct xencomm_mini *xc_area,
+			      struct xen_memory_reservation *mop)
+{
+	struct xencomm_handle *desc;
+
+	desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
+			mop->nr_extents *
+			sizeof(*xen_guest_handle(mop->extent_start)),
+			xc_area);
+	if (desc == NULL)
+		return -EINVAL;
+
+	set_xen_guest_handle(mop->extent_start, (void *)desc);
+	return 0;
+}
+
+int
+xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
+{
+	GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
+	struct xen_memory_reservation *xmr = NULL;
+	int rc;
+	struct xencomm_handle *desc;
+	unsigned int argsize;
+	XENCOMM_MINI_ALIGNED(xc_area, 2);
+
+	switch (cmd) {
+	case XENMEM_increase_reservation:
+	case XENMEM_decrease_reservation:
+	case XENMEM_populate_physmap:
+		xmr = (struct xen_memory_reservation *)arg;
+		set_xen_guest_handle(extent_start_va[0],
+				     xen_guest_handle(xmr->extent_start));
+
+		argsize = sizeof(*xmr);
+		rc = xencommize_memory_reservation(xc_area, xmr);
+		if (rc)
+			return rc;
+		xc_area++;
+		break;
+
+	case XENMEM_maximum_ram_page:
+		argsize = 0;
+		break;
+
+	case XENMEM_add_to_physmap:
+		argsize = sizeof(struct xen_add_to_physmap);
+		break;
+
+	default:
+		printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
+		return -ENOSYS;
+	}
+
+	desc = xencomm_map_no_alloc(arg, argsize);
+	if (desc == NULL)
+		return -EINVAL;
+
+	rc = xencomm_arch_hypercall_memory_op(cmd, desc);
+
+	switch (cmd) {
+	case XENMEM_increase_reservation:
+	case XENMEM_decrease_reservation:
+	case XENMEM_populate_physmap:
+		set_xen_guest_handle(xmr->extent_start,
+				     xen_guest_handle(extent_start_va[0]));
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
+
+int
+xencomm_hypercall_suspend(unsigned long srec)
+{
+	struct sched_shutdown arg;
+
+	arg.reason = SHUTDOWN_suspend;
+
+	return xencomm_arch_hypercall_sched_op(
+		SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
+}
+
+long
+xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
+{
+	unsigned int argsize;
+	switch (cmd) {
+	case VCPUOP_register_runstate_memory_area: {
+		struct vcpu_register_runstate_memory_area *area =
+			(struct vcpu_register_runstate_memory_area *)arg;
+		argsize = sizeof(*area);
+		set_xen_guest_handle(area->addr.h,
+		     (void *)xencomm_map_no_alloc(area->addr.v,
+						  sizeof(area->addr.v)));
+		break;
+	}
+
+	default:
+		printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
+		return -ENOSYS;
+	}
+
+	return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
+					xencomm_map_no_alloc(arg, argsize));
+}
+
+long
+xencomm_hypercall_opt_feature(void *arg)
+{
+	return xencomm_arch_hypercall_opt_feature(
+		xencomm_map_no_alloc(arg,
+				     sizeof(struct xen_ia64_opt_feature)));
+}
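Nearly every wrapper above follows the same shape: pick the argument size for the command, wrap the in-kernel pointer in an inline descriptor with xencomm_map_no_alloc(), bail out with -EINVAL if the mapping fails, and pass the descriptor to the raw arch hypercall. A hedged sketch of that skeleton (xencomm_hypercall_foo, xencomm_arch_hypercall_foo and struct xen_foo_arg are placeholders, not real hypercalls):

/* Hedged sketch of the common wrapper shape used throughout this file;
 * the "foo" names are placeholders. */
int xencomm_hypercall_foo(void *arg)
{
	struct xencomm_handle *desc;

	desc = xencomm_map_no_alloc(arg, sizeof(struct xen_foo_arg));
	if (desc == NULL)
		return -EINVAL;

	return xencomm_arch_hypercall_foo(desc);
}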
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
new file mode 100644
index 0000000..04cd123
--- /dev/null
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -0,0 +1,364 @@
+/******************************************************************************
+ * arch/ia64/xen/xen_pv_ops.c
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/pm.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/xencomm.h>
+#include <asm/xen/privop.h>
+
+#include "irq_xen.h"
+#include "time.h"
+
+/***************************************************************************
+ * general info
+ */
+static struct pv_info xen_info __initdata = {
+	.kernel_rpl = 2,	/* or 1: determined at runtime */
+	.paravirt_enabled = 1,
+	.name = "Xen/ia64",
+};
+
+#define IA64_RSC_PL_SHIFT	2
+#define IA64_RSC_PL_BIT_SIZE	2
+#define IA64_RSC_PL_MASK	\
+	(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
+
+static void __init
+xen_info_init(void)
+{
+	/* Xenified Linux/ia64 may run on pl = 1 or 2.
+	 * Determine it at run time. */
+	unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
+	unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
+	xen_info.kernel_rpl = rpl;
+}
+
+/***************************************************************************
+ * pv_init_ops
+ * initialization hooks.
+ */
+
+static void
+xen_panic_hypercall(struct unw_frame_info *info, void *arg)
+{
+	current->thread.ksp = (__u64)info->sw - 16;
+	HYPERVISOR_shutdown(SHUTDOWN_crash);
+	/* we're never actually going to get here... */
+}
+
+static int
+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	unw_init_running(xen_panic_hypercall, NULL);
+	/* we're never actually going to get here... */
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block xen_panic_block = {
+	xen_panic_event, NULL, 0 /* try to go last */
+};
+
+static void xen_pm_power_off(void)
+{
+	local_irq_disable();
+	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
+}
+
+static void __init
+xen_banner(void)
+{
+	printk(KERN_INFO
+	       "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
+	       "flags=0x%x\n",
+	       xen_info.kernel_rpl,
+	       HYPERVISOR_shared_info->arch.start_info_pfn,
+	       xen_start_info->nr_pages, xen_start_info->flags);
+}
+
+static int __init
+xen_reserve_memory(struct rsvd_region *region)
+{
+	region->start = (unsigned long)__va(
+		(HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
+	region->end   = region->start + PAGE_SIZE;
+	return 1;
+}
+
+static void __init
+xen_arch_setup_early(void)
+{
+	struct shared_info *s;
+	BUG_ON(!xen_pv_domain());
+
+	s = HYPERVISOR_shared_info;
+	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
+
+	/* Must be done before any hypercall.  */
+	xencomm_initialize();
+
+	xen_setup_features();
+	/* Register a call for panic conditions. */
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &xen_panic_block);
+	pm_power_off = xen_pm_power_off;
+
+	xen_ia64_enable_opt_feature();
+}
+
+static void __init
+xen_arch_setup_console(char **cmdline_p)
+{
+	add_preferred_console("xenboot", 0, NULL);
+	add_preferred_console("tty", 0, NULL);
+	/* use hvc_xen */
+	add_preferred_console("hvc", 0, NULL);
+
+#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = NULL;
+#endif
+}
+
+static int __init
+xen_arch_setup_nomca(void)
+{
+	return 1;
+}
+
+static void __init
+xen_post_smp_prepare_boot_cpu(void)
+{
+	xen_setup_vcpu_info_placement();
+}
+
+static const struct pv_init_ops xen_init_ops __initdata = {
+	.banner = xen_banner,
+
+	.reserve_memory = xen_reserve_memory,
+
+	.arch_setup_early = xen_arch_setup_early,
+	.arch_setup_console = xen_arch_setup_console,
+	.arch_setup_nomca = xen_arch_setup_nomca,
+
+	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+};
+
+/***************************************************************************
+ * pv_cpu_ops
+ * intrinsics hooks.
+ */
+
+static void xen_setreg(int regnum, unsigned long val)
+{
+	switch (regnum) {
+	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
+		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
+		break;
+#ifdef CONFIG_IA32_SUPPORT
+	case _IA64_REG_AR_EFLAG:
+		xen_set_eflag(val);
+		break;
+#endif
+	case _IA64_REG_CR_TPR:
+		xen_set_tpr(val);
+		break;
+	case _IA64_REG_CR_ITM:
+		xen_set_itm(val);
+		break;
+	case _IA64_REG_CR_EOI:
+		xen_eoi(val);
+		break;
+	default:
+		ia64_native_setreg_func(regnum, val);
+		break;
+	}
+}
+
+static unsigned long xen_getreg(int regnum)
+{
+	unsigned long res;
+
+	switch (regnum) {
+	case _IA64_REG_PSR:
+		res = xen_get_psr();
+		break;
+#ifdef CONFIG_IA32_SUPPORT
+	case _IA64_REG_AR_EFLAG:
+		res = xen_get_eflag();
+		break;
+#endif
+	case _IA64_REG_CR_IVR:
+		res = xen_get_ivr();
+		break;
+	case _IA64_REG_CR_TPR:
+		res = xen_get_tpr();
+		break;
+	default:
+		res = ia64_native_getreg_func(regnum);
+		break;
+	}
+	return res;
+}
+
+/* turning on interrupts is a bit more complicated: write to the
+ * memory-mapped virtual psr.i bit first (to avoid a race condition),
+ * then if any interrupts were pending, we have to execute a hyperprivop
+ * to ensure the pending interrupt gets delivered; else we're done! */
+static void
+xen_ssm_i(void)
+{
+	int old = xen_get_virtual_psr_i();
+	xen_set_virtual_psr_i(1);
+	barrier();
+	if (!old && xen_get_virtual_pend())
+		xen_hyper_ssm_i();
+}
+
+/* turning off interrupts can be paravirtualized simply by writing
+ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
+static void
+xen_rsm_i(void)
+{
+	xen_set_virtual_psr_i(0);
+	barrier();
+}
+
+static unsigned long
+xen_get_psr_i(void)
+{
+	return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
+}
+
+static void
+xen_intrin_local_irq_restore(unsigned long mask)
+{
+	if (mask & IA64_PSR_I)
+		xen_ssm_i();
+	else
+		xen_rsm_i();
+}
+
+static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+	.fc		= xen_fc,
+	.thash		= xen_thash,
+	.get_cpuid	= xen_get_cpuid,
+	.get_pmd	= xen_get_pmd,
+	.getreg		= xen_getreg,
+	.setreg		= xen_setreg,
+	.ptcga		= xen_ptcga,
+	.get_rr		= xen_get_rr,
+	.set_rr		= xen_set_rr,
+	.set_rr0_to_rr4	= xen_set_rr0_to_rr4,
+	.ssm_i		= xen_ssm_i,
+	.rsm_i		= xen_rsm_i,
+	.get_psr_i	= xen_get_psr_i,
+	.intrin_local_irq_restore
+			= xen_intrin_local_irq_restore,
+};
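These hooks are not called directly by common code; after xen_setup_pv_ops() installs xen_cpu_ops into pv_cpu_ops, the generic ia64 paravirt layer dispatches through that struct. A rough, illustrative sketch of the dispatch (the paravirt_* wrapper names are assumptions, not part of this patch):

/* Illustrative only: how callers reach the hooks above once
 * pv_cpu_ops has been set to xen_cpu_ops (see xen_setup_pv_ops() below). */
static inline unsigned long paravirt_get_psr_i(void)
{
	return pv_cpu_ops.get_psr_i();			/* xen_get_psr_i() on Xen */
}

static inline void paravirt_local_irq_restore(unsigned long flags)
{
	pv_cpu_ops.intrin_local_irq_restore(flags);	/* xen_ssm_i()/xen_rsm_i() */
}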
+
+/******************************************************************************
+ * replacement of hand-written assembly code.
+ */
+
+extern char xen_switch_to;
+extern char xen_leave_syscall;
+extern char xen_work_processed_syscall;
+extern char xen_leave_kernel;
+
+const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
+	.switch_to		= (unsigned long)&xen_switch_to,
+	.leave_syscall		= (unsigned long)&xen_leave_syscall,
+	.work_processed_syscall	= (unsigned long)&xen_work_processed_syscall,
+	.leave_kernel		= (unsigned long)&xen_leave_kernel,
+};
+
+/***************************************************************************
+ * pv_iosapic_ops
+ * iosapic read/write hooks.
+ */
+static void
+xen_pcat_compat_init(void)
+{
+	/* nothing */
+}
+
+static struct irq_chip*
+xen_iosapic_get_irq_chip(unsigned long trigger)
+{
+	return NULL;
+}
+
+static unsigned int
+xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+	struct physdev_apic apic_op;
+	int ret;
+
+	apic_op.apic_physbase = (unsigned long)iosapic -
+					__IA64_UNCACHED_OFFSET;
+	apic_op.reg = reg;
+	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
+	if (ret)
+		return ret;
+	return apic_op.value;
+}
+
+static void
+xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+	struct physdev_apic apic_op;
+
+	apic_op.apic_physbase = (unsigned long)iosapic -
+					__IA64_UNCACHED_OFFSET;
+	apic_op.reg = reg;
+	apic_op.value = val;
+	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
+}
+
+static const struct pv_iosapic_ops xen_iosapic_ops __initdata = {
+	.pcat_compat_init = xen_pcat_compat_init,
+	.__get_irq_chip = xen_iosapic_get_irq_chip,
+
+	.__read = xen_iosapic_read,
+	.__write = xen_iosapic_write,
+};
+
+/***************************************************************************
+ * pv_ops initialization
+ */
+
+void __init
+xen_setup_pv_ops(void)
+{
+	xen_info_init();
+	pv_info = xen_info;
+	pv_init_ops = xen_init_ops;
+	pv_cpu_ops = xen_cpu_ops;
+	pv_iosapic_ops = xen_iosapic_ops;
+	pv_irq_ops = xen_irq_ops;
+	pv_time_ops = xen_time_ops;
+
+	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
+}
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c
new file mode 100644
index 0000000..1f5d7ac
--- /dev/null
+++ b/arch/ia64/xen/xencomm.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/mm.h>
+
+static unsigned long kernel_virtual_offset;
+static int is_xencomm_initialized;
+
+/* For xen early printk, which uses the console io hypercall via xencomm.
+ * However, early printk may run before xencomm is initialized.
+ */
+int
+xencomm_is_initialized(void)
+{
+	return is_xencomm_initialized;
+}
+
+void
+xencomm_initialize(void)
+{
+	kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
+	is_xencomm_initialized = 1;
+}
+
+/* Translate virtual address to physical address.  */
+unsigned long
+xencomm_vtop(unsigned long vaddr)
+{
+	struct page *page;
+	struct vm_area_struct *vma;
+
+	if (vaddr == 0)
+		return 0UL;
+
+	if (REGION_NUMBER(vaddr) == 5) {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *ptep;
+
+		/* On ia64, TASK_SIZE refers to current.  It is not initialized
+		   during boot.
+		   Furthermore the kernel is relocatable and __pa() doesn't
+		   work on such addresses.  */
+		if (vaddr >= KERNEL_START
+		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
+			return vaddr - kernel_virtual_offset;
+
+		/* In kernel area -- virtually mapped.  */
+		pgd = pgd_offset_k(vaddr);
+		if (pgd_none(*pgd) || pgd_bad(*pgd))
+			return ~0UL;
+
+		pud = pud_offset(pgd, vaddr);
+		if (pud_none(*pud) || pud_bad(*pud))
+			return ~0UL;
+
+		pmd = pmd_offset(pud, vaddr);
+		if (pmd_none(*pmd) || pmd_bad(*pmd))
+			return ~0UL;
+
+		ptep = pte_offset_kernel(pmd, vaddr);
+		if (!ptep)
+			return ~0UL;
+
+		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
+	}
+
+	if (vaddr > TASK_SIZE) {
+		/* percpu variables */
+		if (REGION_NUMBER(vaddr) == 7 &&
+		    REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
+			return ia64_tpa(vaddr);
+
+		/* kernel address */
+		return __pa(vaddr);
+	}
+
+	/* XXX double-check (lack of) locking */
+	vma = find_extend_vma(current->mm, vaddr);
+	if (!vma)
+		return ~0UL;
+
+	/* We assume the page is modified.  */
+	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
+	if (!page)
+		return ~0UL;
+
+	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
+}
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
new file mode 100644
index 0000000..3e71d50
--- /dev/null
+++ b/arch/ia64/xen/xenivt.S
@@ -0,0 +1,52 @@
+/*
+ * arch/ia64/xen/ivt.S
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *	Dan Magenheimer <dan.magenheimer@hp.com>
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/kregs.h>
+#include <asm/pgtable.h>
+
+#include "../kernel/minstate.h"
+
+	.section .text,"ax"
+GLOBAL_ENTRY(xen_event_callback)
+	mov r31=pr		// prepare to save predicates
+	;;
+	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
+	;;
+	movl r3=XSI_PSR_IC
+	mov r14=1
+	;;
+	st4 [r3]=r14
+	;;
+	adds r3=8,r2		// set up second base pointer for SAVE_REST
+	srlz.i			// ensure everybody knows psr.ic is back on
+	;;
+	SAVE_REST
+	;;
+1:
+	alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+	add out0=16,sp		// pass pointer to pt_regs as first arg
+	;;
+	br.call.sptk.many b0=xen_evtchn_do_upcall
+	;;
+	movl r20=XSI_PSR_I_ADDR
+	;;
+	ld8 r20=[r20]
+	;;
+	adds r20=-1,r20		// vcpu_info->evtchn_upcall_pending
+	;;
+	ld1 r20=[r20]
+	;;
+	cmp.ne p6,p0=r20,r0	// if there are pending events,
+	(p6) br.spnt.few 1b	// call evtchn_do_upcall again.
+	br.sptk.many xen_leave_kernel	// we know ia64_leave_kernel is
+					// paravirtualized as xen_leave_kernel
+END(xen_event_callback)
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
new file mode 100644
index 0000000..28fed1f
--- /dev/null
+++ b/arch/ia64/xen/xensetup.S
@@ -0,0 +1,83 @@
+/*
+ * Support routines for Xen
+ *
+ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
+ */
+
+#include <asm/processor.h>
+#include <asm/asmmacro.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/paravirt.h>
+#include <asm/xen/privop.h>
+#include <linux/elfnote.h>
+#include <linux/init.h>
+#include <xen/interface/elfnote.h>
+
+	.section .data.read_mostly
+	.align 8
+	.global xen_domain_type
+xen_domain_type:
+	data4 XEN_NATIVE_ASM
+	.previous
+
+	__INIT
+ENTRY(startup_xen)
+	// Calculate load offset.
+	// The constant, LOAD_OFFSET, can't be used because the boot
+	// loader doesn't always load to the LMA specified by the vmlinux.lds.
+	mov r9=ip	// must be the first instruction to make sure
+			// that r9 = the physical address of startup_xen.
+			// Usually r9 = startup_xen - LOAD_OFFSET
+	movl r8=startup_xen
+	;;
+	sub r9=r9,r8	// Usually r9 = -LOAD_OFFSET.
+
+	mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN
+	movl r11=_start
+	;;
+	add r11=r11,r9
+	movl r8=hypervisor_type
+	;;
+	add r8=r8,r9
+	mov b0=r11
+	;;
+	st8 [r8]=r10
+	br.cond.sptk.many b0
+	;;
+END(startup_xen)
+
+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,	.asciz "linux")
+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,	.asciz "2.6")
+	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,	.asciz "xen-3.0")
+	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,		data8.ua startup_xen - LOAD_OFFSET)
+
+#define isBP	p3	// are we the Bootstrap Processor?
+
+	.text
+
+GLOBAL_ENTRY(xen_setup_hook)
+	mov r8=XEN_PV_DOMAIN_ASM
+(isBP)	movl r9=xen_domain_type;;
+(isBP)	st4 [r9]=r8
+	movl r10=xen_ivt;;
+
+	mov cr.iva=r10
+
+	/* Set xsi base.  */
+#define FW_HYPERCALL_SET_SHARED_INFO_VA			0x600
+(isBP)	mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
+(isBP)	movl r28=XSI_BASE;;
+(isBP)	break 0x1000;;
+
+	/* setup pv_ops */
+(isBP)	mov r4=rp
+	;;
+(isBP)	br.call.sptk.many rp=xen_setup_pv_ops
+	;;
+(isBP)	mov rp=r4
+	;;
+
+	br.ret.sptk.many rp
+	;;
+END(xen_setup_hook)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 00289c1..dbaed4a 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -42,6 +42,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 
 menu "Processor type and features"
 
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index fc29948..39cb6da 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -40,6 +40,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
diff --git a/arch/m32r/oprofile/init.c b/arch/m32r/oprofile/init.c
index b7773e4..fa56860 100644
--- a/arch/m32r/oprofile/init.c
+++ b/arch/m32r/oprofile/init.c
@@ -12,7 +12,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 677c93a..836fb66 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -62,6 +62,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Platform dependent setup"
 
 config EISA
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index 808c901..c50bec8 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -18,7 +18,6 @@
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/mc146818rtc.h>	/* For struct rtc_time and ioctls, etc */
-#include <linux/smp_lock.h>
 #include <linux/bcd.h>
 #include <asm/bvme6000hw.h>
 
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 0a89983..76b66fe 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -75,6 +75,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Processor type and features"
 
 choice
diff --git a/arch/m68knommu/include/asm/thread_info.h b/arch/m68knommu/include/asm/thread_info.h
index 0c9bc09..82529f4 100644
--- a/arch/m68knommu/include/asm/thread_info.h
+++ b/arch/m68knommu/include/asm/thread_info.h
@@ -84,12 +84,14 @@
 #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4
+#define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b905744..5f149b0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1885,6 +1885,8 @@
 	  add initrd or initramfs image to the kernel image.
 	  Otherwise, say N.
 
+source "kernel/Kconfig.freezer"
+
 menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
 
 config HW_HAS_EISA
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 3965fda..1359c03 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -45,12 +45,12 @@
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		sec = BCD2BIN(sec);
-		min = BCD2BIN(min);
-		hour = BCD2BIN(hour);
-		day = BCD2BIN(day);
-		mon = BCD2BIN(mon);
-		year = BCD2BIN(year);
+		sec = bcd2bin(sec);
+		min = bcd2bin(min);
+		hour = bcd2bin(hour);
+		day = bcd2bin(day);
+		mon = bcd2bin(mon);
+		year = bcd2bin(year);
 	}
 
 	year += real_year - 72 + 2000;
@@ -83,7 +83,7 @@
 
 	cmos_minutes = CMOS_READ(RTC_MINUTES);
 	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		cmos_minutes = BCD2BIN(cmos_minutes);
+		cmos_minutes = bcd2bin(cmos_minutes);
 
 	/*
 	 * since we're only adjusting minutes and seconds,
@@ -99,8 +99,8 @@
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
 		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			real_seconds = BIN2BCD(real_seconds);
-			real_minutes = BIN2BCD(real_minutes);
+			real_seconds = bin2bcd(real_seconds);
+			real_minutes = bin2bcd(real_minutes);
 		}
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index cdc379a..199b457 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -44,7 +44,7 @@
 
 	cmos_minutes = CMOS_READ(RTC_MINUTES);
 	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BCD_TO_BIN(cmos_minutes);
+		cmos_minutes = bcd2bin(cmos_minutes);
 
 	/*
 	 * since we're only adjusting minutes and seconds,
@@ -60,8 +60,8 @@
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
 		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			BIN_TO_BCD(real_seconds);
-			BIN_TO_BCD(real_minutes);
+			real_seconds = bin2bcd(real_seconds);
+			real_minutes = bin2bcd(real_minutes);
 		}
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
@@ -103,12 +103,12 @@
 	} while (sec != CMOS_READ(RTC_SECONDS));
 
 	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(sec);
-		BCD_TO_BIN(min);
-		BCD_TO_BIN(hour);
-		BCD_TO_BIN(day);
-		BCD_TO_BIN(mon);
-		BCD_TO_BIN(year);
+		sec = bcd2bin(sec);
+		min = bcd2bin(min);
+		hour = bcd2bin(hour);
+		day = bcd2bin(day);
+		mon = bcd2bin(mon);
+		year = bcd2bin(year);
 	}
 	spin_unlock_irqrestore(&rtc_lock, flags);
 	year = mc146818_decode_year(year);
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index dd2fbd6..3bf3354 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -32,7 +32,7 @@
         return 0;
 }
 
-static int op_mips_create_files(struct super_block * sb, struct dentry * root)
+static int op_mips_create_files(struct super_block *sb, struct dentry *root)
 {
 	int i;
 
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index 2bfc17c..f04b54f 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -27,7 +27,7 @@
 /* Per-architecture configury and hooks.  */
 struct op_mips_model {
 	void (*reg_setup) (struct op_counter_config *);
-	void (*cpu_setup) (void * dummy);
+	void (*cpu_setup) (void *dummy);
 	int (*init)(void);
 	void (*exit)(void);
 	void (*cpu_start)(void *args);
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
index a45d320..3aa8138 100644
--- a/arch/mips/oprofile/op_model_rm9000.c
+++ b/arch/mips/oprofile/op_model_rm9000.c
@@ -80,7 +80,7 @@
 	write_c0_perfcontrol(0);
 }
 
-static irqreturn_t rm9000_perfcount_handler(int irq, void * dev_id)
+static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id)
 {
 	unsigned int control = read_c0_perfcontrol();
 	struct pt_regs *regs = get_irq_regs();
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 6537d90..2d3c0dc 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -79,14 +79,14 @@
 	/* Stop the update to the time */
 	m48t37_base->control = 0x40;
 
-	year = BCD2BIN(m48t37_base->year);
-	year += BCD2BIN(m48t37_base->century) * 100;
+	year = bcd2bin(m48t37_base->year);
+	year += bcd2bin(m48t37_base->century) * 100;
 
-	month = BCD2BIN(m48t37_base->month);
-	day = BCD2BIN(m48t37_base->date);
-	hour = BCD2BIN(m48t37_base->hour);
-	min = BCD2BIN(m48t37_base->min);
-	sec = BCD2BIN(m48t37_base->sec);
+	month = bcd2bin(m48t37_base->month);
+	day = bcd2bin(m48t37_base->date);
+	hour = bcd2bin(m48t37_base->hour);
+	min = bcd2bin(m48t37_base->min);
+	sec = bcd2bin(m48t37_base->sec);
 
 	/* Start the update to the time again */
 	m48t37_base->control = 0x00;
@@ -113,22 +113,22 @@
 	m48t37_base->control = 0x80;
 
 	/* year */
-	m48t37_base->year = BIN2BCD(tm.tm_year % 100);
-	m48t37_base->century = BIN2BCD(tm.tm_year / 100);
+	m48t37_base->year = bin2bcd(tm.tm_year % 100);
+	m48t37_base->century = bin2bcd(tm.tm_year / 100);
 
 	/* month */
-	m48t37_base->month = BIN2BCD(tm.tm_mon);
+	m48t37_base->month = bin2bcd(tm.tm_mon);
 
 	/* day */
-	m48t37_base->date = BIN2BCD(tm.tm_mday);
+	m48t37_base->date = bin2bcd(tm.tm_mday);
 
 	/* hour/min/sec */
-	m48t37_base->hour = BIN2BCD(tm.tm_hour);
-	m48t37_base->min = BIN2BCD(tm.tm_min);
-	m48t37_base->sec = BIN2BCD(tm.tm_sec);
+	m48t37_base->hour = bin2bcd(tm.tm_hour);
+	m48t37_base->min = bin2bcd(tm.tm_min);
+	m48t37_base->sec = bin2bcd(tm.tm_sec);
 
 	/* day of week -- not really used, but let's keep it up-to-date */
-	m48t37_base->day = BIN2BCD(tm.tm_wday + 1);
+	m48t37_base->day = bin2bcd(tm.tm_wday + 1);
 
 	/* disable writing */
 	m48t37_base->control = 0x00;
diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c
index 26fbff4..b732600 100644
--- a/arch/mips/sibyte/swarm/rtc_m41t81.c
+++ b/arch/mips/sibyte/swarm/rtc_m41t81.c
@@ -156,32 +156,32 @@
 	 */
 
 	spin_lock_irqsave(&rtc_lock, flags);
-	tm.tm_sec = BIN2BCD(tm.tm_sec);
+	tm.tm_sec = bin2bcd(tm.tm_sec);
 	m41t81_write(M41T81REG_SC, tm.tm_sec);
 
-	tm.tm_min = BIN2BCD(tm.tm_min);
+	tm.tm_min = bin2bcd(tm.tm_min);
 	m41t81_write(M41T81REG_MN, tm.tm_min);
 
-	tm.tm_hour = BIN2BCD(tm.tm_hour);
+	tm.tm_hour = bin2bcd(tm.tm_hour);
 	tm.tm_hour = (tm.tm_hour & 0x3f) | (m41t81_read(M41T81REG_HR) & 0xc0);
 	m41t81_write(M41T81REG_HR, tm.tm_hour);
 
 	/* tm_wday starts from 0 to 6 */
 	if (tm.tm_wday == 0) tm.tm_wday = 7;
-	tm.tm_wday = BIN2BCD(tm.tm_wday);
+	tm.tm_wday = bin2bcd(tm.tm_wday);
 	m41t81_write(M41T81REG_DY, tm.tm_wday);
 
-	tm.tm_mday = BIN2BCD(tm.tm_mday);
+	tm.tm_mday = bin2bcd(tm.tm_mday);
 	m41t81_write(M41T81REG_DT, tm.tm_mday);
 
 	/* tm_mon starts from 0, *ick* */
 	tm.tm_mon ++;
-	tm.tm_mon = BIN2BCD(tm.tm_mon);
+	tm.tm_mon = bin2bcd(tm.tm_mon);
 	m41t81_write(M41T81REG_MO, tm.tm_mon);
 
 	/* we don't do century, everything is beyond 2000 */
 	tm.tm_year %= 100;
-	tm.tm_year = BIN2BCD(tm.tm_year);
+	tm.tm_year = bin2bcd(tm.tm_year);
 	m41t81_write(M41T81REG_YR, tm.tm_year);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
@@ -209,12 +209,12 @@
 	year = m41t81_read(M41T81REG_YR);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
-	sec = BCD2BIN(sec);
-	min = BCD2BIN(min);
-	hour = BCD2BIN(hour);
-	day = BCD2BIN(day);
-	mon = BCD2BIN(mon);
-	year = BCD2BIN(year);
+	sec = bcd2bin(sec);
+	min = bcd2bin(min);
+	hour = bcd2bin(hour);
+	day = bcd2bin(day);
+	mon = bcd2bin(mon);
+	year = bcd2bin(year);
 
 	year += 2000;
 
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
index ff3e5da..4438b21 100644
--- a/arch/mips/sibyte/swarm/rtc_xicor1241.c
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -124,18 +124,18 @@
 	xicor_write(X1241REG_SR, X1241REG_SR_WEL | X1241REG_SR_RWEL);
 
 	/* trivial ones */
-	tm.tm_sec = BIN2BCD(tm.tm_sec);
+	tm.tm_sec = bin2bcd(tm.tm_sec);
 	xicor_write(X1241REG_SC, tm.tm_sec);
 
-	tm.tm_min = BIN2BCD(tm.tm_min);
+	tm.tm_min = bin2bcd(tm.tm_min);
 	xicor_write(X1241REG_MN, tm.tm_min);
 
-	tm.tm_mday = BIN2BCD(tm.tm_mday);
+	tm.tm_mday = bin2bcd(tm.tm_mday);
 	xicor_write(X1241REG_DT, tm.tm_mday);
 
 	/* tm_mon starts from 0, *ick* */
 	tm.tm_mon ++;
-	tm.tm_mon = BIN2BCD(tm.tm_mon);
+	tm.tm_mon = bin2bcd(tm.tm_mon);
 	xicor_write(X1241REG_MO, tm.tm_mon);
 
 	/* year is split */
@@ -148,7 +148,7 @@
 	tmp = xicor_read(X1241REG_HR);
 	if (tmp & X1241REG_HR_MIL) {
 		/* 24 hour format */
-		tm.tm_hour = BIN2BCD(tm.tm_hour);
+		tm.tm_hour = bin2bcd(tm.tm_hour);
 		tmp = (tmp & ~0x3f) | (tm.tm_hour & 0x3f);
 	} else {
 		/* 12 hour format, with 0x2 for pm */
@@ -157,7 +157,7 @@
 			tmp |= 0x20;
 			tm.tm_hour -= 12;
 		}
-		tm.tm_hour = BIN2BCD(tm.tm_hour);
+		tm.tm_hour = bin2bcd(tm.tm_hour);
 		tmp |= tm.tm_hour;
 	}
 	xicor_write(X1241REG_HR, tmp);
@@ -191,13 +191,13 @@
 	y2k = xicor_read(X1241REG_Y2K);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
-	sec = BCD2BIN(sec);
-	min = BCD2BIN(min);
-	hour = BCD2BIN(hour);
-	day = BCD2BIN(day);
-	mon = BCD2BIN(mon);
-	year = BCD2BIN(year);
-	y2k = BCD2BIN(y2k);
+	sec = bcd2bin(sec);
+	min = bcd2bin(min);
+	hour = bcd2bin(hour);
+	day = bcd2bin(day);
+	mon = bcd2bin(mon);
+	year = bcd2bin(year);
+	y2k = bcd2bin(y2k);
 
 	year += (y2k * 100);
 
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index dd557c9..9a9f433 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -68,6 +68,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 
 menu "Matsushita MN10300 system setup"
 
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 042f792..7978470 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -67,7 +67,7 @@
 
 	cmos_minutes = CMOS_READ(RTC_MINUTES);
 	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BCD_TO_BIN(cmos_minutes);
+		cmos_minutes = bcd2bin(cmos_minutes);
 
 	/*
 	 * since we're only adjusting minutes and seconds,
@@ -84,8 +84,8 @@
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
 		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			BIN_TO_BCD(real_seconds);
-			BIN_TO_BCD(real_minutes);
+			real_seconds = bin2bcd(real_seconds);
+			real_minutes = bin2bcd(real_minutes);
 		}
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 8313fcc..644a70b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -9,6 +9,8 @@
 	def_bool y
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select RTC_CLASS
+	select RTC_DRV_PARISC
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
@@ -90,6 +92,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 
 menu "Processor type and features"
 
diff --git a/include/asm-parisc/Kbuild b/arch/parisc/include/asm/Kbuild
similarity index 100%
rename from include/asm-parisc/Kbuild
rename to arch/parisc/include/asm/Kbuild
diff --git a/include/asm-parisc/agp.h b/arch/parisc/include/asm/agp.h
similarity index 100%
rename from include/asm-parisc/agp.h
rename to arch/parisc/include/asm/agp.h
diff --git a/include/asm-parisc/asmregs.h b/arch/parisc/include/asm/asmregs.h
similarity index 100%
rename from include/asm-parisc/asmregs.h
rename to arch/parisc/include/asm/asmregs.h
diff --git a/include/asm-parisc/assembly.h b/arch/parisc/include/asm/assembly.h
similarity index 100%
rename from include/asm-parisc/assembly.h
rename to arch/parisc/include/asm/assembly.h
diff --git a/include/asm-parisc/atomic.h b/arch/parisc/include/asm/atomic.h
similarity index 100%
rename from include/asm-parisc/atomic.h
rename to arch/parisc/include/asm/atomic.h
diff --git a/include/asm-parisc/auxvec.h b/arch/parisc/include/asm/auxvec.h
similarity index 100%
rename from include/asm-parisc/auxvec.h
rename to arch/parisc/include/asm/auxvec.h
diff --git a/include/asm-parisc/bitops.h b/arch/parisc/include/asm/bitops.h
similarity index 100%
rename from include/asm-parisc/bitops.h
rename to arch/parisc/include/asm/bitops.h
diff --git a/include/asm-parisc/bug.h b/arch/parisc/include/asm/bug.h
similarity index 100%
rename from include/asm-parisc/bug.h
rename to arch/parisc/include/asm/bug.h
diff --git a/include/asm-parisc/bugs.h b/arch/parisc/include/asm/bugs.h
similarity index 100%
rename from include/asm-parisc/bugs.h
rename to arch/parisc/include/asm/bugs.h
diff --git a/include/asm-parisc/byteorder.h b/arch/parisc/include/asm/byteorder.h
similarity index 100%
rename from include/asm-parisc/byteorder.h
rename to arch/parisc/include/asm/byteorder.h
diff --git a/include/asm-parisc/cache.h b/arch/parisc/include/asm/cache.h
similarity index 100%
rename from include/asm-parisc/cache.h
rename to arch/parisc/include/asm/cache.h
diff --git a/include/asm-parisc/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
similarity index 100%
rename from include/asm-parisc/cacheflush.h
rename to arch/parisc/include/asm/cacheflush.h
diff --git a/include/asm-parisc/checksum.h b/arch/parisc/include/asm/checksum.h
similarity index 100%
rename from include/asm-parisc/checksum.h
rename to arch/parisc/include/asm/checksum.h
diff --git a/include/asm-parisc/compat.h b/arch/parisc/include/asm/compat.h
similarity index 100%
rename from include/asm-parisc/compat.h
rename to arch/parisc/include/asm/compat.h
diff --git a/include/asm-parisc/compat_rt_sigframe.h b/arch/parisc/include/asm/compat_rt_sigframe.h
similarity index 100%
rename from include/asm-parisc/compat_rt_sigframe.h
rename to arch/parisc/include/asm/compat_rt_sigframe.h
diff --git a/include/asm-parisc/compat_signal.h b/arch/parisc/include/asm/compat_signal.h
similarity index 100%
rename from include/asm-parisc/compat_signal.h
rename to arch/parisc/include/asm/compat_signal.h
diff --git a/include/asm-parisc/compat_ucontext.h b/arch/parisc/include/asm/compat_ucontext.h
similarity index 100%
rename from include/asm-parisc/compat_ucontext.h
rename to arch/parisc/include/asm/compat_ucontext.h
diff --git a/include/asm-parisc/cputime.h b/arch/parisc/include/asm/cputime.h
similarity index 100%
rename from include/asm-parisc/cputime.h
rename to arch/parisc/include/asm/cputime.h
diff --git a/include/asm-parisc/current.h b/arch/parisc/include/asm/current.h
similarity index 100%
rename from include/asm-parisc/current.h
rename to arch/parisc/include/asm/current.h
diff --git a/include/asm-parisc/delay.h b/arch/parisc/include/asm/delay.h
similarity index 100%
rename from include/asm-parisc/delay.h
rename to arch/parisc/include/asm/delay.h
diff --git a/include/asm-parisc/device.h b/arch/parisc/include/asm/device.h
similarity index 100%
rename from include/asm-parisc/device.h
rename to arch/parisc/include/asm/device.h
diff --git a/include/asm-parisc/div64.h b/arch/parisc/include/asm/div64.h
similarity index 100%
rename from include/asm-parisc/div64.h
rename to arch/parisc/include/asm/div64.h
diff --git a/include/asm-parisc/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
similarity index 100%
rename from include/asm-parisc/dma-mapping.h
rename to arch/parisc/include/asm/dma-mapping.h
diff --git a/include/asm-parisc/dma.h b/arch/parisc/include/asm/dma.h
similarity index 100%
rename from include/asm-parisc/dma.h
rename to arch/parisc/include/asm/dma.h
diff --git a/include/asm-parisc/eisa_bus.h b/arch/parisc/include/asm/eisa_bus.h
similarity index 100%
rename from include/asm-parisc/eisa_bus.h
rename to arch/parisc/include/asm/eisa_bus.h
diff --git a/include/asm-parisc/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h
similarity index 100%
rename from include/asm-parisc/eisa_eeprom.h
rename to arch/parisc/include/asm/eisa_eeprom.h
diff --git a/include/asm-parisc/elf.h b/arch/parisc/include/asm/elf.h
similarity index 100%
rename from include/asm-parisc/elf.h
rename to arch/parisc/include/asm/elf.h
diff --git a/include/asm-parisc/emergency-restart.h b/arch/parisc/include/asm/emergency-restart.h
similarity index 100%
rename from include/asm-parisc/emergency-restart.h
rename to arch/parisc/include/asm/emergency-restart.h
diff --git a/include/asm-parisc/errno.h b/arch/parisc/include/asm/errno.h
similarity index 100%
rename from include/asm-parisc/errno.h
rename to arch/parisc/include/asm/errno.h
diff --git a/include/asm-parisc/fb.h b/arch/parisc/include/asm/fb.h
similarity index 100%
rename from include/asm-parisc/fb.h
rename to arch/parisc/include/asm/fb.h
diff --git a/include/asm-parisc/fcntl.h b/arch/parisc/include/asm/fcntl.h
similarity index 100%
rename from include/asm-parisc/fcntl.h
rename to arch/parisc/include/asm/fcntl.h
diff --git a/include/asm-parisc/fixmap.h b/arch/parisc/include/asm/fixmap.h
similarity index 100%
rename from include/asm-parisc/fixmap.h
rename to arch/parisc/include/asm/fixmap.h
diff --git a/include/asm-parisc/floppy.h b/arch/parisc/include/asm/floppy.h
similarity index 100%
rename from include/asm-parisc/floppy.h
rename to arch/parisc/include/asm/floppy.h
diff --git a/include/asm-parisc/futex.h b/arch/parisc/include/asm/futex.h
similarity index 100%
rename from include/asm-parisc/futex.h
rename to arch/parisc/include/asm/futex.h
diff --git a/include/asm-parisc/grfioctl.h b/arch/parisc/include/asm/grfioctl.h
similarity index 100%
rename from include/asm-parisc/grfioctl.h
rename to arch/parisc/include/asm/grfioctl.h
diff --git a/include/asm-parisc/hardirq.h b/arch/parisc/include/asm/hardirq.h
similarity index 100%
rename from include/asm-parisc/hardirq.h
rename to arch/parisc/include/asm/hardirq.h
diff --git a/include/asm-parisc/hardware.h b/arch/parisc/include/asm/hardware.h
similarity index 100%
rename from include/asm-parisc/hardware.h
rename to arch/parisc/include/asm/hardware.h
diff --git a/include/asm-parisc/hw_irq.h b/arch/parisc/include/asm/hw_irq.h
similarity index 100%
rename from include/asm-parisc/hw_irq.h
rename to arch/parisc/include/asm/hw_irq.h
diff --git a/include/asm-parisc/ide.h b/arch/parisc/include/asm/ide.h
similarity index 77%
rename from include/asm-parisc/ide.h
rename to arch/parisc/include/asm/ide.h
index c246ef7..81700a2 100644
--- a/include/asm-parisc/ide.h
+++ b/arch/parisc/include/asm/ide.h
@@ -13,10 +13,6 @@
 
 #ifdef __KERNEL__
 
-#define ide_request_irq(irq,hand,flg,dev,id)	request_irq((irq),(hand),(flg),(dev),(id))
-#define ide_free_irq(irq,dev_id)		free_irq((irq), (dev_id))
-#define ide_request_region(from,extent,name)	request_region((from), (extent), (name))
-#define ide_release_region(from,extent)		release_region((from), (extent))
 /* Generic I/O and MEMIO string operations.  */
 
 #define __ide_insw	insw
diff --git a/include/asm-parisc/io.h b/arch/parisc/include/asm/io.h
similarity index 100%
rename from include/asm-parisc/io.h
rename to arch/parisc/include/asm/io.h
diff --git a/include/asm-parisc/ioctl.h b/arch/parisc/include/asm/ioctl.h
similarity index 100%
rename from include/asm-parisc/ioctl.h
rename to arch/parisc/include/asm/ioctl.h
diff --git a/include/asm-parisc/ioctls.h b/arch/parisc/include/asm/ioctls.h
similarity index 100%
rename from include/asm-parisc/ioctls.h
rename to arch/parisc/include/asm/ioctls.h
diff --git a/include/asm-parisc/ipcbuf.h b/arch/parisc/include/asm/ipcbuf.h
similarity index 100%
rename from include/asm-parisc/ipcbuf.h
rename to arch/parisc/include/asm/ipcbuf.h
diff --git a/include/asm-parisc/irq.h b/arch/parisc/include/asm/irq.h
similarity index 100%
rename from include/asm-parisc/irq.h
rename to arch/parisc/include/asm/irq.h
diff --git a/include/asm-parisc/irq_regs.h b/arch/parisc/include/asm/irq_regs.h
similarity index 100%
rename from include/asm-parisc/irq_regs.h
rename to arch/parisc/include/asm/irq_regs.h
diff --git a/include/asm-parisc/kdebug.h b/arch/parisc/include/asm/kdebug.h
similarity index 100%
rename from include/asm-parisc/kdebug.h
rename to arch/parisc/include/asm/kdebug.h
diff --git a/include/asm-parisc/kmap_types.h b/arch/parisc/include/asm/kmap_types.h
similarity index 100%
rename from include/asm-parisc/kmap_types.h
rename to arch/parisc/include/asm/kmap_types.h
diff --git a/include/asm-parisc/led.h b/arch/parisc/include/asm/led.h
similarity index 100%
rename from include/asm-parisc/led.h
rename to arch/parisc/include/asm/led.h
diff --git a/include/asm-parisc/linkage.h b/arch/parisc/include/asm/linkage.h
similarity index 100%
rename from include/asm-parisc/linkage.h
rename to arch/parisc/include/asm/linkage.h
diff --git a/include/asm-parisc/local.h b/arch/parisc/include/asm/local.h
similarity index 100%
rename from include/asm-parisc/local.h
rename to arch/parisc/include/asm/local.h
diff --git a/include/asm-parisc/machdep.h b/arch/parisc/include/asm/machdep.h
similarity index 100%
rename from include/asm-parisc/machdep.h
rename to arch/parisc/include/asm/machdep.h
diff --git a/include/asm-parisc/mc146818rtc.h b/arch/parisc/include/asm/mc146818rtc.h
similarity index 100%
rename from include/asm-parisc/mc146818rtc.h
rename to arch/parisc/include/asm/mc146818rtc.h
diff --git a/include/asm-parisc/mckinley.h b/arch/parisc/include/asm/mckinley.h
similarity index 100%
rename from include/asm-parisc/mckinley.h
rename to arch/parisc/include/asm/mckinley.h
diff --git a/include/asm-parisc/mman.h b/arch/parisc/include/asm/mman.h
similarity index 100%
rename from include/asm-parisc/mman.h
rename to arch/parisc/include/asm/mman.h
diff --git a/include/asm-parisc/mmu.h b/arch/parisc/include/asm/mmu.h
similarity index 100%
rename from include/asm-parisc/mmu.h
rename to arch/parisc/include/asm/mmu.h
diff --git a/include/asm-parisc/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
similarity index 100%
rename from include/asm-parisc/mmu_context.h
rename to arch/parisc/include/asm/mmu_context.h
diff --git a/include/asm-parisc/mmzone.h b/arch/parisc/include/asm/mmzone.h
similarity index 100%
rename from include/asm-parisc/mmzone.h
rename to arch/parisc/include/asm/mmzone.h
diff --git a/include/asm-parisc/module.h b/arch/parisc/include/asm/module.h
similarity index 100%
rename from include/asm-parisc/module.h
rename to arch/parisc/include/asm/module.h
diff --git a/include/asm-parisc/msgbuf.h b/arch/parisc/include/asm/msgbuf.h
similarity index 100%
rename from include/asm-parisc/msgbuf.h
rename to arch/parisc/include/asm/msgbuf.h
diff --git a/include/asm-parisc/mutex.h b/arch/parisc/include/asm/mutex.h
similarity index 100%
rename from include/asm-parisc/mutex.h
rename to arch/parisc/include/asm/mutex.h
diff --git a/include/asm-parisc/page.h b/arch/parisc/include/asm/page.h
similarity index 100%
rename from include/asm-parisc/page.h
rename to arch/parisc/include/asm/page.h
diff --git a/include/asm-parisc/param.h b/arch/parisc/include/asm/param.h
similarity index 100%
rename from include/asm-parisc/param.h
rename to arch/parisc/include/asm/param.h
diff --git a/include/asm-parisc/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
similarity index 100%
rename from include/asm-parisc/parisc-device.h
rename to arch/parisc/include/asm/parisc-device.h
diff --git a/include/asm-parisc/parport.h b/arch/parisc/include/asm/parport.h
similarity index 100%
rename from include/asm-parisc/parport.h
rename to arch/parisc/include/asm/parport.h
diff --git a/include/asm-parisc/pci.h b/arch/parisc/include/asm/pci.h
similarity index 100%
rename from include/asm-parisc/pci.h
rename to arch/parisc/include/asm/pci.h
diff --git a/include/asm-parisc/pdc.h b/arch/parisc/include/asm/pdc.h
similarity index 98%
rename from include/asm-parisc/pdc.h
rename to arch/parisc/include/asm/pdc.h
index 9eaa794..c584b00 100644
--- a/include/asm-parisc/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -332,6 +332,9 @@
 #define BOOT_CONSOLE_SPA_OFFSET  0x3c4
 #define BOOT_CONSOLE_PATH_OFFSET 0x3a8
 
+/* size of the pdc_result buffer for firmware.c */
+#define NUM_PDC_RESULT	32
+
 #if !defined(__ASSEMBLY__)
 #ifdef __KERNEL__
 
@@ -600,6 +603,7 @@
 int pdc_chassis_disp(unsigned long disp);
 int pdc_chassis_warn(unsigned long *warn);
 int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info);
+int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info);
 int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
 		  void *iodc_data, unsigned int iodc_data_size);
 int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
@@ -638,6 +642,7 @@
 #endif
 
 void set_firmware_width(void);
+void set_firmware_width_unlocked(void);
 int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
 int pdc_do_reset(void);
 int pdc_soft_power_info(unsigned long *power_reg);
diff --git a/include/asm-parisc/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h
similarity index 100%
rename from include/asm-parisc/pdc_chassis.h
rename to arch/parisc/include/asm/pdc_chassis.h
diff --git a/include/asm-parisc/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
similarity index 100%
rename from include/asm-parisc/pdcpat.h
rename to arch/parisc/include/asm/pdcpat.h
diff --git a/include/asm-parisc/percpu.h b/arch/parisc/include/asm/percpu.h
similarity index 100%
rename from include/asm-parisc/percpu.h
rename to arch/parisc/include/asm/percpu.h
diff --git a/include/asm-parisc/perf.h b/arch/parisc/include/asm/perf.h
similarity index 100%
rename from include/asm-parisc/perf.h
rename to arch/parisc/include/asm/perf.h
diff --git a/include/asm-parisc/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
similarity index 100%
rename from include/asm-parisc/pgalloc.h
rename to arch/parisc/include/asm/pgalloc.h
diff --git a/include/asm-parisc/pgtable.h b/arch/parisc/include/asm/pgtable.h
similarity index 100%
rename from include/asm-parisc/pgtable.h
rename to arch/parisc/include/asm/pgtable.h
diff --git a/include/asm-parisc/poll.h b/arch/parisc/include/asm/poll.h
similarity index 100%
rename from include/asm-parisc/poll.h
rename to arch/parisc/include/asm/poll.h
diff --git a/include/asm-parisc/posix_types.h b/arch/parisc/include/asm/posix_types.h
similarity index 100%
rename from include/asm-parisc/posix_types.h
rename to arch/parisc/include/asm/posix_types.h
diff --git a/include/asm-parisc/prefetch.h b/arch/parisc/include/asm/prefetch.h
similarity index 100%
rename from include/asm-parisc/prefetch.h
rename to arch/parisc/include/asm/prefetch.h
diff --git a/include/asm-parisc/processor.h b/arch/parisc/include/asm/processor.h
similarity index 100%
rename from include/asm-parisc/processor.h
rename to arch/parisc/include/asm/processor.h
diff --git a/include/asm-parisc/psw.h b/arch/parisc/include/asm/psw.h
similarity index 100%
rename from include/asm-parisc/psw.h
rename to arch/parisc/include/asm/psw.h
diff --git a/include/asm-parisc/ptrace.h b/arch/parisc/include/asm/ptrace.h
similarity index 85%
rename from include/asm-parisc/ptrace.h
rename to arch/parisc/include/asm/ptrace.h
index 3e94c5d..afa5333 100644
--- a/include/asm-parisc/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -47,6 +47,16 @@
 
 #define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
 
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
+struct task_struct;
+#define arch_has_single_step()	1
+void user_disable_single_step(struct task_struct *task);
+void user_enable_single_step(struct task_struct *task);
+
+#define arch_has_block_step()	1
+void user_enable_block_step(struct task_struct *task);
+
 /* XXX should we use iaoq[1] or iaoq[0] ? */
 #define user_mode(regs)			(((regs)->iaoq[0] & 3) ? 1 : 0)
 #define user_space(regs)		(((regs)->iasq[1] != 0) ? 1 : 0)
diff --git a/include/asm-parisc/real.h b/arch/parisc/include/asm/real.h
similarity index 100%
rename from include/asm-parisc/real.h
rename to arch/parisc/include/asm/real.h
diff --git a/include/asm-parisc/resource.h b/arch/parisc/include/asm/resource.h
similarity index 100%
rename from include/asm-parisc/resource.h
rename to arch/parisc/include/asm/resource.h
diff --git a/include/asm-parisc/ropes.h b/arch/parisc/include/asm/ropes.h
similarity index 99%
rename from include/asm-parisc/ropes.h
rename to arch/parisc/include/asm/ropes.h
index 007a8806..09f51d5 100644
--- a/include/asm-parisc/ropes.h
+++ b/arch/parisc/include/asm/ropes.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_PARISC_ROPES_H_
 #define _ASM_PARISC_ROPES_H_
 
-#include <asm-parisc/parisc-device.h>
+#include <asm/parisc-device.h>
 
 #ifdef CONFIG_64BIT
 /* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
diff --git a/include/asm-parisc/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h
similarity index 100%
rename from include/asm-parisc/rt_sigframe.h
rename to arch/parisc/include/asm/rt_sigframe.h
diff --git a/include/asm-parisc/rtc.h b/arch/parisc/include/asm/rtc.h
similarity index 100%
rename from include/asm-parisc/rtc.h
rename to arch/parisc/include/asm/rtc.h
diff --git a/include/asm-parisc/runway.h b/arch/parisc/include/asm/runway.h
similarity index 100%
rename from include/asm-parisc/runway.h
rename to arch/parisc/include/asm/runway.h
diff --git a/include/asm-parisc/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
similarity index 100%
rename from include/asm-parisc/scatterlist.h
rename to arch/parisc/include/asm/scatterlist.h
diff --git a/include/asm-parisc/sections.h b/arch/parisc/include/asm/sections.h
similarity index 100%
rename from include/asm-parisc/sections.h
rename to arch/parisc/include/asm/sections.h
diff --git a/include/asm-parisc/segment.h b/arch/parisc/include/asm/segment.h
similarity index 100%
rename from include/asm-parisc/segment.h
rename to arch/parisc/include/asm/segment.h
diff --git a/include/asm-parisc/sembuf.h b/arch/parisc/include/asm/sembuf.h
similarity index 100%
rename from include/asm-parisc/sembuf.h
rename to arch/parisc/include/asm/sembuf.h
diff --git a/include/asm-parisc/serial.h b/arch/parisc/include/asm/serial.h
similarity index 100%
rename from include/asm-parisc/serial.h
rename to arch/parisc/include/asm/serial.h
diff --git a/include/asm-parisc/setup.h b/arch/parisc/include/asm/setup.h
similarity index 100%
rename from include/asm-parisc/setup.h
rename to arch/parisc/include/asm/setup.h
diff --git a/include/asm-parisc/shmbuf.h b/arch/parisc/include/asm/shmbuf.h
similarity index 100%
rename from include/asm-parisc/shmbuf.h
rename to arch/parisc/include/asm/shmbuf.h
diff --git a/include/asm-parisc/shmparam.h b/arch/parisc/include/asm/shmparam.h
similarity index 100%
rename from include/asm-parisc/shmparam.h
rename to arch/parisc/include/asm/shmparam.h
diff --git a/include/asm-parisc/sigcontext.h b/arch/parisc/include/asm/sigcontext.h
similarity index 100%
rename from include/asm-parisc/sigcontext.h
rename to arch/parisc/include/asm/sigcontext.h
diff --git a/include/asm-parisc/siginfo.h b/arch/parisc/include/asm/siginfo.h
similarity index 100%
rename from include/asm-parisc/siginfo.h
rename to arch/parisc/include/asm/siginfo.h
diff --git a/include/asm-parisc/signal.h b/arch/parisc/include/asm/signal.h
similarity index 100%
rename from include/asm-parisc/signal.h
rename to arch/parisc/include/asm/signal.h
diff --git a/include/asm-parisc/smp.h b/arch/parisc/include/asm/smp.h
similarity index 100%
rename from include/asm-parisc/smp.h
rename to arch/parisc/include/asm/smp.h
diff --git a/include/asm-parisc/socket.h b/arch/parisc/include/asm/socket.h
similarity index 100%
rename from include/asm-parisc/socket.h
rename to arch/parisc/include/asm/socket.h
diff --git a/include/asm-parisc/sockios.h b/arch/parisc/include/asm/sockios.h
similarity index 100%
rename from include/asm-parisc/sockios.h
rename to arch/parisc/include/asm/sockios.h
diff --git a/include/asm-parisc/spinlock.h b/arch/parisc/include/asm/spinlock.h
similarity index 100%
rename from include/asm-parisc/spinlock.h
rename to arch/parisc/include/asm/spinlock.h
diff --git a/include/asm-parisc/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
similarity index 100%
rename from include/asm-parisc/spinlock_types.h
rename to arch/parisc/include/asm/spinlock_types.h
diff --git a/include/asm-parisc/stat.h b/arch/parisc/include/asm/stat.h
similarity index 100%
rename from include/asm-parisc/stat.h
rename to arch/parisc/include/asm/stat.h
diff --git a/include/asm-parisc/statfs.h b/arch/parisc/include/asm/statfs.h
similarity index 100%
rename from include/asm-parisc/statfs.h
rename to arch/parisc/include/asm/statfs.h
diff --git a/include/asm-parisc/string.h b/arch/parisc/include/asm/string.h
similarity index 100%
rename from include/asm-parisc/string.h
rename to arch/parisc/include/asm/string.h
diff --git a/include/asm-parisc/superio.h b/arch/parisc/include/asm/superio.h
similarity index 100%
rename from include/asm-parisc/superio.h
rename to arch/parisc/include/asm/superio.h
diff --git a/include/asm-parisc/system.h b/arch/parisc/include/asm/system.h
similarity index 100%
rename from include/asm-parisc/system.h
rename to arch/parisc/include/asm/system.h
diff --git a/include/asm-parisc/termbits.h b/arch/parisc/include/asm/termbits.h
similarity index 100%
rename from include/asm-parisc/termbits.h
rename to arch/parisc/include/asm/termbits.h
diff --git a/include/asm-parisc/termios.h b/arch/parisc/include/asm/termios.h
similarity index 100%
rename from include/asm-parisc/termios.h
rename to arch/parisc/include/asm/termios.h
diff --git a/include/asm-parisc/thread_info.h b/arch/parisc/include/asm/thread_info.h
similarity index 95%
rename from include/asm-parisc/thread_info.h
rename to arch/parisc/include/asm/thread_info.h
index 9f81274..0407959 100644
--- a/include/asm-parisc/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,6 +58,7 @@
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE		5
 #define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
+#define TIF_FREEZE		7	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -65,6 +66,7 @@
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | \
                                  _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
diff --git a/include/asm-parisc/timex.h b/arch/parisc/include/asm/timex.h
similarity index 100%
rename from include/asm-parisc/timex.h
rename to arch/parisc/include/asm/timex.h
diff --git a/include/asm-parisc/tlb.h b/arch/parisc/include/asm/tlb.h
similarity index 100%
rename from include/asm-parisc/tlb.h
rename to arch/parisc/include/asm/tlb.h
diff --git a/include/asm-parisc/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
similarity index 100%
rename from include/asm-parisc/tlbflush.h
rename to arch/parisc/include/asm/tlbflush.h
diff --git a/include/asm-parisc/topology.h b/arch/parisc/include/asm/topology.h
similarity index 100%
rename from include/asm-parisc/topology.h
rename to arch/parisc/include/asm/topology.h
diff --git a/include/asm-parisc/traps.h b/arch/parisc/include/asm/traps.h
similarity index 100%
rename from include/asm-parisc/traps.h
rename to arch/parisc/include/asm/traps.h
diff --git a/include/asm-parisc/types.h b/arch/parisc/include/asm/types.h
similarity index 100%
rename from include/asm-parisc/types.h
rename to arch/parisc/include/asm/types.h
diff --git a/include/asm-parisc/uaccess.h b/arch/parisc/include/asm/uaccess.h
similarity index 100%
rename from include/asm-parisc/uaccess.h
rename to arch/parisc/include/asm/uaccess.h
diff --git a/include/asm-parisc/ucontext.h b/arch/parisc/include/asm/ucontext.h
similarity index 100%
rename from include/asm-parisc/ucontext.h
rename to arch/parisc/include/asm/ucontext.h
diff --git a/include/asm-parisc/unaligned.h b/arch/parisc/include/asm/unaligned.h
similarity index 100%
rename from include/asm-parisc/unaligned.h
rename to arch/parisc/include/asm/unaligned.h
diff --git a/include/asm-parisc/unistd.h b/arch/parisc/include/asm/unistd.h
similarity index 98%
rename from include/asm-parisc/unistd.h
rename to arch/parisc/include/asm/unistd.h
index a7d857f..ef26b00 100644
--- a/include/asm-parisc/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -801,8 +801,14 @@
 #define __NR_timerfd_create	(__NR_Linux + 306)
 #define __NR_timerfd_settime	(__NR_Linux + 307)
 #define __NR_timerfd_gettime	(__NR_Linux + 308)
+#define __NR_signalfd4		(__NR_Linux + 309)
+#define __NR_eventfd2		(__NR_Linux + 310)
+#define __NR_epoll_create1	(__NR_Linux + 311)
+#define __NR_dup3		(__NR_Linux + 312)
+#define __NR_pipe2		(__NR_Linux + 313)
+#define __NR_inotify_init1	(__NR_Linux + 314)
 
-#define __NR_Linux_syscalls	(__NR_timerfd_gettime + 1)
+#define __NR_Linux_syscalls	(__NR_inotify_init1 + 1)
 
 
 #define __IGNORE_select		/* newselect */
diff --git a/include/asm-parisc/unwind.h b/arch/parisc/include/asm/unwind.h
similarity index 98%
rename from include/asm-parisc/unwind.h
rename to arch/parisc/include/asm/unwind.h
index 2f7e6e5..52482e4 100644
--- a/include/asm-parisc/unwind.h
+++ b/arch/parisc/include/asm/unwind.h
@@ -74,4 +74,6 @@
 int unwind_once(struct unwind_frame_info *info);
 int unwind_to_user(struct unwind_frame_info *info);
 
+int unwind_init(void);
+
 #endif
diff --git a/include/asm-parisc/user.h b/arch/parisc/include/asm/user.h
similarity index 100%
rename from include/asm-parisc/user.h
rename to arch/parisc/include/asm/user.h
diff --git a/include/asm-parisc/vga.h b/arch/parisc/include/asm/vga.h
similarity index 100%
rename from include/asm-parisc/vga.h
rename to arch/parisc/include/asm/vga.h
diff --git a/include/asm-parisc/xor.h b/arch/parisc/include/asm/xor.h
similarity index 100%
rename from include/asm-parisc/xor.h
rename to arch/parisc/include/asm/xor.h
diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/parisc/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 3efc0b7..699cf8e 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,5 +290,8 @@
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+	BLANK();
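+	/* sizes the pdc_result buffers that real2.S reserves in assembly */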
+	DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
+	BLANK();
 	return 0;
 }
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 7177a6c..03f26bd 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -71,8 +71,8 @@
 #include <asm/processor.h>	/* for boot_cpu_data */
 
 static DEFINE_SPINLOCK(pdc_lock);
-static unsigned long pdc_result[32] __attribute__ ((aligned (8)));
-static unsigned long pdc_result2[32] __attribute__ ((aligned (8)));
+extern unsigned long pdc_result[NUM_PDC_RESULT];
+extern unsigned long pdc_result2[NUM_PDC_RESULT];
 
 #ifdef CONFIG_64BIT
 #define WIDE_FIRMWARE 0x1
@@ -150,26 +150,40 @@
 #endif
 }
 
+#ifdef CONFIG_64BIT
+void __init set_firmware_width_unlocked(void)
+{
+	int ret;
+
+	ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
+		__pa(pdc_result), 0);
+	convert_to_wide(pdc_result);
+	if (pdc_result[0] != NARROW_FIRMWARE)
+		parisc_narrow_firmware = 0;
+}
+
 /**
  * set_firmware_width - Determine if the firmware is wide or narrow.
  * 
- * This function must be called before any pdc_* function that uses the convert_to_wide
- * function.
+ * This function must be called before any pdc_* function that uses the
+ * convert_to_wide function.
  */
 void __init set_firmware_width(void)
 {
-#ifdef CONFIG_64BIT
-	int retval;
 	unsigned long flags;
-
-        spin_lock_irqsave(&pdc_lock, flags);
-	retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
-	convert_to_wide(pdc_result);
-	if(pdc_result[0] != NARROW_FIRMWARE)
-		parisc_narrow_firmware = 0;
-        spin_unlock_irqrestore(&pdc_lock, flags);
-#endif
+	spin_lock_irqsave(&pdc_lock, flags);
+	set_firmware_width_unlocked();
+	spin_unlock_irqrestore(&pdc_lock, flags);
 }
+#else
+void __init set_firmware_width_unlocked(void)
+{
+	return;
+}
+
+void __init set_firmware_width(void)
+{
+	return;
+}
+#endif /* CONFIG_64BIT */
 
 /**
  * pdc_emergency_unlock - Unlock the linux pdc lock
@@ -288,6 +302,20 @@
 	return retval;
 }
 
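+/*
+ * Unlocked variant of pdc_coproc_cfg(): callers must either hold pdc_lock
+ * themselves or run before anything else can issue PDC calls, as
+ * start_parisc() does during early boot.
+ */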
+int __init pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+	int ret;
+
+	ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
+	convert_to_wide(pdc_result);
+	pdc_coproc_info->ccr_functional = pdc_result[0];
+	pdc_coproc_info->ccr_present = pdc_result[1];
+	pdc_coproc_info->revision = pdc_result[17];
+	pdc_coproc_info->model = pdc_result[18];
+
+	return ret;
+}
+
 /**
  * pdc_coproc_cfg - To identify coprocessors attached to the processor.
  * @pdc_coproc_info: Return buffer address.
@@ -297,19 +325,14 @@
  */
 int __init pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
 {
-        int retval;
+	int ret;
 	unsigned long flags;
 
-        spin_lock_irqsave(&pdc_lock, flags);
-        retval = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
-        convert_to_wide(pdc_result);
-        pdc_coproc_info->ccr_functional = pdc_result[0];
-        pdc_coproc_info->ccr_present = pdc_result[1];
-        pdc_coproc_info->revision = pdc_result[17];
-        pdc_coproc_info->model = pdc_result[18];
-        spin_unlock_irqrestore(&pdc_lock, flags);
+	spin_lock_irqsave(&pdc_lock, flags);
+	ret = pdc_coproc_cfg_unlocked(pdc_coproc_info);
+	spin_unlock_irqrestore(&pdc_lock, flags);
 
-        return retval;
+	return ret;
 }
 
 /**
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index a84e31e..0e3d9f9 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -121,7 +121,7 @@
 	copy		%r0,%r2
 
 	/* And the RFI Target address too */
-	load32		start_kernel,%r11
+	load32		start_parisc,%r11
 
 	/* And the initial task pointer */
 	load32		init_thread_union,%r6
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 49c6379..90904f9 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
  * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
  * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
  */
 
 #include <linux/kernel.h>
@@ -27,15 +28,149 @@
 /* PSW bits we allow the debugger to modify */
 #define USER_PSW_BITS	(PSW_N | PSW_V | PSW_CB)
 
-#undef DEBUG_PTRACE
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *task)
+{
+	task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
 
-#ifdef DEBUG_PTRACE
-#define DBG(x...)	printk(x)
-#else
-#define DBG(x...)
-#endif
+	/* make sure the trap bits are not set */
+	pa_psw(task)->r = 0;
+	pa_psw(task)->t = 0;
+	pa_psw(task)->h = 0;
+	pa_psw(task)->l = 0;
+}
 
-#ifdef CONFIG_64BIT
+/*
+ * The following functions are called by ptrace_resume() when
+ * enabling or disabling single/block tracing.
+ */
+void user_disable_single_step(struct task_struct *task)
+{
+	ptrace_disable(task);
+}
+
+void user_enable_single_step(struct task_struct *task)
+{
+	task->ptrace &= ~PT_BLOCKSTEP;
+	task->ptrace |= PT_SINGLESTEP;
+
+	if (pa_psw(task)->n) {
+		struct siginfo si;
+
+		/* Nullified, just crank over the queue. */
+		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
+		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
+		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
+		pa_psw(task)->n = 0;
+		pa_psw(task)->x = 0;
+		pa_psw(task)->y = 0;
+		pa_psw(task)->z = 0;
+		pa_psw(task)->b = 0;
+		ptrace_disable(task);
+		/* Don't wake up the task, but let the
+		   parent know something happened. */
+		si.si_code = TRAP_TRACE;
+		si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
+		si.si_signo = SIGTRAP;
+		si.si_errno = 0;
+		force_sig_info(SIGTRAP, &si, task);
+		/* notify_parent(task, SIGCHLD); */
+		return;
+	}
+
+	/* Enable recovery counter traps.  The recovery counter
+	 * itself will be set to zero on a task switch.  If the
+	 * task is suspended on a syscall then the syscall return
+	 * path will overwrite the recovery counter with a suitable
+	 * value such that it traps once back in user space.  We
+	 * disable interrupts in the tasks PSW here also, to avoid
+	 * interrupts while the recovery counter is decrementing.
+	 */
+	pa_psw(task)->r = 1;
+	pa_psw(task)->t = 0;
+	pa_psw(task)->h = 0;
+	pa_psw(task)->l = 0;
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+	task->ptrace &= ~PT_SINGLESTEP;
+	task->ptrace |= PT_BLOCKSTEP;
+
+	/* Enable taken branch trap. */
+	pa_psw(task)->r = 0;
+	pa_psw(task)->t = 1;
+	pa_psw(task)->h = 0;
+	pa_psw(task)->l = 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+	unsigned long tmp;
+	long ret = -EIO;
+
+	switch (request) {
+
+	/* Read the word at location addr in the USER area.  For ptraced
+	   processes, the kernel saves all regs on a syscall. */
+	case PTRACE_PEEKUSR:
+		if ((addr & (sizeof(long)-1)) ||
+		    (unsigned long) addr >= sizeof(struct pt_regs))
+			break;
+		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
+		ret = put_user(tmp, (unsigned long *) data);
+		break;
+
+	/* Write the word at location addr in the USER area.  This will need
+	   to change when the kernel no longer saves all regs on a syscall.
+	   FIXME.  There is a problem at the moment in that r3-r18 are only
+	   saved if the process is ptraced on syscall entry, and even then
+	   those values are overwritten by actual register values on syscall
+	   exit. */
+	case PTRACE_POKEUSR:
+		/* Some register values written here may be ignored in
+		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+		 * r31/r31+4, and not with the values in pt_regs.
+		 */
+		if (addr == PT_PSW) {
+			/* Allow writing to Nullify, Divide-step-correction,
+			 * and carry/borrow bits.
+			 * BEWARE, if you set N, and then single step, it won't
+			 * stop on the nullified instruction.
+			 */
+			data &= USER_PSW_BITS;
+			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
+			task_regs(child)->gr[0] |= data;
+			ret = 0;
+			break;
+		}
+
+		if ((addr & (sizeof(long)-1)) ||
+		    (unsigned long) addr >= sizeof(struct pt_regs))
+			break;
+		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
+				addr == PT_SAR) {
+			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
+			ret = 0;
+		}
+		break;
+
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+
+#ifdef CONFIG_COMPAT
 
 /* This function is needed to translate 32 bit pt_regs offsets in to
  * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
@@ -61,106 +196,25 @@
 	else
 		return -1;
 }
-#endif
 
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+			compat_ulong_t addr, compat_ulong_t data)
 {
-	/* make sure the trap bits are not set */
-	pa_psw(child)->r = 0;
-	pa_psw(child)->t = 0;
-	pa_psw(child)->h = 0;
-	pa_psw(child)->l = 0;
-}
-
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
-{
-	long ret;
-#ifdef DEBUG_PTRACE
-	long oaddr=addr, odata=data;
-#endif
+	compat_uint_t tmp;
+	long ret = -EIO;
 
 	switch (request) {
-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-#ifdef CONFIG_64BIT
-		if (__is_compat_task(child)) {
-			int copied;
-			unsigned int tmp;
 
-			addr &= 0xffffffffL;
-			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-			ret = -EIO;
-			if (copied != sizeof(tmp))
-				goto out_tsk;
-			ret = put_user(tmp,(unsigned int *) data);
-			DBG("sys_ptrace(PEEK%s, %d, %lx, %lx) returning %ld, data %x\n",
-				request == PTRACE_PEEKTEXT ? "TEXT" : "DATA",
-				pid, oaddr, odata, ret, tmp);
-		}
-		else
-#endif
-			ret = generic_ptrace_peekdata(child, addr, data);
-		goto out_tsk;
-	}
+	case PTRACE_PEEKUSR:
+		if (addr & (sizeof(compat_uint_t)-1))
+			break;
+		addr = translate_usr_offset(addr);
+		if (addr < 0)
+			break;
 
-	/* when I and D space are separate, this will have to be fixed. */
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-#ifdef CONFIG_64BIT
-		if (__is_compat_task(child)) {
-			unsigned int tmp = (unsigned int)data;
-			DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n",
-				request == PTRACE_POKETEXT ? "TEXT" : "DATA",
-				pid, oaddr, odata);
-			addr &= 0xffffffffL;
-			if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) == sizeof(tmp))
-				goto out_tsk;
-		}
-		else
-#endif
-		{
-			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-				goto out_tsk;
-		}
-		ret = -EIO;
-		goto out_tsk;
-
-	/* Read the word at location addr in the USER area.  For ptraced
-	   processes, the kernel saves all regs on a syscall. */
-	case PTRACE_PEEKUSR: {
-		ret = -EIO;
-#ifdef CONFIG_64BIT
-		if (__is_compat_task(child)) {
-			unsigned int tmp;
-
-			if (addr & (sizeof(int)-1))
-				goto out_tsk;
-			if ((addr = translate_usr_offset(addr)) < 0)
-				goto out_tsk;
-
-			tmp = *(unsigned int *) ((char *) task_regs(child) + addr);
-			ret = put_user(tmp, (unsigned int *) data);
-			DBG("sys_ptrace(PEEKUSR, %d, %lx, %lx) returning %ld, addr %lx, data %x\n",
-				pid, oaddr, odata, ret, addr, tmp);
-		}
-		else
-#endif
-		{
-			unsigned long tmp;
-
-			if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
-				goto out_tsk;
-			tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
-			ret = put_user(tmp, (unsigned long *) data);
-		}
-		goto out_tsk;
-	}
+		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
+		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
+		break;
 
 	/* Write the word at location addr in the USER area.  This will need
 	   to change when the kernel no longer saves all regs on a syscall.
@@ -169,185 +223,46 @@
 	   those values are overwritten by actual register values on syscall
 	   exit. */
 	case PTRACE_POKEUSR:
-		ret = -EIO;
 		/* Some register values written here may be ignored in
 		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
 		 * r31/r31+4, and not with the values in pt_regs.
 		 */
-		 /* PT_PSW=0, so this is valid for 32 bit processes under 64
-		 * bit kernels.
-		 */
 		if (addr == PT_PSW) {
-			/* PT_PSW=0, so this is valid for 32 bit processes
-			 * under 64 bit kernels.
-			 *
-			 * Allow writing to Nullify, Divide-step-correction,
-			 * and carry/borrow bits.
-			 * BEWARE, if you set N, and then single step, it won't
-			 * stop on the nullified instruction.
+			/* Since PT_PSW==0, it is valid for 32 bit processes
+			 * under 64 bit kernels as well.
 			 */
-			DBG("sys_ptrace(POKEUSR, %d, %lx, %lx)\n",
-				pid, oaddr, odata);
-			data &= USER_PSW_BITS;
-			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
-			task_regs(child)->gr[0] |= data;
-			ret = 0;
-			goto out_tsk;
-		}
-#ifdef CONFIG_64BIT
-		if (__is_compat_task(child)) {
-			if (addr & (sizeof(int)-1))
-				goto out_tsk;
-			if ((addr = translate_usr_offset(addr)) < 0)
-				goto out_tsk;
-			DBG("sys_ptrace(POKEUSR, %d, %lx, %lx) addr %lx\n",
-				pid, oaddr, odata, addr);
+			ret = arch_ptrace(child, request, addr, data);
+		} else {
+			if (addr & (sizeof(compat_uint_t)-1))
+				break;
+			addr = translate_usr_offset(addr);
+			if (addr < 0)
+				break;
 			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
 				/* Special case, fp regs are 64 bits anyway */
-				*(unsigned int *) ((char *) task_regs(child) + addr) = data;
+				*(__u64 *) ((char *) task_regs(child) + addr) = data;
 				ret = 0;
 			}
 			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
 					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
 					addr == PT_SAR+4) {
 				/* Zero the top 32 bits */
-				*(unsigned int *) ((char *) task_regs(child) + addr - 4) = 0;
-				*(unsigned int *) ((char *) task_regs(child) + addr) = data;
+				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
+				*(__u32 *) ((char *) task_regs(child) + addr) = data;
 				ret = 0;
 			}
-			goto out_tsk;
 		}
-		else
-#endif
-		{
-			if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
-				goto out_tsk;
-			if ((addr >= PT_GR1 && addr <= PT_GR31) ||
-					addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
-					(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
-					addr == PT_SAR) {
-				*(unsigned long *) ((char *) task_regs(child) + addr) = data;
-				ret = 0;
-			}
-			goto out_tsk;
-		}
-
-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-	case PTRACE_CONT:
-		ret = -EIO;
-		DBG("sys_ptrace(%s)\n",
-			request == PTRACE_SYSCALL ? "SYSCALL" : "CONT");
-		if (!valid_signal(data))
-			goto out_tsk;
-		child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
-		if (request == PTRACE_SYSCALL) {
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		} else {
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		}		
-		child->exit_code = data;
-		goto out_wake_notrap;
-
-	case PTRACE_KILL:
-		/*
-		 * make the child exit.  Best I can do is send it a
-		 * sigkill.  perhaps it should be put in the status
-		 * that it wants to exit.
-		 */
-		ret = 0;
-		DBG("sys_ptrace(KILL)\n");
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			goto out_tsk;
-		child->exit_code = SIGKILL;
-		goto out_wake_notrap;
-
-	case PTRACE_SINGLEBLOCK:
-		DBG("sys_ptrace(SINGLEBLOCK)\n");
-		ret = -EIO;
-		if (!valid_signal(data))
-			goto out_tsk;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->ptrace &= ~PT_SINGLESTEP;
-		child->ptrace |= PT_BLOCKSTEP;
-		child->exit_code = data;
-
-		/* Enable taken branch trap. */
-		pa_psw(child)->r = 0;
-		pa_psw(child)->t = 1;
-		pa_psw(child)->h = 0;
-		pa_psw(child)->l = 0;
-		goto out_wake;
-
-	case PTRACE_SINGLESTEP:
-		DBG("sys_ptrace(SINGLESTEP)\n");
-		ret = -EIO;
-		if (!valid_signal(data))
-			goto out_tsk;
-
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->ptrace &= ~PT_BLOCKSTEP;
-		child->ptrace |= PT_SINGLESTEP;
-		child->exit_code = data;
-
-		if (pa_psw(child)->n) {
-			struct siginfo si;
-
-			/* Nullified, just crank over the queue. */
-			task_regs(child)->iaoq[0] = task_regs(child)->iaoq[1];
-			task_regs(child)->iasq[0] = task_regs(child)->iasq[1];
-			task_regs(child)->iaoq[1] = task_regs(child)->iaoq[0] + 4;
-			pa_psw(child)->n = 0;
-			pa_psw(child)->x = 0;
-			pa_psw(child)->y = 0;
-			pa_psw(child)->z = 0;
-			pa_psw(child)->b = 0;
-			ptrace_disable(child);
-			/* Don't wake up the child, but let the
-			   parent know something happened. */
-			si.si_code = TRAP_TRACE;
-			si.si_addr = (void __user *) (task_regs(child)->iaoq[0] & ~3);
-			si.si_signo = SIGTRAP;
-			si.si_errno = 0;
-			force_sig_info(SIGTRAP, &si, child);
-			//notify_parent(child, SIGCHLD);
-			//ret = 0;
-			goto out_wake;
-		}
-
-		/* Enable recovery counter traps.  The recovery counter
-		 * itself will be set to zero on a task switch.  If the
-		 * task is suspended on a syscall then the syscall return
-		 * path will overwrite the recovery counter with a suitable
-		 * value such that it traps once back in user space.  We
-		 * disable interrupts in the childs PSW here also, to avoid
-		 * interrupts while the recovery counter is decrementing.
-		 */
-		pa_psw(child)->r = 1;
-		pa_psw(child)->t = 0;
-		pa_psw(child)->h = 0;
-		pa_psw(child)->l = 0;
-		/* give it a chance to run. */
-		goto out_wake;
-
-	case PTRACE_GETEVENTMSG:
-                ret = put_user(child->ptrace_message, (unsigned int __user *) data);
-		goto out_tsk;
+		break;
 
 	default:
-		ret = ptrace_request(child, request, addr, data);
-		goto out_tsk;
+		ret = compat_ptrace_request(child, request, addr, data);
+		break;
 	}
 
-out_wake_notrap:
-	ptrace_disable(child);
-out_wake:
-	wake_up_process(child);
-	ret = 0;
-out_tsk:
-	DBG("arch_ptrace(%ld, %d, %lx, %lx) returning %ld\n",
-		request, pid, oaddr, odata, ret);
 	return ret;
 }
+#endif
+
 
 void syscall_trace(void)
 {
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 7a92695..5f3d3a1 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -8,12 +8,24 @@
  *
  */
 
+#include <asm/pdc.h>
 #include <asm/psw.h>
 #include <asm/assembly.h>
+#include <asm/asm-offsets.h>
 
 #include <linux/linkage.h>
 
+
 	.section	.bss
+
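+	/* PDC return buffers, referenced as extern from kernel/firmware.c;
+	   ASM_PDC_RESULT_SIZE comes from asm-offsets.c */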
+	.export pdc_result
+	.export pdc_result2
+	.align 8
+pdc_result:
+	.block	ASM_PDC_RESULT_SIZE
+pdc_result2:
+	.block	ASM_PDC_RESULT_SIZE
+
 	.export real_stack
 	.export real32_stack
 	.export real64_stack
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 39e7c5a..7d27853 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -44,6 +44,7 @@
 #include <asm/pdc_chassis.h>
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/unwind.h>
 
 static char __initdata command_line[COMMAND_LINE_SIZE];
 
@@ -123,6 +124,7 @@
 #ifdef CONFIG_64BIT
 	extern int parisc_narrow_firmware;
 #endif
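+	/* import the kernel unwind info early so backtraces work during
+	 * setup; this used to run much later as a module_init() call */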
+	unwind_init();
 
 	init_per_cpu(smp_processor_id());	/* Set Modes & Enable FP */
 
@@ -368,6 +370,31 @@
 
 	return 0;
 }
-
 arch_initcall(parisc_init);
 
+void start_parisc(void)
+{
+	extern void start_kernel(void);
+
+	int ret, cpunum;
+	struct pdc_coproc_cfg coproc_cfg;
+
+	cpunum = smp_processor_id();
+
+	set_firmware_width_unlocked();
+
+	ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
+	if (ret >= 0 && coproc_cfg.ccr_functional) {
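+		/* cr10 is the coprocessor configuration register (CCR);
+		 * enabling the FPU bits here is what makes the fstd below
+		 * (and later FP state handling) legal */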
+		mtctl(coproc_cfg.ccr_functional, 10);
+
+		cpu_data[cpunum].fp_rev = coproc_cfg.revision;
+		cpu_data[cpunum].fp_model = coproc_cfg.model;
+
+		asm volatile ("fstd	%fr0,8(%sp)");
+	} else {
+		panic("must have an fpu to boot linux");
+	}
+
+	start_kernel();
+	/* not reached */
+}
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index c7e59f5..303d2b6 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -87,7 +87,7 @@
 	ENTRY_SAME(setuid)
 	ENTRY_SAME(getuid)
 	ENTRY_COMP(stime)		/* 25 */
-	ENTRY_SAME(ptrace)
+	ENTRY_COMP(ptrace)
 	ENTRY_SAME(alarm)
 	/* see stat comment */
 	ENTRY_COMP(newfstat)
@@ -407,6 +407,12 @@
 	ENTRY_SAME(timerfd_create)
 	ENTRY_COMP(timerfd_settime)
 	ENTRY_COMP(timerfd_gettime)
+	ENTRY_COMP(signalfd4)
+	ENTRY_SAME(eventfd2)		/* 310 */
+	ENTRY_SAME(epoll_create1)
+	ENTRY_SAME(dup3)
+	ENTRY_SAME(pipe2)
+	ENTRY_SAME(inotify_init1)
 
 	/* Nothing yet */
 
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 24be86b..4d09203 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -23,6 +23,7 @@
 #include <linux/smp.h>
 #include <linux/profile.h>
 #include <linux/clocksource.h>
+#include <linux/platform_device.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -215,6 +216,24 @@
 	cpu_data[cpu].it_value = next_tick;
 }
 
+struct platform_device rtc_parisc_dev = {
+	.name = "rtc-parisc",
+	.id = -1,
+};
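+/* matched by name against a platform RTC driver ("rtc-parisc"); registering
+ * the device here is what triggers that driver's probe */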
+
+static int __init rtc_init(void)
+{
+	int ret;
+
+	ret = platform_device_register(&rtc_parisc_dev);
+	if (ret < 0)
+		printk(KERN_ERR "unable to register rtc device...\n");
+
+	/* not necessarily an error */
+	return 0;
+}
+module_init(rtc_init);
+
 void __init time_init(void)
 {
 	static struct pdc_tod tod_data;
@@ -245,4 +264,3 @@
 		xtime.tv_nsec = 0;
 	}
 }
-
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 701b2d2..6773c58 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -170,7 +170,7 @@
 }
 
 /* Called from setup_arch to import the kernel unwind info */
-static int unwind_init(void)
+int unwind_init(void)
 {
 	long start, stop;
 	register unsigned long gp __asm__ ("r27");
@@ -417,5 +417,3 @@
 
 	return ret;
 }
-
-module_init(unwind_init);
diff --git a/arch/parisc/oprofile/init.c b/arch/parisc/oprofile/init.c
index 113f513..026cba2 100644
--- a/arch/parisc/oprofile/init.c
+++ b/arch/parisc/oprofile/init.c
@@ -12,7 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/oprofile.h>
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 380baa1..5b15278 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -19,9 +19,6 @@
 	default 64 if PPC64
 	default 32 if !PPC64
 
-config PPC_MERGE
-	def_bool y
-
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool PPC64 || PHYS_64BIT
 
@@ -230,6 +227,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 source "arch/powerpc/sysdev/Kconfig"
 source "arch/powerpc/platforms/Kconfig"
 
@@ -324,13 +323,11 @@
 
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
-	depends on PPC_MULTIPLATFORM && PPC64
+	depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
-	  The kernel will be linked at a different address than normal, and
-	  so can only be used for Kdump.
-
-	  Don't change this unless you know what you are doing.
+	  The same kernel binary can be used as a production kernel and as a
+	  dump capture kernel, since it is now built relocatable.
 
 config PHYP_DUMP
 	bool "Hypervisor-assisted dump (EXPERIMENTAL)"
@@ -830,11 +827,9 @@
 	default "0xc000000000000000"
 config KERNEL_START
 	hex
-	default "0xc000000002000000" if CRASH_DUMP
 	default "0xc000000000000000"
 config PHYSICAL_START
 	hex
-	default "0x02000000" if CRASH_DUMP
 	default "0x00000000"
 endif
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index aac1406..8fc6d72 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -68,7 +68,8 @@
 		fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \
 		cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
 		cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
-		virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c
+		virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
+		cuboot-acadia.c
 src-boot := $(src-wlib) $(src-plat) empty.c
 
 src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -211,6 +212,7 @@
 # Board ports in arch/powerpc/platform/40x/Kconfig
 image-$(CONFIG_EP405)			+= dtbImage.ep405
 image-$(CONFIG_WALNUT)			+= treeImage.walnut
+image-$(CONFIG_ACADIA)			+= cuImage.acadia
 
 # Board ports in arch/powerpc/platform/44x/Kconfig
 image-$(CONFIG_EBONY)			+= treeImage.ebony cuImage.ebony
@@ -319,6 +321,9 @@
 $(obj)/uImage: vmlinux $(wrapperbits)
 	$(call if_changed,wrap,uboot)
 
+$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+	$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+
 $(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
 	$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb)
 
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index dcc9ab2..3091d1d 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -11,7 +11,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * Usage: addnote zImage [note.elf]
+ * Usage: addnote [-r realbase] zImage [note.elf]
  *
  * If note.elf is supplied, it is the name of an ELF file that contains
  * an RPA note to use instead of the built-in one.  Alternatively, the
@@ -153,18 +153,31 @@
 int
 main(int ac, char **av)
 {
-	int fd, n, i;
+	int fd, n, i, ai;
 	int ph, ps, np;
 	int nnote, nnote2, ns;
 	unsigned char *rpap;
+	char *p, *endp;
 
-	if (ac != 2 && ac != 3) {
-		fprintf(stderr, "Usage: %s elf-file [rpanote.elf]\n", av[0]);
+	ai = 1;
+	if (ac >= ai + 2 && strcmp(av[ai], "-r") == 0) {
+		/* process -r realbase */
+		p = av[ai + 1];
+		descr[1] = strtol(p, &endp, 16);
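+		/* assumption: descr[1] is the real-base word of the built-in
+		 * note descriptor, which -r overrides (value parsed as hex) */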
+		if (endp == p || *endp != 0) {
+			fprintf(stderr, "Can't parse -r argument '%s' as hex\n",
+				p);
+			exit(1);
+		}
+		ai += 2;
+	}
+	if (ac != ai + 1 && ac != ai + 2) {
+		fprintf(stderr, "Usage: %s [-r realbase] elf-file [rpanote.elf]\n", av[0]);
 		exit(1);
 	}
-	fd = open(av[1], O_RDWR);
+	fd = open(av[ai], O_RDWR);
 	if (fd < 0) {
-		perror(av[1]);
+		perror(av[ai]);
 		exit(1);
 	}
 
@@ -184,12 +197,12 @@
 	if (buf[E_IDENT+EI_CLASS] != ELFCLASS32
 	    || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) {
 		fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n",
-			av[1]);
+			av[ai]);
 		exit(1);
 	}
 
-	if (ac == 3)
-		rpap = read_rpanote(av[2], &nnote2);
+	if (ac == ai + 2)
+		rpap = read_rpanote(av[ai + 1], &nnote2);
 
 	ph = GET_32BE(buf, E_PHOFF);
 	ps = GET_16BE(buf, E_PHENTSIZE);
@@ -202,7 +215,7 @@
 	for (i = 0; i < np; ++i) {
 		if (GET_32BE(buf, ph + PH_TYPE) == PT_NOTE) {
 			fprintf(stderr, "%s already has a note entry\n",
-				av[1]);
+				av[ai]);
 			exit(0);
 		}
 		ph += ps;
@@ -260,18 +273,18 @@
 		exit(1);
 	}
 	if (i < n) {
-		fprintf(stderr, "%s: write truncated\n", av[1]);
+		fprintf(stderr, "%s: write truncated\n", av[ai]);
 		exit(1);
 	}
 
 	exit(0);
 
  notelf:
-	fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]);
+	fprintf(stderr, "%s does not appear to be an ELF file\n", av[ai]);
 	exit(1);
 
  nospace:
 	fprintf(stderr, "sorry, I can't find space in %s to put the note\n",
-		av[1]);
+		av[ai]);
 	exit(1);
 }
diff --git a/arch/powerpc/boot/cuboot-52xx.c b/arch/powerpc/boot/cuboot-52xx.c
index a861154..4c42ec8 100644
--- a/arch/powerpc/boot/cuboot-52xx.c
+++ b/arch/powerpc/boot/cuboot-52xx.c
@@ -37,6 +37,10 @@
 	 * this can do a simple path lookup.
 	 */
 	soc = find_node_by_devtype(NULL, "soc");
+	if (!soc)
+		soc = find_node_by_compatible(NULL, "fsl,mpc5200-immr");
+	if (!soc)
+		soc = find_node_by_compatible(NULL, "fsl,mpc5200b-immr");
 	if (soc) {
 		setprop(soc, "bus-frequency", &bd.bi_ipbfreq,
 			sizeof(bd.bi_ipbfreq));
diff --git a/arch/powerpc/boot/cuboot-acadia.c b/arch/powerpc/boot/cuboot-acadia.c
new file mode 100644
index 0000000..0634aba
--- /dev/null
+++ b/arch/powerpc/boot/cuboot-acadia.c
@@ -0,0 +1,174 @@
+/*
+ * Old U-boot compatibility for Acadia
+ *
+ * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
+ *
+ * Copyright 2008 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include "ops.h"
+#include "io.h"
+#include "dcr.h"
+#include "stdio.h"
+#include "4xx.h"
+#include "44x.h"
+#include "cuboot.h"
+
+#define TARGET_4xx
+#include "ppcboot.h"
+
+static bd_t bd;
+
+#define CPR_PERD0_SPIDV_MASK   0x000F0000     /* SPI Clock Divider */
+
+#define PLLC_SRC_MASK	       0x20000000     /* PLL feedback source */
+
+#define PLLD_FBDV_MASK	       0x1F000000     /* PLL feedback divider value */
+#define PLLD_FWDVA_MASK        0x000F0000     /* PLL forward divider A value */
+#define PLLD_FWDVB_MASK        0x00000700     /* PLL forward divider B value */
+
+#define PRIMAD_CPUDV_MASK      0x0F000000     /* CPU Clock Divisor Mask */
+#define PRIMAD_PLBDV_MASK      0x000F0000     /* PLB Clock Divisor Mask */
+#define PRIMAD_OPBDV_MASK      0x00000F00     /* OPB Clock Divisor Mask */
+#define PRIMAD_EBCDV_MASK      0x0000000F     /* EBC Clock Divisor Mask */
+
+#define PERD0_PWMDV_MASK       0xFF000000     /* PWM Divider Mask */
+#define PERD0_SPIDV_MASK       0x000F0000     /* SPI Divider Mask */
+#define PERD0_U0DV_MASK        0x0000FF00     /* UART 0 Divider Mask */
+#define PERD0_U1DV_MASK        0x000000FF     /* UART 1 Divider Mask */
+
+static void get_clocks(void)
+{
+	unsigned long sysclk, cpr_plld, cpr_pllc, cpr_primad, plloutb, i;
+	unsigned long pllFwdDiv, pllFwdDivB, pllFbkDiv, pllPlbDiv, pllExtBusDiv;
+	unsigned long pllOpbDiv, freqEBC, freqUART, freqOPB;
+	unsigned long div;		/* total divisor udiv * bdiv */
+	unsigned long umin;		/* minimum udiv	*/
+	unsigned short diff;		/* smallest diff */
+	unsigned long udiv;		/* best udiv */
+	unsigned short idiff;		/* current diff */
+	unsigned short ibdiv;		/* current bdiv */
+	unsigned long est;		/* current estimate */
+	unsigned long baud;
+	void *np;
+
+	/* read the sysclk value from the CPLD */
+	sysclk = (in_8((unsigned char *)0x80000000) == 0xc) ? 66666666 : 33333000;
+
+	/*
+	 * Read PLL Mode registers
+	 */
+	cpr_plld = CPR0_READ(DCRN_CPR0_PLLD);
+	cpr_pllc = CPR0_READ(DCRN_CPR0_PLLC);
+
+	/*
+	 * Determine forward divider A
+	 */
+	pllFwdDiv = ((cpr_plld & PLLD_FWDVA_MASK) >> 16);
+
+	/*
+	 * Determine forward divider B
+	 */
+	pllFwdDivB = ((cpr_plld & PLLD_FWDVB_MASK) >> 8);
+	if (pllFwdDivB == 0)
+		pllFwdDivB = 8;
+
+	/*
+	 * Determine FBK_DIV.
+	 */
+	pllFbkDiv = ((cpr_plld & PLLD_FBDV_MASK) >> 24);
+	if (pllFbkDiv == 0)
+		pllFbkDiv = 256;
+
+	/*
+	 * Read CPR_PRIMAD register
+	 */
+	cpr_primad = CPR0_READ(DCRN_CPR0_PRIMAD);
+
+	/*
+	 * Determine PLB_DIV.
+	 */
+	pllPlbDiv = ((cpr_primad & PRIMAD_PLBDV_MASK) >> 16);
+	if (pllPlbDiv == 0)
+		pllPlbDiv = 16;
+
+	/*
+	 * Determine EXTBUS_DIV.
+	 */
+	pllExtBusDiv = (cpr_primad & PRIMAD_EBCDV_MASK);
+	if (pllExtBusDiv == 0)
+		pllExtBusDiv = 16;
+
+	/*
+	 * Determine OPB_DIV.
+	 */
+	pllOpbDiv = ((cpr_primad & PRIMAD_OPBDV_MASK) >> 8);
+	if (pllOpbDiv == 0)
+		pllOpbDiv = 16;
+
+	/* There is a bug in U-Boot that prevents us from using
+	 * bd.bi_opbfreq because U-Boot doesn't populate it for
+	 * 405EZ.  We get to calculate it, yay!
+	 */
+	freqOPB = (sysclk * pllFbkDiv) / pllOpbDiv;
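+	/* for illustration only: a 33333000 Hz sysclk with pllFbkDiv = 12 and
+	 * pllOpbDiv = 4 (made-up values) gives an OPB clock of ~100 MHz */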
+
+	freqEBC = (sysclk * pllFbkDiv) / pllExtBusDiv;
+
+	plloutb = ((sysclk * ((cpr_pllc & PLLC_SRC_MASK) ?
+					   pllFwdDivB : pllFwdDiv) *
+		    pllFbkDiv) / pllFwdDivB);
+
+	np = find_node_by_alias("serial0");
+	if (getprop(np, "current-speed", &baud, sizeof(baud)) != sizeof(baud))
+		fatal("no current-speed property\n\r");
+
+	udiv = 256;			/* Assume lowest possible serial clk */
+	div = plloutb / (16 * baud); /* total divisor */
+	umin = (plloutb / freqOPB) << 1;	/* 2 x OPB divisor */
+	diff = 256;			/* highest possible */
+
+	/* i is the test udiv value -- start with the largest
+	 * possible (256) to minimize serial clock and constrain
+	 * search to umin.
+	 */
+	for (i = 256; i > umin; i--) {
+		ibdiv = div / i;
+		est = i * ibdiv;
+		idiff = (est > div) ? (est-div) : (div-est);
+		if (idiff == 0) {
+			udiv = i;
+			break;      /* can't do better */
+		} else if (idiff < diff) {
+			udiv = i;       /* best so far */
+			diff = idiff;   /* update lowest diff */
+		}
+	}
+	freqUART = plloutb / udiv;
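+	/* this is the clock handed to both ns16550 nodes below via
+	 * dt_fixup_clock() */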
+
+	dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_intfreq, bd.bi_plb_busfreq);
+	dt_fixup_clock("/plb/ebc", freqEBC);
+	dt_fixup_clock("/plb/opb", freqOPB);
+	dt_fixup_clock("/plb/opb/serial@ef600300", freqUART);
+	dt_fixup_clock("/plb/opb/serial@ef600400", freqUART);
+}
+
+static void acadia_fixups(void)
+{
+	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
+	get_clocks();
+	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
+}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		unsigned long r6, unsigned long r7)
+{
+	CUBOOT_INIT();
+	platform_ops.fixups = acadia_fixups;
+	platform_ops.exit = ibm40x_dbcr_reset;
+	fdt_init(_dtb_start);
+	serial_console_init();
+}
diff --git a/arch/powerpc/boot/dts/acadia.dts b/arch/powerpc/boot/dts/acadia.dts
new file mode 100644
index 0000000..57291f6
--- /dev/null
+++ b/arch/powerpc/boot/dts/acadia.dts
@@ -0,0 +1,224 @@
+/*
+ * Device Tree Source for AMCC Acadia (405EZ)
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	model = "amcc,acadia";
+	compatible = "amcc,acadia";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		ethernet0 = &EMAC0;
+		serial0 = &UART0;
+		serial1 = &UART1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,405EZ";
+			reg = <0x0>;
+			clock-frequency = <0>; /* Filled in by wrapper */
+			timebase-frequency = <0>; /* Filled in by wrapper */
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <16384>;
+			d-cache-size = <16384>;
+			dcr-controller;
+			dcr-access-method = "native";
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x0>; /* Filled in by wrapper */
+	};
+
+	UIC0: interrupt-controller {
+		compatible = "ibm,uic-405ez", "ibm,uic";
+		interrupt-controller;
+		dcr-reg = <0x0c0 0x009>;
+		cell-index = <0>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+	};
+
+	plb {
+		compatible = "ibm,plb-405ez", "ibm,plb3";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		clock-frequency = <0>; /* Filled in by wrapper */
+
+		MAL0: mcmal {
+			compatible = "ibm,mcmal-405ez", "ibm,mcmal";
+			dcr-reg = <0x380 0x62>;
+			num-tx-chans = <1>;
+			num-rx-chans = <1>;
+			interrupt-parent = <&UIC0>;
+			/* 405EZ has only 3 interrupts to the UIC, as
+			 * SERR, TXDE, and RXDE are or'd together into
+			 * one UIC bit
+			 */
+			interrupts = <
+				0x13 0x4 /* TXEOB */
+				0x15 0x4 /* RXEOB */
+				0x12 0x4 /* SERR, TXDE, RXDE */>;
+		};
+
+		POB0: opb {
+			compatible = "ibm,opb-405ez", "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			dcr-reg = <0x0a 0x05>;
+			clock-frequency = <0>; /* Filled in by wrapper */
+
+			UART0: serial@ef600300 {
+				device_type = "serial";
+				compatible = "ns16550";
+				reg = <0xef600300 0x8>;
+				virtual-reg = <0xef600300>;
+				clock-frequency = <0>; /* Filled in by wrapper */
+				current-speed = <115200>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x5 0x4>;
+			};
+
+			UART1: serial@ef600400 {
+				device_type = "serial";
+				compatible = "ns16550";
+				reg = <0xef600400 0x8>;
+				clock-frequency = <0>; /* Filled in by wrapper */
+				current-speed = <115200>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x6 0x4>;
+			};
+
+			IIC: i2c@ef600500 {
+				compatible = "ibm,iic-405ez", "ibm,iic";
+				reg = <0xef600500 0x11>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xa 0x4>;
+			};
+
+			GPIO0: gpio@ef600700 {
+				compatible = "ibm,gpio-405ez";
+				reg = <0xef600700 0x20>;
+			};
+
+			GPIO1: gpio@ef600800 {
+				compatible = "ibm,gpio-405ez";
+				reg = <0xef600800 0x20>;
+			};
+
+			EMAC0: ethernet@ef600900 {
+				device_type = "network";
+				compatible = "ibm,emac-405ez", "ibm,emac";
+				interrupt-parent = <&UIC0>;
+				interrupts = <
+					0x10 0x4 /* Ethernet */
+					0x11 0x4 /* Ethernet Wake up */>;
+				local-mac-address = [000000000000]; /* Filled in by wrapper */
+				reg = <0xef600900 0x70>;
+				mal-device = <&MAL0>;
+				mal-tx-channel = <0>;
+				mal-rx-channel = <0>;
+				cell-index = <0>;
+				max-frame-size = <1500>;
+				rx-fifo-size = <4096>;
+				tx-fifo-size = <2048>;
+				phy-mode = "mii";
+				phy-map = <0x0>;
+			};
+
+			CAN0: can@ef601000 {
+				compatible = "amcc,can-405ez";
+				reg = <0xef601000 0x620>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x7 0x4>;
+			};
+
+			CAN1: can@ef601800 {
+				compatible = "amcc,can-405ez";
+				reg = <0xef601800 0x620>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x8 0x4>;
+			};
+
+			cameleon@ef602000 {
+				compatible = "amcc,cameleon-405ez";
+				reg = <0xef602000 0x800>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xb 0x4 0xc 0x4>;
+			};
+
+			ieee1588@ef602800 {
+				compatible = "amcc,ieee1588-405ez";
+				reg = <0xef602800 0x60>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x4 0x4>;
+				/* This thing is a bit weird.  It has its own UIC
+				 * that it uses to generate snapshot triggers.  We
+				 * don't really support this device yet, and it needs
+				 * work to figure this out.
+				 */
+				dcr-reg = <0xe0 0x9>;
+			};
+
+			usb@ef603000 {
+				compatible = "ohci-be";
+				reg = <0xef603000 0x80>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xd 0x4 0xe 0x4>;
+			};
+
+			dac@ef603300 {
+				compatible = "amcc,dac-405ez";
+				reg = <0xef603300 0x40>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x18 0x4>;
+			};
+
+			adc@ef603400 {
+				compatible = "amcc,adc-405ez";
+				reg = <0xef603400 0x40>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x17 0x4>;
+			};
+
+			spi@ef603500 {
+				compatible = "amcc,spi-405ez";
+				reg = <0xef603500 0x100>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x9 0x4>;
+			};
+		};
+
+		EBC0: ebc {
+			compatible = "ibm,ebc-405ez", "ibm,ebc";
+			dcr-reg = <0x12 0x2>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			clock-frequency = <0>; /* Filled in by wrapper */
+		};
+	};
+
+	chosen {
+		linux,stdout-path = "/plb/opb/serial@ef600300";
+	};
+};
diff --git a/arch/powerpc/boot/dts/hcu4.dts b/arch/powerpc/boot/dts/hcu4.dts
new file mode 100644
index 0000000..7988598
--- /dev/null
+++ b/arch/powerpc/boot/dts/hcu4.dts
@@ -0,0 +1,168 @@
+/*
+* Device Tree Source for Netstal Maschinen HCU4
+* based on the IBM Walnut
+*
+* Copyright 2008
+* Niklaus Giger <niklaus.giger@member.fsf.org>
+*
+* Copyright 2007 IBM Corp.
+* Josh Boyer <jwboyer@linux.vnet.ibm.com>
+*
+* This file is licensed under the terms of the GNU General Public
+* License version 2.  This program is licensed "as is" without
+* any warranty of any kind, whether express or implied.
+*/
+
+/dts-v1/;
+
+/ {
+	#address-cells = <0x1>;
+	#size-cells = <0x1>;
+	model = "netstal,hcu4";
+	compatible = "netstal,hcu4";
+	dcr-parent = <0x1>;
+
+	aliases {
+		ethernet0 = "/plb/opb/ethernet@ef600800";
+		serial0 = "/plb/opb/serial@ef600300";
+	};
+
+	cpus {
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,405GPr";
+			reg = <0x0>;
+			clock-frequency = <0>;		/* Filled in by U-Boot */
+			timebase-frequency = <0x0>;	/* Filled in by U-Boot */
+			i-cache-line-size = <0x20>;
+			d-cache-line-size = <0x20>;
+			i-cache-size = <0x4000>;
+			d-cache-size = <0x4000>;
+			dcr-controller;
+			dcr-access-method = "native";
+			linux,phandle = <0x1>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x0>;	/* Filled in by U-Boot */
+	};
+
+	UIC0: interrupt-controller {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <0x0>;
+		dcr-reg = <0xc0 0x9>;
+		#address-cells = <0x0>;
+		#size-cells = <0x0>;
+		#interrupt-cells = <0x2>;
+		linux,phandle = <0x2>;
+	};
+
+	plb {
+		compatible = "ibm,plb3";
+		#address-cells = <0x1>;
+		#size-cells = <0x1>;
+		ranges;
+		clock-frequency = <0x0>;	/* Filled in by U-Boot */
+
+		SDRAM0: memory-controller {
+			compatible = "ibm,sdram-405gp";
+			dcr-reg = <0x10 0x2>;
+		};
+
+		MAL: mcmal {
+			compatible = "ibm,mcmal-405gp", "ibm,mcmal";
+			dcr-reg = <0x180 0x62>;
+			num-tx-chans = <0x1>;
+			num-rx-chans = <0x1>;
+			interrupt-parent = <0x2>;
+			interrupts = <0xb 0x4 0xc 0x4 0xa 0x4 0xd 0x4 0xe 0x4>;
+			linux,phandle = <0x3>;
+		};
+
+		POB0: opb {
+			compatible = "ibm,opb-405gp", "ibm,opb";
+			#address-cells = <0x1>;
+			#size-cells = <0x1>;
+			ranges = <0xef600000 0xef600000 0xa00000>;
+			dcr-reg = <0xa0 0x5>;
+			clock-frequency = <0x0>;	/* Filled in by U-Boot */
+
+			UART0: serial@ef600300 {
+				device_type = "serial";
+				compatible = "ns16550";
+				reg = <0xef600300 0x8>;
+				virtual-reg = <0xef600300>;
+				clock-frequency = <0x0>;/* Filled in by U-Boot */
+				current-speed = <0>;	/* Filled in by U-Boot */
+				interrupt-parent = <0x2>;
+				interrupts = <0x0 0x4>;
+			};
+
+			IIC: i2c@ef600500 {
+				compatible = "ibm,iic-405gp", "ibm,iic";
+				reg = <0xef600500 0x11>;
+				interrupt-parent = <0x2>;
+				interrupts = <0x2 0x4>;
+			};
+
+			GPIO: gpio@ef600700 {
+				compatible = "ibm,gpio-405gp";
+				reg = <0xef600700 0x20>;
+			};
+
+			EMAC: ethernet@ef600800 {
+				device_type = "network";
+				compatible = "ibm,emac-405gp", "ibm,emac";
+				interrupt-parent = <0x2>;
+				interrupts = <0xf 0x4 0x9 0x4>;
+				local-mac-address = [00 00 00 00 00 00];
+				reg = <0xef600800 0x70>;
+				mal-device = <0x3>;
+				mal-tx-channel = <0x0>;
+				mal-rx-channel = <0x0>;
+				cell-index = <0x0>;
+				max-frame-size = <0x5dc>;
+				rx-fifo-size = <0x1000>;
+				tx-fifo-size = <0x800>;
+				phy-mode = "rmii";
+				phy-map = <0x1>;
+			};
+		};
+
+		EBC0: ebc {
+			compatible = "ibm,ebc-405gp", "ibm,ebc";
+			dcr-reg = <0x12 0x2>;
+			#address-cells = <0x2>;
+			#size-cells = <0x1>;
+			clock-frequency = <0x0>;	/* Filled in by U-Boot */
+
+			sram@0,0 {
+				reg = <0x0 0x0 0x80000>;
+			};
+
+			flash@0,80000 {
+				compatible = "jedec-flash";
+				bank-width = <0x1>;
+				reg = <0x0 0x80000 0x80000>;
+				#address-cells = <0x1>;
+				#size-cells = <0x1>;
+
+				partition@0 {
+					label = "OpenBIOS";
+					reg = <0x0 0x80000>;
+					read-only;
+				};
+			};
+		};
+	};
+
+	chosen {
+		linux,stdout-path = "/plb/opb/serial@ef600300";
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 7449e54c..6b85067 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -121,6 +121,14 @@
 				compatible = "dallas,ds1339";
 				reg = <0x68>;
 			};
+
+			mcu_pio: mcu@a {
+				#gpio-cells = <2>;
+				compatible = "fsl,mc9s08qg8-mpc8315erdb",
+					     "fsl,mcu-mpc8349emitx";
+				reg = <0x0a>;
+				gpio-controller;
+			};
 		};
 
 		spi@7000 {
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index e4cc176..57c595b 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -60,7 +60,7 @@
 	};
 
 	bcsr@f8000000 {
-		device_type = "board-control";
+		compatible = "fsl,mpc8323mds-bcsr";
 		reg = <0xf8000000 0x8000>;
 	};
 
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 5cedf37..2c9d54a 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -83,6 +83,14 @@
 			interrupts = <15 0x8>;
 			interrupt-parent = <&ipic>;
 			dfsrr;
+
+			rtc@68 {
+				device_type = "rtc";
+				compatible = "dallas,ds1339";
+				reg = <0x68>;
+				interrupts = <18 0x8>;
+				interrupt-parent = <&ipic>;
+			};
 		};
 
 		spi@7000 {
@@ -131,6 +139,14 @@
 				interrupt-parent = <&ipic>;
 				interrupts = <71 8>;
 			};
+
+			mcu_pio: mcu@a {
+				#gpio-cells = <2>;
+				compatible = "fsl,mc9s08qg8-mpc8349emitx",
+					     "fsl,mcu-mpc8349emitx";
+				reg = <0x0a>;
+				gpio-controller;
+			};
 		};
 
 		usb@22000 {
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index 81ae1d3..fa40647 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -81,6 +81,14 @@
 			interrupts = <15 0x8>;
 			interrupt-parent = <&ipic>;
 			dfsrr;
+
+			rtc@68 {
+				device_type = "rtc";
+				compatible = "dallas,ds1339";
+				reg = <0x68>;
+				interrupts = <18 0x8>;
+				interrupt-parent = <&ipic>;
+			};
 		};
 
 		spi@7000 {
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 04bfde3..c986c54 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -49,7 +49,7 @@
 	};
 
 	bcsr@e2400000 {
-		device_type = "board-control";
+		compatible = "fsl,mpc8349mds-bcsr";
 		reg = <0xe2400000 0x8000>;
 	};
 
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 66a12d2..14534d0 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -69,7 +69,7 @@
 		};
 
 		bcsr@1,0 {
-			device_type = "board-control";
+			compatible = "fsl,mpc8360mds-bcsr";
 			reg = <1 0 0x8000>;
 		};
 	};
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 53191ba..435ef3d 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -121,6 +121,14 @@
 				compatible = "dallas,ds1339";
 				reg = <0x68>;
 			};
+
+			mcu_pio: mcu@a {
+				#gpio-cells = <2>;
+				compatible = "fsl,mc9s08qg8-mpc8377erdb",
+					     "fsl,mcu-mpc8349emitx";
+				reg = <0x0a>;
+				gpio-controller;
+			};
 		};
 
 		i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 4a09153..b11e68f 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -121,6 +121,14 @@
 				compatible = "dallas,ds1339";
 				reg = <0x68>;
 			};
+
+			mcu_pio: mcu@a {
+				#gpio-cells = <2>;
+				compatible = "fsl,mc9s08qg8-mpc8378erdb",
+					     "fsl,mcu-mpc8349emitx";
+				reg = <0x0a>;
+				gpio-controller;
+			};
 		};
 
 		i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index bbd884a..337af6e 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -121,6 +121,14 @@
 				compatible = "dallas,ds1339";
 				reg = <0x68>;
 			};
+
+			mcu_pio: mcu@a {
+				#gpio-cells = <2>;
+				compatible = "fsl,mc9s08qg8-mpc8379erdb",
+					     "fsl,mcu-mpc8349emitx";
+				reg = <0x0a>;
+				gpio-controller;
+			};
 		};
 
 		i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index 93fdd99..35db1e5 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -109,7 +109,7 @@
 				reg = <0x0 0x80>;
 				cell-index = <0>;
 				interrupt-parent = <&mpic>;
-				interrupts = <14 0x2>;
+				interrupts = <20 2>;
 			};
 			dma-channel@80 {
 				compatible = "fsl,mpc8536-dma-channel",
@@ -117,7 +117,7 @@
 				reg = <0x80 0x80>;
 				cell-index = <1>;
 				interrupt-parent = <&mpic>;
-				interrupts = <15 0x2>;
+				interrupts = <21 2>;
 			};
 			dma-channel@100 {
 				compatible = "fsl,mpc8536-dma-channel",
@@ -125,7 +125,7 @@
 				reg = <0x100 0x80>;
 				cell-index = <2>;
 				interrupt-parent = <&mpic>;
-				interrupts = <16 0x2>;
+				interrupts = <22 2>;
 			};
 			dma-channel@180 {
 				compatible = "fsl,mpc8536-dma-channel",
@@ -133,7 +133,7 @@
 				reg = <0x180 0x80>;
 				cell-index = <3>;
 				interrupt-parent = <&mpic>;
-				interrupts = <17 0x2>;
+				interrupts = <23 2>;
 			};
 		};
 
@@ -180,7 +180,7 @@
 		enet0: ethernet@24000 {
 			cell-index = <0>;
 			device_type = "network";
-			model = "TSEC";
+			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <0x24000 0x1000>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
@@ -193,7 +193,7 @@
 		enet1: ethernet@26000 {
 			cell-index = <1>;
 			device_type = "network";
-			model = "TSEC";
+			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <0x26000 0x1000>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index a15f103..c80158f 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -52,7 +52,7 @@
 	};
 
 	bcsr@f8000000 {
-		device_type = "board-control";
+		compatible = "fsl,mpc8568mds-bcsr";
 		reg = <0xf8000000 0x8000>;
 	};
 
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index e124dd1..cadd465 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -13,8 +13,8 @@
 / {
 	model = "fsl,MPC8572DS";
 	compatible = "fsl,MPC8572DS";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
 
 	aliases {
 		ethernet0 = &enet0;
@@ -61,7 +61,6 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x0 0x0>;	// Filled by U-Boot
 	};
 
 	soc8572@ffe00000 {
@@ -69,8 +68,8 @@
 		#size-cells = <1>;
 		device_type = "soc";
 		compatible = "simple-bus";
-		ranges = <0x0 0xffe00000 0x100000>;
-		reg = <0xffe00000 0x1000>;	// CCSRBAR & soc regs, remove once parse code for immrbase fixed
+		ranges = <0x0 0 0xffe00000 0x100000>;
+		reg = <0 0xffe00000 0 0x1000>;	// CCSRBAR & soc regs, remove once parse code for immrbase fixed
 		bus-frequency = <0>;		// Filled out by uboot.
 
 		memory-controller@2000 {
@@ -351,10 +350,10 @@
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
 		#address-cells = <3>;
-		reg = <0xffe08000 0x1000>;
+		reg = <0 0xffe08000 0 0x1000>;
 		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
+		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
 		interrupts = <24 2>;
@@ -561,10 +560,10 @@
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
 		#address-cells = <3>;
-		reg = <0xffe09000 0x1000>;
+		reg = <0 0xffe09000 0 0x1000>;
 		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
+		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
 		interrupts = <26 2>;
@@ -598,10 +597,10 @@
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
 		#address-cells = <3>;
-		reg = <0xffe0a000 0x1000>;
+		reg = <0 0xffe0a000 0 0x1000>;
 		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
+		ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
 		interrupts = <27 2>;
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index c541fd8..9276327 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -105,6 +105,11 @@
 	return check_err(rc);
 }
 
+static int fdt_wrapper_del_node(const void *devp)
+{
+	return fdt_del_node(fdt, devp_offset(devp));
+}
+
 static void *fdt_wrapper_get_parent(const void *devp)
 {
 	return offset_devp(fdt_parent_offset(fdt, devp_offset(devp)));
@@ -165,6 +170,7 @@
 void fdt_init(void *blob)
 {
 	int err;
+	int bufsize;
 
 	dt_ops.finddevice = fdt_wrapper_finddevice;
 	dt_ops.getprop = fdt_wrapper_getprop;
@@ -173,21 +179,21 @@
 	dt_ops.create_node = fdt_wrapper_create_node;
 	dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value;
 	dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible;
+	dt_ops.del_node = fdt_wrapper_del_node;
 	dt_ops.get_path = fdt_wrapper_get_path;
 	dt_ops.finalize = fdt_wrapper_finalize;
 
 	/* Make sure the dt blob is the right version and so forth */
 	fdt = blob;
-	err = fdt_open_into(fdt, fdt, fdt_totalsize(blob));
-	if (err == -FDT_ERR_NOSPACE) {
-		int bufsize = fdt_totalsize(fdt) + 4;
-		buf = malloc(bufsize);
-		err = fdt_open_into(fdt, buf, bufsize);
-	}
+	bufsize = fdt_totalsize(fdt) + 4;
+	buf = malloc(bufsize);
+	if (!buf)
+		fatal("malloc failed. can't relocate the device tree\n\r");
+
+	err = fdt_open_into(fdt, buf, bufsize);
 
 	if (err != 0)
 		fatal("fdt_init(): %s\n\r", fdt_strerror(err));
 
-	if (buf)
-		fdt = buf;
+	fdt = buf;
 }
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 9e7f3dd..ae32801 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -56,9 +56,19 @@
 	if (platform_ops.vmlinux_alloc) {
 		addr = platform_ops.vmlinux_alloc(ei.memsize);
 	} else {
-		if ((unsigned long)_start < ei.memsize)
+		/*
+		 * Check if the kernel image (without bss) would overwrite the
+		 * bootwrapper. The device tree has been moved in fdt_init()
+		 * to an area allocated with malloc() (somewhere past _end).
+		 */
+		if ((unsigned long)_start < ei.loadsize)
 			fatal("Insufficient memory for kernel at address 0!"
-			       " (_start=%p)\n\r", _start);
+			       " (_start=%p, uncompressed size=%08x)\n\r",
+			       _start, ei.loadsize);
+
+		if ((unsigned long)_end < ei.memsize)
+			fatal("The final kernel image would overwrite the "
+					"device tree\n\r");
 	}
 
 	/* Finally, gunzip the kernel */
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 321e2f5..b3218ce 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -40,6 +40,7 @@
 			const int buflen);
 	int	(*setprop)(const void *phandle, const char *name,
 			const void *buf, const int buflen);
+	int (*del_node)(const void *phandle);
 	void *(*get_parent)(const void *phandle);
 	/* The node must not already exist. */
 	void *(*create_node)(const void *parent, const char *name);
@@ -126,6 +127,11 @@
 	return -1;
 }
 
+static inline int del_node(const void *devp)
+{
+	return dt_ops.del_node ? dt_ops.del_node(devp) : -1;
+}
+
 static inline void *get_parent(const char *devp)
 {
 	return dt_ops.get_parent ? dt_ops.get_parent(devp) : NULL;
diff --git a/arch/powerpc/boot/string.S b/arch/powerpc/boot/string.S
index 643e4cb..acc9428 100644
--- a/arch/powerpc/boot/string.S
+++ b/arch/powerpc/boot/string.S
@@ -235,7 +235,7 @@
 	.globl	memcmp
 memcmp:
 	cmpwi	0,r5,0
-	blelr
+	ble	2f
 	mtctr	r5
 	addi	r6,r3,-1
 	addi	r4,r4,-1
@@ -244,6 +244,8 @@
 	subf.	r3,r0,r3
 	bdnzt	2,1b
 	blr
+2:	li	r3,0
+	blr
 
 
 /*
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ee0dc41..f390735 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -306,11 +306,14 @@
 
 # post-processing needed for some platforms
 case "$platform" in
-pseries|chrp)
+pseries)
     ${CROSS}objcopy -O binary -j .fakeelf "$kernel" "$ofile".rpanote
     $objbin/addnote "$ofile" "$ofile".rpanote
     rm -r "$ofile".rpanote
     ;;
+chrp)
+    $objbin/addnote -r c00000 "$ofile"
+    ;;
 coff)
     ${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile"
     $objbin/hack-coff "$ofile"
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
new file mode 100644
index 0000000..39bd9eb4
--- /dev/null
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -0,0 +1,921 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-rc5
+# Mon Oct 13 13:47:16 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_6xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+CONFIG_40x=y
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_4xx=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_NOT_COHERENT_CACHE=y
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PPC4xx_PCI_EXPRESS is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+CONFIG_ACADIA=y
+# CONFIG_EP405 is not set
+# CONFIG_KILAUEA is not set
+# CONFIG_MAKALU is not set
+# CONFIG_WALNUT is not set
+# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
+CONFIG_PPC40x_SIMPLE=y
+CONFIG_405EZ=y
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_EXTRA_TARGETS=""
+# CONFIG_PM is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_4xx_SOC=y
+CONFIG_PPC_PCI_CHOICE=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_CONSISTENT_START=0xff100000
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_IBM_NEW_EMAC=y
+CONFIG_IBM_NEW_EMAC_RXB=256
+CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
+CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
+CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
+CONFIG_IBM_NEW_EMAC_DEBUG=y
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL=y
+CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT=y
+CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR=y
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig
new file mode 100644
index 0000000..682fce0
--- /dev/null
+++ b/arch/powerpc/configs/40x/hcu4_defconfig
@@ -0,0 +1,929 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.26.5
+# Tue Sep 16 00:44:33 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_6xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+CONFIG_40x=y
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_4xx=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_NOT_COHERENT_CACHE=y
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+# CONFIG_LOGBUFFER is not set
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_DMA_ATTRS is not set
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PPC4xx_PCI_EXPRESS is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EP405 is not set
+CONFIG_HCU4=y
+# CONFIG_KILAUEA is not set
+# CONFIG_MAKALU is not set
+# CONFIG_WALNUT is not set
+# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_4xx_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+# CONFIG_PCI_LEGACY is not set
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_CONSISTENT_START=0xff100000
+CONFIG_CONSISTENT_SIZE=0x00200000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_IBM_NEW_EMAC=y
+CONFIG_IBM_NEW_EMAC_RXB=128
+CONFIG_IBM_NEW_EMAC_TXB=64
+CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
+CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
+CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
+# CONFIG_IBM_NEW_EMAC_DEBUG is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_E1000E_ENABLED is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
+CONFIG_NETDEV_10000=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_SFC is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index f6c93c7..a503da9 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -9,6 +9,12 @@
  * Reserve to the end of the FWNMI area, see head_64.S */
 #define KDUMP_RESERVE_LIMIT	0x10000 /* 64K */
 
+/*
+ * Used to differentiate between relocatable kdump kernel and other
+ * kernels
+ */
+#define KDUMP_SIGNATURE	0xfeed1234
+
 #ifdef CONFIG_CRASH_DUMP
 
 #define KDUMP_TRAMPOLINE_START	0x0100
@@ -19,17 +25,18 @@
 #endif /* CONFIG_CRASH_DUMP */
 
 #ifndef __ASSEMBLY__
-#ifdef CONFIG_CRASH_DUMP
 
+extern unsigned long __kdump_flag;
+
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE)
 extern void reserve_kdump_trampoline(void);
 extern void setup_kdump_trampoline(void);
-
-#else /* !CONFIG_CRASH_DUMP */
-
+#else
+/* !CRASH_DUMP || RELOCATABLE */
 static inline void reserve_kdump_trampoline(void) { ; }
 static inline void setup_kdump_trampoline(void) { ; }
+#endif
 
-#endif /* CONFIG_CRASH_DUMP */
 #endif /* __ASSEMBLY__ */
 
 #endif /* __PPC64_KDUMP_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 64e1445..c0b8d4a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -10,9 +10,13 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#else
+#include <asm/types.h>
+#endif
 #include <asm/asm-compat.h>
 #include <asm/kdump.h>
-#include <asm/types.h>
 
 /*
  * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
@@ -73,6 +77,7 @@
 
 #if defined(CONFIG_RELOCATABLE)
 #ifndef __ASSEMBLY__
+
 extern phys_addr_t memstart_addr;
 extern phys_addr_t kernstart_addr;
 #endif
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index ae2ea80..9047af7 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -74,6 +74,13 @@
 	unsigned long pci_io_size;
 #endif
 
+	/* Some machines have a special region to forward the ISA
+	 * "memory" cycles such as VGA memory regions. Left to 0
+	 * if unsupported
+	 */
+	resource_size_t	isa_mem_phys;
+	resource_size_t	isa_mem_size;
+
 	struct pci_ops *ops;
 	unsigned int __iomem *cfg_addr;
 	void __iomem *cfg_data;
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 0e52c78..39d547f 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -123,6 +123,16 @@
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP	1
 
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+			   size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+			   size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
+
+#define HAVE_PCI_LEGACY	1
+
 #if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
 /*
  * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
@@ -226,5 +236,6 @@
 extern void pcibios_do_bus_setup(struct pci_bus *bus);
 extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
 
+
 #endif	/* __KERNEL__ */
 #endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index fda9871..5aa22cf 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -678,6 +678,8 @@
 	u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE];
 };
 
+/* channel status */
+extern u8 ps3av_mode_cs_info[];
 
 /** command status **/
 #define PS3AV_STATUS_SUCCESS			0x0000	/* success */
@@ -735,6 +737,7 @@
 extern int ps3av_video_mode2res(u32, u32 *, u32 *);
 extern int ps3av_video_mute(int);
 extern int ps3av_audio_mute(int);
+extern int ps3av_audio_mute_analog(int);
 extern int ps3av_dev_open(void);
 extern int ps3av_dev_close(void);
 extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 734e075..280a90c 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -129,7 +129,7 @@
 #define CHECK_FULL_REGS(regs)						      \
 do {									      \
 	if ((regs)->trap & 1)						      \
-		printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+		printk(KERN_CRIT "%s: partial register set\n", __func__); \
 } while (0)
 #endif /* __powerpc64__ */
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e70d048..b1eb834 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1277,6 +1277,19 @@
 		.machine_check		= machine_check_4xx,
 		.platform		= "ppc405",
 	},
+	{
+		/* 405EZ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x41510000,
+		.cpu_name		= "405EZ",
+		.cpu_features		= CPU_FTRS_40X,
+		.cpu_user_features	= PPC_FEATURE_32 |
+			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 32,
+		.machine_check		= machine_check_4xx,
+		.platform		= "ppc405",
+	},
 	{	/* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index a323c9b..19671ac 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -27,6 +27,10 @@
 #define DBG(fmt...)
 #endif
 
+/* Stores the physical address of the ELF header of the crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
+#ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
 	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
@@ -65,8 +69,13 @@
 
 	DBG(" <- setup_kdump_trampoline()\n");
 }
+#endif /* CONFIG_RELOCATABLE */
 
-#ifdef CONFIG_PROC_VMCORE
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
 static int __init parse_elfcorehdr(char *p)
 {
 	if (p)
@@ -75,7 +84,6 @@
 	return 1;
 }
 __setup("elfcorehdr=", parse_elfcorehdr);
-#endif
 
 static int __init parse_savemaxmem(char *p)
 {
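
parse_elfcorehdr() above is no longer hidden behind CONFIG_PROC_VMCORE, because elfcorehdr_addr also lets is_kdump_kernel() detect a boot that follows a crash. A userspace sketch of the same idea (the real hook uses __setup() and memparse(); names carrying a _sketch suffix are invented for illustration):

/* Illustrative userspace parser for an "elfcorehdr=<addr>" argument; the real
 * kernel hook uses __setup() and memparse(), this sketch only mimics the idea. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ELFCORE_ADDR_MAX_SKETCH (~0ULL)	/* "not set" marker, mirrors ELFCORE_ADDR_MAX */

static unsigned long long elfcorehdr_addr_sketch = ELFCORE_ADDR_MAX_SKETCH;

static int parse_elfcorehdr_arg(const char *arg)
{
	const char *prefix = "elfcorehdr=";

	if (strncmp(arg, prefix, strlen(prefix)) != 0)
		return 0;
	elfcorehdr_addr_sketch = strtoull(arg + strlen(prefix), NULL, 0);
	return 1;
}

static int is_kdump_boot(void)
{
	/* booting after a crash iff an ELF core header address was handed over */
	return elfcorehdr_addr_sketch != ELFCORE_ADDR_MAX_SKETCH;
}

int main(void)
{
	parse_elfcorehdr_arg("elfcorehdr=0x2000000");
	printf("kdump boot: %d, header at 0x%llx\n",
	       is_kdump_boot(), elfcorehdr_addr_sketch);
	return 0;
}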
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 84856be..69489bd 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -97,6 +97,12 @@
 __secondary_hold_acknowledge:
 	.llong	0x0
 
+	/* This flag is set by purgatory if we should be a kdump kernel. */
+	/* Do not move this variable as purgatory knows about it. */
+	.globl	__kdump_flag
+__kdump_flag:
+	.llong	0x0
+
 #ifdef CONFIG_PPC_ISERIES
 	/*
 	 * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -1384,7 +1390,13 @@
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
-	mr	r3,r25
+#ifdef CONFIG_CRASH_DUMP
+	ld	r7,__kdump_flag-_stext(r26)
+	cmpldi	cr0,r7,1	/* kdump kernel ? - stay where we are */
+	bne	1f
+	add	r25,r25,r26
+#endif
+1:	mr	r3,r25
 	bl	.relocate
 #endif
 
@@ -1398,11 +1410,26 @@
 	li	r3,0			/* target addr */
 	mr.	r4,r26			/* In some cases the loader may  */
 	beq	9f			/* have already put us at zero */
-	lis	r5,(copy_to_here - _stext)@ha
-	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
 	li	r6,0x100		/* Start offset, the first 0x100 */
 					/* bytes were copied earlier.	 */
 
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * Check whether the kernel has to run as a relocatable kernel, based on
+ * the variable __kdump_flag.  If the flag is set, the kernel is treated as
+ * relocatable; otherwise it is moved to PHYSICAL_START.
+ */
+	ld	r7,__kdump_flag-_stext(r26)
+	cmpldi	cr0,r7,1
+	bne	3f
+
+	li	r5,__end_interrupts - _stext	/* just copy interrupts */
+	b	5f
+3:
+#endif
+	lis	r5,(copy_to_here - _stext)@ha
+	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
+
 	bl	.copy_and_flush		/* copy the first n bytes	 */
 					/* this includes the code being	 */
 					/* executed here.		 */
@@ -1411,15 +1438,15 @@
 	mtctr	r8
 	bctr
 
+p_end:	.llong	_end - _stext
+
 4:	/* Now copy the rest of the kernel up to _end */
 	addis	r5,r26,(p_end - _stext)@ha
 	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
-	bl	.copy_and_flush		/* copy the rest */
+5:	bl	.copy_and_flush		/* copy the rest */
 
 9:	b	.start_here_multiplatform
 
-p_end:	.llong	_end - _stext
-
 /*
  * Copy routine used to copy the kernel to start at physical address 0
  * and flush and invalidate the caches as needed.
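
The head_64.S hunks above make early boot consult __kdump_flag: a kdump kernel keeps running at the address it was loaded at and copies only the exception vectors (up to __end_interrupts) down to physical 0, while a regular kernel still copies its whole image there. A rough standalone C sketch of that decision follows; the constant mirrors KDUMP_SIGNATURE, everything else is invented for the example.

/* Standalone sketch of the copy decision made by the assembly above. */
#include <stdint.h>
#include <stdio.h>

#define KDUMP_SIGNATURE_SKETCH 0xfeed1234UL

struct copy_plan {
	uint64_t nbytes;	/* how much of the image to copy to physical 0 */
	int stay_put;		/* 1: kernel itself keeps running where loaded */
};

static struct copy_plan plan_copy(uint64_t kdump_flag,
				  uint64_t end_interrupts, uint64_t image_end)
{
	struct copy_plan p;

	if (kdump_flag == KDUMP_SIGNATURE_SKETCH) {
		/* kdump kernel: stay where loaded, only the exception
		 * vectors need to live at physical address 0 */
		p.stay_put = 1;
		p.nbytes = end_interrupts;
	} else {
		/* regular kernel: copy the whole image down to 0 */
		p.stay_put = 0;
		p.nbytes = image_end;
	}
	return p;
}

int main(void)
{
	struct copy_plan p = plan_copy(KDUMP_SIGNATURE_SKETCH, 0x8000, 0x900000);

	printf("stay_put=%d copy=0x%llx bytes\n",
	       p.stay_put, (unsigned long long)p.nbytes);
	return 0;
}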
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ea1ba89..3857d7e 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -458,6 +458,42 @@
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
+static void iommu_table_clear(struct iommu_table *tbl)
+{
+	if (!__kdump_flag) {
+		/* Clear the table in case firmware left allocations in it */
+		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+		return;
+	}
+
+#ifdef CONFIG_CRASH_DUMP
+	if (ppc_md.tce_get) {
+		unsigned long index, tceval, tcecount = 0;
+
+		/* Reserve the existing mappings left by the first kernel. */
+		for (index = 0; index < tbl->it_size; index++) {
+			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+			/*
+			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
+			 */
+			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
+				__set_bit(index, tbl->it_map);
+				tcecount++;
+			}
+		}
+
+		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
+			printk(KERN_WARNING "TCE table is full; freeing ");
+			printk(KERN_WARNING "%d entries for the kdump boot\n",
+				KDUMP_MIN_TCE_ENTRIES);
+			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
+				index < tbl->it_size; index++)
+				__clear_bit(index, tbl->it_map);
+		}
+	}
+#endif
+}
+
 /*
  * Build a iommu_table structure.  This contains a bit map which
  * is used to manage allocation of the tce space.
@@ -484,38 +520,7 @@
 	tbl->it_largehint = tbl->it_halfpoint;
 	spin_lock_init(&tbl->it_lock);
 
-#ifdef CONFIG_CRASH_DUMP
-	if (ppc_md.tce_get) {
-		unsigned long index;
-		unsigned long tceval;
-		unsigned long tcecount = 0;
-
-		/*
-		 * Reserve the existing mappings left by the first kernel.
-		 */
-		for (index = 0; index < tbl->it_size; index++) {
-			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
-			/*
-			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
-			 */
-			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
-				__set_bit(index, tbl->it_map);
-				tcecount++;
-			}
-		}
-		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
-			printk(KERN_WARNING "TCE table is full; ");
-			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
-				KDUMP_MIN_TCE_ENTRIES);
-			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
-				index < tbl->it_size; index++)
-				__clear_bit(index, tbl->it_map);
-		}
-	}
-#else
-	/* Clear the hardware table in case firmware left allocations in it */
-	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
-#endif
+	iommu_table_clear(tbl);
 
 	if (!welcomed) {
 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
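
The new iommu_table_clear() above either wipes the TCE table or, when __kdump_flag is set, walks the entries left by the first kernel, marks the live ones as reserved in the allocation bitmap, and releases a minimum number of entries if the table turns out to be nearly full. A self-contained model of that reservation pass; plain arrays stand in for the bitmap and the hardware table, and the sizes and "freed" marker are illustrative only.

/* Illustrative model of the kdump TCE-preservation pass; not kernel APIs. */
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE	64
#define MIN_FREE	8			/* plays the role of KDUMP_MIN_TCE_ENTRIES */
#define FREED_TCE	0x7fffffffffffffffULL	/* "freed" marker seen on some firmware */

static unsigned long long tce[TABLE_SIZE];	/* fake hardware table contents */
static unsigned char in_use[TABLE_SIZE];	/* stands in for tbl->it_map */

static void preserve_first_kernel_mappings(void)
{
	unsigned int i, reserved = 0;

	for (i = 0; i < TABLE_SIZE; i++) {
		if (tce[i] && tce[i] != FREED_TCE) {
			in_use[i] = 1;		/* keep the first kernel's mapping */
			reserved++;
		}
	}

	/* If almost everything is reserved, give the kdump kernel some room
	 * by releasing the last MIN_FREE entries. */
	if (TABLE_SIZE - reserved < MIN_FREE)
		memset(&in_use[TABLE_SIZE - MIN_FREE], 0, MIN_FREE);
}

int main(void)
{
	memset(tce, 0x11, sizeof(tce));		/* pretend every entry is live */
	preserve_first_kernel_mappings();
	printf("entry 0 reserved: %d, last entry reserved: %d\n",
	       in_use[0], in_use[TABLE_SIZE - 1]);
	return 0;
}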
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index aab7688..ac2a21f 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -88,11 +88,13 @@
 
 	crash_size = crashk_res.end - crashk_res.start + 1;
 
+#ifndef CONFIG_RELOCATABLE
 	if (crashk_res.start != KDUMP_KERNELBASE)
 		printk("Crash kernel location must be 0x%x\n",
 				KDUMP_KERNELBASE);
 
 	crashk_res.start = KDUMP_KERNELBASE;
+#endif
 	crash_size = PAGE_ALIGN(crash_size);
 	crashk_res.end = crashk_res.start + crash_size - 1;
 
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index a168514..e6efec7 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -255,11 +255,14 @@
 /* Our assembly helper, in kexec_stub.S */
 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
 					void *image, void *control,
-					void (*clear_all)(void)) ATTRIB_NORET;
+					void (*clear_all)(void),
+					unsigned long kdump_flag) ATTRIB_NORET;
 
 /* too late to fail here */
 void default_machine_kexec(struct kimage *image)
 {
+	unsigned long kdump_flag = 0;
+
 	/* prepare control code if any */
 
 	/*
@@ -270,8 +273,10 @@
         * using debugger IPI.
         */
 
-       if (crashing_cpu == -1)
-               kexec_prepare_cpus();
+	if (crashing_cpu == -1)
+		kexec_prepare_cpus();
+	else
+		kdump_flag = KDUMP_SIGNATURE;
 
 	/* switch to a statically allocated stack.  Based on irq stack code.
 	 * XXX: the task struct will likely be invalid once we do the copy!
@@ -284,7 +289,7 @@
 	 */
 	kexec_sequence(&kexec_stack, image->start, image,
 			page_address(image->control_code_page),
-			ppc_md.hpte_clear_all);
+			ppc_md.hpte_clear_all, kdump_flag);
 	/* NOTREACHED */
 }
 
@@ -312,11 +317,24 @@
 static void __init export_htab_values(void)
 {
 	struct device_node *node;
+	struct property *prop;
 
 	node = of_find_node_by_path("/chosen");
 	if (!node)
 		return;
 
+	/* remove any stale properties so ours can be found */
+	prop = of_find_property(node, kernel_end_prop.name, NULL);
+	if (prop)
+		prom_remove_property(node, prop);
+	prop = of_find_property(node, htab_base_prop.name, NULL);
+	if (prop)
+		prom_remove_property(node, prop);
+	prop = of_find_property(node, htab_size_prop.name, NULL);
+	if (prop)
+		prom_remove_property(node, prop);
+
+	/* information needed by userspace when using default_machine_kexec */
 	kernel_end = __pa(_end);
 	prom_add_property(node, &kernel_end_prop);
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 3053fe5..a243fd0 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -611,10 +611,12 @@
 
 
 /*
- * kexec_sequence(newstack, start, image, control, clear_all())
+ * kexec_sequence(newstack, start, image, control, clear_all(), kdump_flag)
  *
  * does the grungy work with stack switching and real mode switches
  * also does simple calls to other code
+ *
+ * kdump_flag says whether the next kernel should be a kdump kernel.
  */
 
 _GLOBAL(kexec_sequence)
@@ -647,7 +649,7 @@
 	mr	r29,r5			/* image (virt) */
 	mr	r28,r6			/* control, unused */
 	mr	r27,r7			/* clear_all() fn desc */
-	mr	r26,r8			/* spare */
+	mr	r26,r8			/* kdump flag */
 	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
 
 	/* disable interrupts, we are overwriting kernel data next */
@@ -709,5 +711,6 @@
 	mr	r4,r30	# start, aka phys mem offset
 	mtlr	4
 	li	r5,0
-	blr	/* image->start(physid, image->start, 0); */
+	mr	r6,r26			/* kdump_flag */
+	blr	/* image->start(physid, image->start, 0, kdump_flag); */
 #endif /* CONFIG_KEXEC */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 01ce8c3..1ec7393 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -451,7 +451,8 @@
 		pci_dev_put(pdev);
 	}
 
-	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
+	DBG("non-PCI map for %llx, prot: %lx\n",
+	    (unsigned long long)offset, prot);
 
 	return __pgprot(prot);
 }
@@ -490,6 +491,132 @@
 	return ret;
 }
 
+/* This provides legacy IO read access on a bus */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+	unsigned long offset;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct resource *rp = &hose->io_resource;
+	void __iomem *addr;
+
+	/* Check if port can be supported by that bus. We only check
+	 * the ranges of the PHB though, not the bus itself as the rules
+	 * for forwarding legacy cycles down bridges are not our problem
+	 * here. So if the host bridge supports it, we do it.
+	 */
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	offset += port;
+
+	if (!(rp->flags & IORESOURCE_IO))
+		return -ENXIO;
+	if (offset < rp->start || (offset + size) > rp->end)
+		return -ENXIO;
+	addr = hose->io_base_virt + port;
+
+	switch(size) {
+	case 1:
+		*((u8 *)val) = in_8(addr);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		*((u16 *)val) = in_le16(addr);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		*((u32 *)val) = in_le32(addr);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+/* This provides legacy IO write access on a bus */
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+	unsigned long offset;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct resource *rp = &hose->io_resource;
+	void __iomem *addr;
+
+	/* Check if port can be supported by that bus. We only check
+	 * the ranges of the PHB though, not the bus itself as the rules
+	 * for forwarding legacy cycles down bridges are not our problem
+	 * here. So if the host bridge supports it, we do it.
+	 */
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	offset += port;
+
+	if (!(rp->flags & IORESOURCE_IO))
+		return -ENXIO;
+	if (offset < rp->start || (offset + size) > rp->end)
+		return -ENXIO;
+	addr = hose->io_base_virt + port;
+
+	/* WARNING: The generic code is idiotic. It gets passed a pointer
+	 * to what can be a 1, 2 or 4 byte quantity and always reads that
+	 * as a u32, which means that we have to correct the location of
+	 * the data read within those 32 bits for size 1 and 2
+	 */
+	switch(size) {
+	case 1:
+		out_8(addr, val >> 24);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		out_le16(addr, val >> 16);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		out_le32(addr, val);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+/* This provides legacy IO or memory mmap access on a bus */
+int pci_mmap_legacy_page_range(struct pci_bus *bus,
+			       struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	resource_size_t offset =
+		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
+	resource_size_t size = vma->vm_end - vma->vm_start;
+	struct resource *rp;
+
+	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
+		 pci_domain_nr(bus), bus->number,
+		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
+		 (unsigned long long)offset,
+		 (unsigned long long)(offset + size - 1));
+
+	if (mmap_state == pci_mmap_mem) {
+		if ((offset + size) > hose->isa_mem_size)
+			return -ENXIO;
+		offset += hose->isa_mem_phys;
+	} else {
+		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+		unsigned long roffset = offset + io_offset;
+		rp = &hose->io_resource;
+		if (!(rp->flags & IORESOURCE_IO))
+			return -ENXIO;
+		if (roffset < rp->start || (roffset + size) > rp->end)
+			return -ENXIO;
+		offset += hose->io_base_phys;
+	}
+	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
+
+	vma->vm_pgoff = offset >> PAGE_SHIFT;
+	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
 void pci_resource_to_user(const struct pci_dev *dev, int bar,
 			  const struct resource *rsrc,
 			  resource_size_t *start, resource_size_t *end)
@@ -592,6 +719,12 @@
 		cpu_addr = of_translate_address(dev, ranges + 3);
 		size = of_read_number(ranges + pna + 3, 2);
 		ranges += np;
+
+		/* Skip the range if translation failed or the region is
+		 * zero-sized (some firmware feeds us nonsensical zero-sized
+		 * regions, e.g. on power3, which look like an attempt at
+		 * exposing the VGA memory hole)
+		 */
 		if (cpu_addr == OF_BAD_ADDR || size == 0)
 			continue;
 
@@ -665,6 +798,8 @@
 				isa_hole = memno;
 				if (primary || isa_mem_base == 0)
 					isa_mem_base = cpu_addr;
+				hose->isa_mem_phys = cpu_addr;
+				hose->isa_mem_size = size;
 			}
 
 			/* We get the PCI/Mem offset from the first range or
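
As the comment in pci_legacy_write() above notes, the generic code hands it a u32 read from what may be a 1-, 2- or 4-byte quantity, so the smaller cases have to shift the data down from the top of the word, and unaligned 2- and 4-byte ports are rejected. A sketch of that dispatch using plain memory stores; it deliberately ignores the little-endian byte swapping that the real out_le16/out_le32 accessors perform.

/* Sketch of the size/alignment handling in pci_legacy_write() above. */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

static int legacy_write(uint8_t *io_base, uint64_t port, uint32_t val, size_t size)
{
	uint8_t *addr = io_base + port;

	switch (size) {
	case 1:
		/* caller passes the byte in the top 8 bits of val */
		*addr = (uint8_t)(val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;		/* unaligned 16-bit access */
		/* 16-bit quantity lives in the top half of val */
		*(uint16_t *)addr = (uint16_t)(val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;		/* unaligned 32-bit access */
		*(uint32_t *)addr = val;
		return 4;
	}
	return -EINVAL;
}

int main(void)
{
	static uint32_t io_words[4] = { 0 };	/* aligned backing store */
	uint8_t *io = (uint8_t *)io_words;

	legacy_write(io, 0, 0xAB000000u, 1);	/* writes 0xAB to port 0 */
	printf("port0 = 0x%02x, unaligned rc = %d\n",
	       io[0], legacy_write(io, 1, 0, 2));
	return 0;
}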
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2fdbc18..23e0db2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -487,67 +487,6 @@
 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
 }
 
-/* We can't use the standard versions because of RELOC headaches. */
-#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
-			 || ('a' <= (c) && (c) <= 'f') \
-			 || ('A' <= (c) && (c) <= 'F'))
-
-#define isdigit(c)	('0' <= (c) && (c) <= '9')
-#define islower(c)	('a' <= (c) && (c) <= 'z')
-#define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
-
-unsigned long prom_strtoul(const char *cp, const char **endp)
-{
-	unsigned long result = 0, base = 10, value;
-
-	if (*cp == '0') {
-		base = 8;
-		cp++;
-		if (toupper(*cp) == 'X') {
-			cp++;
-			base = 16;
-		}
-	}
-
-	while (isxdigit(*cp) &&
-	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
-		result = result * base + value;
-		cp++;
-	}
-
-	if (endp)
-		*endp = cp;
-
-	return result;
-}
-
-unsigned long prom_memparse(const char *ptr, const char **retptr)
-{
-	unsigned long ret = prom_strtoul(ptr, retptr);
-	int shift = 0;
-
-	/*
-	 * We can't use a switch here because GCC *may* generate a
-	 * jump table which won't work, because we're not running at
-	 * the address we're linked at.
-	 */
-	if ('G' == **retptr || 'g' == **retptr)
-		shift = 30;
-
-	if ('M' == **retptr || 'm' == **retptr)
-		shift = 20;
-
-	if ('K' == **retptr || 'k' == **retptr)
-		shift = 10;
-
-	if (shift) {
-		ret <<= shift;
-		(*retptr)++;
-	}
-
-	return ret;
-}
-
 /*
  * Early parsing of the command line passed to the kernel, used for
  * "mem=x" and the options that affect the iommu
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 2c7e8e8..ea3a2ec 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -20,7 +20,7 @@
 _end enter_prom memcpy memset reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
-reloc_got2 kernstart_addr"
+reloc_got2 kernstart_addr memstart_addr"
 
 NM="$1"
 OBJ="$2"
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 5ec56ff..705fc4b 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -59,6 +59,7 @@
 #include <asm/mmu.h>
 #include <asm/xmon.h>
 #include <asm/cputhreads.h>
+#include <mm/mmu_decl.h>
 
 #include "setup.h"
 
@@ -190,6 +191,12 @@
 		if (ppc_md.show_cpuinfo != NULL)
 			ppc_md.show_cpuinfo(m);
 
+#ifdef CONFIG_PPC32
+		/* Display the amount of memory */
+		seq_printf(m, "Memory\t\t: %d MB\n",
+			   (unsigned int)(total_memory / (1024 * 1024)));
+#endif
+
 		return 0;
 	}
 
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 65ad925..c6a8f232 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -235,8 +235,6 @@
 	else
 		for (i = 0; i < 32 ; i++)
 			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-
-#else
 #endif
 	return err;
 }
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index cb01ebc..7b7da8c 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -142,7 +142,7 @@
 	speed = (clock / prescaler) / (divisor * 16);
 
 	/* sanity check */
-	if (speed < 0 || speed > (clock / 16))
+	if (speed > (clock / 16))
 		speed = 9600;
 
 	return speed;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5c64af1..8d5b475 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -382,8 +382,10 @@
 	printk(KERN_INFO "Huge page(16GB) memory: "
 			"addr = 0x%lX size = 0x%lX pages = %d\n",
 			phys_addr, block_size, expected_pages);
-	lmb_reserve(phys_addr, block_size * expected_pages);
-	add_gpage(phys_addr, block_size, expected_pages);
+	if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
+		lmb_reserve(phys_addr, block_size * expected_pages);
+		add_gpage(phys_addr, block_size, expected_pages);
+	}
 	return 0;
 }
 #endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 98d7bf9..b9e1a1d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -134,23 +134,6 @@
 
 	return __add_pages(zone, start_pfn, nr_pages);
 }
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
-	unsigned long start_pfn, end_pfn;
-	int ret;
-
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = start_pfn + (size >> PAGE_SHIFT);
-	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
-	if (ret)
-		goto out;
-	/* Arch-specific calls go here - next patch */
-out:
-	return ret;
-}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 /*
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6cf5c71..eb505ad 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -116,6 +116,7 @@
 
 /*
  * get_node_active_region - Return active region containing start_pfn
+ * Active range returned is empty if none found.
  * @start_pfn: The page to return the region for.
  * @node_ar: Returned set to the active region containing start_pfn
  */
@@ -126,6 +127,7 @@
 
 	node_ar->nid = nid;
 	node_ar->start_pfn = start_pfn;
+	node_ar->end_pfn = start_pfn;
 	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
 }
 
@@ -526,12 +528,10 @@
 	/*
 	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
 	 * we've already adjusted it for the limit and it takes care of
-	 * having memory holes below the limit.
+	 * having memory holes below the limit.  Also, in the case of
+	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 	 */
 
-	if (! memory_limit)
-		return size;
-
 	if (start + size <= lmb_end_of_DRAM())
 		return size;
 
@@ -933,18 +933,20 @@
 		struct node_active_region node_ar;
 
 		get_node_active_region(start_pfn, &node_ar);
-		while (start_pfn < end_pfn) {
+		while (start_pfn < end_pfn &&
+			node_ar.start_pfn < node_ar.end_pfn) {
+			unsigned long reserve_size = size;
 			/*
 			 * if reserved region extends past active region
 			 * then trim size to active region
 			 */
 			if (end_pfn > node_ar.end_pfn)
-				size = (node_ar.end_pfn << PAGE_SHIFT)
+				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
 					- (start_pfn << PAGE_SHIFT);
-			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase, size,
-				node_ar.nid);
+			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
+				reserve_size, node_ar.nid);
 			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-						size, BOOTMEM_DEFAULT);
+						reserve_size, BOOTMEM_DEFAULT);
 			/*
 			 * if reserved region is contained in the active region
 			 * then done.
@@ -959,6 +961,7 @@
 			 */
 			start_pfn = node_ar.end_pfn;
 			physbase = start_pfn << PAGE_SHIFT;
+			size = size - reserve_size;
 			get_node_active_region(start_pfn, &node_ar);
 		}
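
The reworked reserve_bootmem loop above splits a reserved range across NUMA active regions: trim the piece to the current region, reserve it on that node, subtract what was reserved, and look up the region covering the next pfn; the lookup now also returns an empty region (end_pfn == start_pfn) when nothing matches, which terminates the loop. A small model of that splitting, with a made-up region table and page size:

/* Model of the region-splitting loop above; regions and sizes are invented. */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12

struct region { unsigned long start_pfn, end_pfn; int nid; };

static const struct region regions[] = {
	{ 0x000, 0x100, 0 },
	{ 0x100, 0x300, 1 },
};

/* Return the active region containing pfn; an empty region if none. */
static struct region lookup(unsigned long pfn)
{
	struct region none = { pfn, pfn, -1 };
	for (unsigned int i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (pfn >= regions[i].start_pfn && pfn < regions[i].end_pfn)
			return regions[i];
	return none;
}

static void reserve_split(unsigned long physbase, unsigned long size)
{
	unsigned long start_pfn = physbase >> PAGE_SHIFT_SKETCH;
	unsigned long end_pfn = (physbase + size) >> PAGE_SHIFT_SKETCH;
	struct region ar = lookup(start_pfn);

	while (start_pfn < end_pfn && ar.start_pfn < ar.end_pfn) {
		unsigned long reserve_size = size;

		/* trim the piece to the current active region */
		if (end_pfn > ar.end_pfn)
			reserve_size = (ar.end_pfn - start_pfn) << PAGE_SHIFT_SKETCH;

		printf("reserve %#lx bytes at %#lx on nid %d\n",
		       reserve_size, physbase, ar.nid);

		if (end_pfn <= ar.end_pfn)
			break;				/* fully covered, done */

		/* continue with whatever spills into the next region */
		start_pfn = ar.end_pfn;
		physbase = start_pfn << PAGE_SHIFT_SKETCH;
		size -= reserve_size;
		ar = lookup(start_pfn);
	}
}

int main(void)
{
	reserve_split(0x0f0UL << PAGE_SHIFT_SKETCH, 0x20UL << PAGE_SHIFT_SKETCH);
	return 0;
}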
 
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 22e4e8d..628009c 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -24,6 +24,11 @@
 #define SKIP_GENERIC_SYNC 0
 #define SYNC_START_ERROR -1
 #define DO_GENERIC_SYNC 1
+#define SPUS_PER_NODE   8
+#define DEFAULT_TIMER_EXPIRE  (HZ / 10)
+
+extern struct delayed_work spu_work;
+extern int spu_prof_running;
 
 struct spu_overlay_info {	/* map of sections within an SPU overlay */
 	unsigned int vma;	/* SPU virtual memory address from elf */
@@ -62,6 +67,14 @@
 
 };
 
+struct spu_buffer {
+	int last_guard_val;
+	int ctx_sw_seen;
+	unsigned long *buff;
+	unsigned int head, tail;
+};
+
+
 /* The three functions below are for maintaining and accessing
  * the vma-to-fileoffset map.
  */
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 380d7e2..6edaebd 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -23,12 +23,11 @@
 
 static u32 *samples;
 
-static int spu_prof_running;
+int spu_prof_running;
 static unsigned int profiling_interval;
 
 #define NUM_SPU_BITS_TRBUF 16
 #define SPUS_PER_TB_ENTRY   4
-#define SPUS_PER_NODE	     8
 
 #define SPU_PC_MASK	     0xFFFF
 
@@ -208,6 +207,7 @@
 
 	spu_prof_running = 1;
 	hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
+	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
 
 	return 0;
 }
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 2a9b4a0..2949126 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -35,7 +35,102 @@
 static DEFINE_SPINLOCK(cache_lock);
 static int num_spu_nodes;
 int spu_prof_num_nodes;
-int last_guard_val[MAX_NUMNODES * 8];
+
+struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
+struct delayed_work spu_work;
+static unsigned max_spu_buff;
+
+static void spu_buff_add(unsigned long int value, int spu)
+{
+	/* spu_buff is a circular buffer.  Entries are added at the
+	 * head; head is the index where the next value is stored.
+	 * The buffer is considered full while one slot is still free,
+	 * i.e. head is never allowed to catch up with tail.  That way
+	 * a full buffer can be distinguished from an empty one.
+	 *
+	 *  ASSUMPTION: buffer_lock is held when this function is
+	 *              called, protecting the buffer, head and tail.
+	 */
+	int full = 1;
+
+	if (spu_buff[spu].head >= spu_buff[spu].tail) {
+		if ((spu_buff[spu].head - spu_buff[spu].tail)
+		    <  (max_spu_buff - 1))
+			full = 0;
+
+	} else if (spu_buff[spu].tail > spu_buff[spu].head) {
+		if ((spu_buff[spu].tail - spu_buff[spu].head)
+		    > 1)
+			full = 0;
+	}
+
+	if (!full) {
+		spu_buff[spu].buff[spu_buff[spu].head] = value;
+		spu_buff[spu].head++;
+
+		if (spu_buff[spu].head >= max_spu_buff)
+			spu_buff[spu].head = 0;
+	} else {
+		/* From the user's perspective make the SPU buffer
+		 * size management/overflow look like we are using
+		 * per cpu buffers.  The user uses the same
+		 * per cpu parameter to adjust the SPU buffer size.
+		 * Increment the sample_lost_overflow to inform
+		 * the user the buffer size needs to be increased.
+		 */
+		oprofile_cpu_buffer_inc_smpl_lost();
+	}
+}
+
+/* This function copies the per SPU buffers to the
+ * OProfile kernel buffer.
+ */
+void sync_spu_buff(void)
+{
+	int spu;
+	unsigned long flags;
+	int curr_head;
+
+	for (spu = 0; spu < num_spu_nodes; spu++) {
+		/* In case there was an issue and the buffer didn't
+		 * get created, skip it.
+		 */
+		if (spu_buff[spu].buff == NULL)
+			continue;
+
+		/* Hold the lock to make sure the head/tail
+		 * doesn't change while spu_buff_add() is
+		 * deciding if the buffer is full or not.
+		 * Being a little paranoid.
+		 */
+		spin_lock_irqsave(&buffer_lock, flags);
+		curr_head = spu_buff[spu].head;
+		spin_unlock_irqrestore(&buffer_lock, flags);
+
+		/* Transfer the current contents to the kernel buffer.
+		 * data can still be added to the head of the buffer.
+		 */
+		oprofile_put_buff(spu_buff[spu].buff,
+				  spu_buff[spu].tail,
+				  curr_head, max_spu_buff);
+
+		spin_lock_irqsave(&buffer_lock, flags);
+		spu_buff[spu].tail = curr_head;
+		spin_unlock_irqrestore(&buffer_lock, flags);
+	}
+
+}
+
+static void wq_sync_spu_buff(struct work_struct *work)
+{
+	/* move data from spu buffers to kernel buffer */
+	sync_spu_buff();
+
+	/* only reschedule if profiling is not done */
+	if (spu_prof_running)
+		schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+}
 
 /* Container for caching information about an active SPU task. */
 struct cached_info {
@@ -305,14 +400,21 @@
 
 	/* Record context info in event buffer */
 	spin_lock_irqsave(&buffer_lock, flags);
-	add_event_entry(ESCAPE_CODE);
-	add_event_entry(SPU_CTX_SWITCH_CODE);
-	add_event_entry(spu->number);
-	add_event_entry(spu->pid);
-	add_event_entry(spu->tgid);
-	add_event_entry(app_dcookie);
-	add_event_entry(spu_cookie);
-	add_event_entry(offset);
+	spu_buff_add(ESCAPE_CODE, spu->number);
+	spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
+	spu_buff_add(spu->number, spu->number);
+	spu_buff_add(spu->pid, spu->number);
+	spu_buff_add(spu->tgid, spu->number);
+	spu_buff_add(app_dcookie, spu->number);
+	spu_buff_add(spu_cookie, spu->number);
+	spu_buff_add(offset, spu->number);
+
+	/* Set flag to indicate SPU PC data can now be written out.  If
+	 * the SPU program counter data is seen before an SPU context
+	 * record is seen, the postprocessing will fail.
+	 */
+	spu_buff[spu->number].ctx_sw_seen = 1;
+
 	spin_unlock_irqrestore(&buffer_lock, flags);
 	smp_wmb();	/* ensure spu event buffer updates are written */
 			/* don't want entries intermingled... */
@@ -360,6 +462,47 @@
         return nodes;
 }
 
+static int oprofile_spu_buff_create(void)
+{
+	int spu;
+
+	max_spu_buff = oprofile_get_cpu_buffer_size();
+
+	for (spu = 0; spu < num_spu_nodes; spu++) {
+		/* create circular buffers to store the data in.
+		 * use locks to manage accessing the buffers
+		 */
+		spu_buff[spu].head = 0;
+		spu_buff[spu].tail = 0;
+
+		/*
+		 * Create a buffer for each SPU.  A single buffer for all
+		 * SPUs can't be allocated reliably, since enough contiguous
+		 * kernel memory may not be available.
+		 */
+
+		spu_buff[spu].buff = kzalloc((max_spu_buff
+					      * sizeof(unsigned long)),
+					     GFP_KERNEL);
+
+		if (!spu_buff[spu].buff) {
+			printk(KERN_ERR "SPU_PROF: "
+			       "%s, line %d:  oprofile_spu_buff_create "
+		       "failed to allocate spu buffer %d.\n",
+			       __func__, __LINE__, spu);
+
+			/* release the spu buffers that have been allocated */
+			while (spu >= 0) {
+				kfree(spu_buff[spu].buff);
+				spu_buff[spu].buff = 0;
+				spu--;
+			}
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
 /* The main purpose of this function is to synchronize
  * OProfile with SPUFS by registering to be notified of
  * SPU task switches.
@@ -372,20 +515,35 @@
  */
 int spu_sync_start(void)
 {
-	int k;
+	int spu;
 	int ret = SKIP_GENERIC_SYNC;
 	int register_ret;
 	unsigned long flags = 0;
 
 	spu_prof_num_nodes = number_of_online_nodes();
 	num_spu_nodes = spu_prof_num_nodes * 8;
+	INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
+
+	/* create buffer for storing the SPU data to put in
+	 * the kernel buffer.
+	 */
+	ret = oprofile_spu_buff_create();
+	if (ret)
+		goto out;
 
 	spin_lock_irqsave(&buffer_lock, flags);
-	add_event_entry(ESCAPE_CODE);
-	add_event_entry(SPU_PROFILING_CODE);
-	add_event_entry(num_spu_nodes);
+	for (spu = 0; spu < num_spu_nodes; spu++) {
+		spu_buff_add(ESCAPE_CODE, spu);
+		spu_buff_add(SPU_PROFILING_CODE, spu);
+		spu_buff_add(num_spu_nodes, spu);
+	}
 	spin_unlock_irqrestore(&buffer_lock, flags);
 
+	for (spu = 0; spu < num_spu_nodes; spu++) {
+		spu_buff[spu].ctx_sw_seen = 0;
+		spu_buff[spu].last_guard_val = 0;
+	}
+
 	/* Register for SPU events  */
 	register_ret = spu_switch_event_register(&spu_active);
 	if (register_ret) {
@@ -393,8 +551,6 @@
 		goto out;
 	}
 
-	for (k = 0; k < (MAX_NUMNODES * 8); k++)
-		last_guard_val[k] = 0;
 	pr_debug("spu_sync_start -- running.\n");
 out:
 	return ret;
@@ -446,13 +602,20 @@
 		 * use.	 We need to discard samples taken during the time
 		 * period which an overlay occurs (i.e., guard value changes).
 		 */
-		if (grd_val && grd_val != last_guard_val[spu_num]) {
-			last_guard_val[spu_num] = grd_val;
+		if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
+			spu_buff[spu_num].last_guard_val = grd_val;
 			/* Drop the rest of the samples. */
 			break;
 		}
 
-		add_event_entry(file_offset | spu_num_shifted);
+		/* We must ensure that the SPU context switch has been written
+		 * out before samples for the SPU.  Otherwise, the SPU context
+		 * information is not available and the postprocessing of the
+		 * SPU PC will fail with no available anonymous map information.
+		 */
+		if (spu_buff[spu_num].ctx_sw_seen)
+			spu_buff_add((file_offset | spu_num_shifted),
+					 spu_num);
 	}
 	spin_unlock(&buffer_lock);
 out:
@@ -463,20 +626,41 @@
 int spu_sync_stop(void)
 {
 	unsigned long flags = 0;
-	int ret = spu_switch_event_unregister(&spu_active);
-	if (ret) {
+	int ret;
+	int k;
+
+	ret = spu_switch_event_unregister(&spu_active);
+
+	if (ret)
 		printk(KERN_ERR "SPU_PROF: "
-			"%s, line %d: spu_switch_event_unregister returned %d\n",
-			__func__, __LINE__, ret);
-		goto out;
-	}
+		       "%s, line %d: spu_switch_event_unregister "	\
+		       "returned %d\n",
+		       __func__, __LINE__, ret);
+
+	/* flush any remaining data in the per SPU buffers */
+	sync_spu_buff();
 
 	spin_lock_irqsave(&cache_lock, flags);
 	ret = release_cached_info(RELEASE_ALL);
 	spin_unlock_irqrestore(&cache_lock, flags);
-out:
+
+	/* Remove the scheduled work queue item rather than waiting
+	 * for every queued entry to execute.  Then flush the pending
+	 * system-wide buffer to the event buffer.
+	 */
+	cancel_delayed_work(&spu_work);
+
+	for (k = 0; k < num_spu_nodes; k++) {
+		spu_buff[k].ctx_sw_seen = 0;
+
+		/*
+		 * spu_buff[k].buff will be NULL if there was a problem
+		 * allocating the buffer; kfree() handles NULL, so just free it.
+		 */
+		kfree(spu_buff[k].buff);
+		spu_buff[k].buff = 0;
+	}
 	pr_debug("spu_sync_stop -- done.\n");
 	return ret;
 }
 
-
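
spu_buff_add() above fills a per-SPU circular buffer and treats it as full while one slot is still free, so head == tail always means empty. A minimal self-contained ring buffer using the same convention, written in the equivalent modular form; the locking and the lost-sample accounting of the real code are left out.

/* Minimal ring buffer using the same "keep one slot free" convention. */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	unsigned long buf[RING_SIZE];
	unsigned int head;	/* next slot to write */
	unsigned int tail;	/* next slot to read  */
};

static int ring_full(const struct ring *r)
{
	/* full when advancing head would make it equal to tail */
	return (r->head + 1) % RING_SIZE == r->tail;
}

static int ring_add(struct ring *r, unsigned long val)
{
	if (ring_full(r))
		return 0;		/* caller counts this as a lost sample */
	r->buf[r->head] = val;
	r->head = (r->head + 1) % RING_SIZE;
	return 1;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };
	int stored = 0;

	for (unsigned long v = 0; v < RING_SIZE; v++)
		stored += ring_add(&r, v);

	/* only RING_SIZE - 1 entries fit; head never catches up with tail */
	printf("stored %d of %d\n", stored, RING_SIZE);
	return 0;
}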
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 5ff4de3..35141a8 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -404,7 +404,7 @@
 	}
 }
 
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
 {
 
 	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index a9260e2..6573027 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -14,6 +14,15 @@
 #	help
 #	  This option enables support for the CPCI405 board.
 
+config ACADIA
+	bool "Acadia"
+	depends on 40x
+	default n
+	select PPC40x_SIMPLE
+	select 405EZ
+	help
+	  This option enables support for the AMCC 405EZ Acadia evaluation board.
+
 config EP405
 	bool "EP405/EP405PC"
 	depends on 40x
@@ -23,6 +32,14 @@
 	help
 	  This option enables support for the EP405/EP405PC boards.
 
+config HCU4
+	bool "Hcu4"
+	depends on 40x
+	default y
+	select 405GPR
+	help
+	  This option enables support for the Netstal Maschinen HCU4 board.
+
 config KILAUEA
 	bool "Kilauea"
 	depends on 40x
@@ -93,6 +110,13 @@
 	  Most Virtex designs should use this unless it needs to do some
 	  special configuration at board probe time.
 
+config PPC40x_SIMPLE
+	bool "Simple PowerPC 40x board support"
+	depends on 40x
+	default n
+	help
+	  This option enables the simple PowerPC 40x platform support.
+
 # 40x specific CPU modules, selected based on the board above.
 config NP405H
 	bool
@@ -118,6 +142,12 @@
 	select IBM_NEW_EMAC_EMAC4
 	select IBM_NEW_EMAC_RGMII
 
+config 405EZ
+	bool
+	select IBM_NEW_EMAC_NO_FLOW_CTRL
+	select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
+	select IBM_NEW_EMAC_MAL_COMMON_ERR
+
 config 405GPR
 	bool
 
@@ -139,6 +169,14 @@
 	select IBM405_ERR77
 	select IBM405_ERR51
 
+config PPC4xx_GPIO
+	bool "PPC4xx GPIO support"
+	depends on 40x
+	select ARCH_REQUIRE_GPIOLIB
+	select GENERIC_GPIO
+	help
+	  Enable gpiolib support for ppc40x based boards
+
 # 40x errata/workaround config symbols, selected by the CPU models above
 
 # All 405-based cores up until the 405GPR and 405EP have this errata.
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
index 5533a5c..9bab76a 100644
--- a/arch/powerpc/platforms/40x/Makefile
+++ b/arch/powerpc/platforms/40x/Makefile
@@ -1,5 +1,7 @@
 obj-$(CONFIG_KILAUEA)				+= kilauea.o
+obj-$(CONFIG_HCU4)				+= hcu4.o
 obj-$(CONFIG_MAKALU)				+= makalu.o
 obj-$(CONFIG_WALNUT)				+= walnut.o
 obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD)	+= virtex.o
 obj-$(CONFIG_EP405)				+= ep405.o
+obj-$(CONFIG_PPC40x_SIMPLE)		+= ppc40x_simple.o
diff --git a/arch/powerpc/platforms/40x/hcu4.c b/arch/powerpc/platforms/40x/hcu4.c
new file mode 100644
index 0000000..60b2afe
--- /dev/null
+++ b/arch/powerpc/platforms/40x/hcu4.c
@@ -0,0 +1,61 @@
+/*
+ * Architecture- / platform-specific boot-time initialization code for
+ * IBM PowerPC 4xx based boards. Adapted from original
+ * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
+ * <dan@net4x.com>.
+ *
+ * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
+ *
+ * Rewritten and ported to the merged powerpc tree:
+ * Copyright 2007 IBM Corporation
+ * Josh Boyer <jwboyer@linux.vnet.ibm.com>
+ *
+ * 2002 (c) MontaVista, Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+
+static __initdata struct of_device_id hcu4_of_bus[] = {
+	{ .compatible = "ibm,plb3", },
+	{ .compatible = "ibm,opb", },
+	{ .compatible = "ibm,ebc", },
+	{},
+};
+
+static int __init hcu4_device_probe(void)
+{
+	of_platform_bus_probe(NULL, hcu4_of_bus, NULL);
+	return 0;
+}
+machine_device_initcall(hcu4, hcu4_device_probe);
+
+static int __init hcu4_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	if (!of_flat_dt_is_compatible(root, "netstal,hcu4"))
+		return 0;
+
+	return 1;
+}
+
+define_machine(hcu4) {
+	.name			= "HCU4",
+	.probe			= hcu4_probe,
+	.progress		= udbg_progress,
+	.init_IRQ		= uic_init_tree,
+	.get_irq		= uic_get_irq,
+	.restart		= ppc4xx_reset_system,
+	.calibrate_decr		= generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
new file mode 100644
index 0000000..4498a86
--- /dev/null
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -0,0 +1,80 @@
+/*
+ * Generic PowerPC 40x platform support
+ *
+ * Copyright 2008 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ *
+ * This implements simple platform support for PowerPC 40x chips.  This is
+ * mostly used for eval boards or other simple and "generic" 40x boards.  If
+ * your board has custom functions or hardware, then you will likely want to
+ * implement your own board.c file to accommodate it.
+ */
+
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc4xx.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+#include <asm/uic.h>
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+
+static __initdata struct of_device_id ppc40x_of_bus[] = {
+	{ .compatible = "ibm,plb3", },
+	{ .compatible = "ibm,plb4", },
+	{ .compatible = "ibm,opb", },
+	{ .compatible = "ibm,ebc", },
+	{ .compatible = "simple-bus", },
+	{},
+};
+
+static int __init ppc40x_device_probe(void)
+{
+	of_platform_bus_probe(NULL, ppc40x_of_bus, NULL);
+
+	return 0;
+}
+machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
+
+/* This is the list of boards that can be supported by this simple
+ * platform code.  This does _not_ mean the boards are compatible,
+ * as they most certainly are not from a device tree perspective.
+ * However, their differences are handled by the device tree and the
+ * drivers and therefore they don't need custom board support files.
+ *
+ * Again, if your board needs to do things differently then create a
+ * board.c file for it rather than adding it to this list.
+ */
+static char *board[] __initdata = {
+	"amcc,acadia"
+};
+
+static int __init ppc40x_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+	int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(board); i++) {
+		if (of_flat_dt_is_compatible(root, board[i])) {
+			ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+define_machine(ppc40x_simple) {
+	.name = "PowerPC 40x Platform",
+	.probe = ppc40x_probe,
+	.progress = udbg_progress,
+	.init_IRQ = uic_init_tree,
+	.get_irq = uic_get_irq,
+	.restart = ppc4xx_reset_system,
+	.calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 79c1154..3496bc0 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -167,6 +167,14 @@
 	help
 	  This option enables the simple PowerPC 44x platform support.
 
+config PPC4xx_GPIO
+	bool "PPC4xx GPIO support"
+	depends on 44x
+	select ARCH_REQUIRE_GPIOLIB
+	select GENERIC_GPIO
+	help
+	  Enable gpiolib support for ppc440 based boards
+
 # 44x specific CPU modules, selected based on the board above.
 config 440EP
 	bool
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 044b4e6..ae7c34f 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -99,11 +99,14 @@
 	out_be32(&xlb->master_pri_enable, 0xff);
 	out_be32(&xlb->master_priority, 0x11111111);
 
-	/* Disable XLB pipelining
+	/*
+	 * Disable XLB pipelining
 	 * (cfr errata 292. We could do this only just before ATA PIO
 	 *  transaction and re-enable it afterwards ...)
+	 * Not needed on MPC5200B.
 	 */
-	out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
+	if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+		out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
 
 	iounmap(xlb);
 }
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 8a3b117..81cee7b 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -193,7 +193,6 @@
 static void ksi8560_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -215,9 +214,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 0293e3d..21f0090 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -207,7 +207,6 @@
 static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -219,9 +218,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 50d7ea8..aeb6a5b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -307,7 +307,6 @@
 static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -320,9 +319,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index b9246ea..7ec77ce 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -136,7 +136,6 @@
 static void sbc8548_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -149,9 +148,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 0c9a856..472f254 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -194,7 +194,6 @@
 static void sbc8560_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -206,9 +205,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 18499d7..0cca8f5 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -130,7 +130,6 @@
 static void stx_gp3_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -142,9 +141,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index d850880..2933a8e 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -138,7 +138,6 @@
 static void tqm85xx_show_cpuinfo(struct seq_file *m)
 {
 	uint pvid, svid, phid1;
-	uint memsize = total_memory;
 
 	pvid = mfspr(SPRN_PVR);
 	svid = mfspr(SPRN_SVR);
@@ -150,9 +149,6 @@
 	/* Display cpu Pll setting */
 	phid1 = mfspr(SPRN_HID1);
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
-	/* Display the amount of memory */
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 821c45f..fb371f5 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -127,7 +127,6 @@
 
 static void gef_sbc610_show_cpuinfo(struct seq_file *m)
 {
-	uint memsize = total_memory;
 	uint svid = mfspr(SPRN_SVR);
 
 	seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n");
@@ -137,7 +136,6 @@
 	seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev());
 
 	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 2672829..27e0e68 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -101,13 +101,11 @@
 static void
 mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
 {
-	uint memsize = total_memory;
 	uint svid = mfspr(SPRN_SVR);
 
 	seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
 
 	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index da677a7..5fd7ed4 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -63,13 +63,11 @@
 static void
 sbc8641_show_cpuinfo(struct seq_file *m)
 {
-	uint memsize = total_memory;
 	uint svid = mfspr(SPRN_SVR);
 
 	seq_printf(m, "Vendor\t\t: Wind River Systems\n");
 
 	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 }
 
 
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 2a14b05..665af1c 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -21,6 +21,7 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/cell-regs.h>
+#include <asm/kdump.h>
 
 #include "ras.h"
 
@@ -111,9 +112,8 @@
 	int ret = -ENOMEM;
 	unsigned long addr;
 
-#ifdef CONFIG_CRASH_DUMP
-	rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
-#endif
+	if (__kdump_flag)
+		rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
 
 	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (!area)
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index efb3964..c0d86e1 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -54,8 +54,8 @@
 #endif
 
 /*
- * The primary thread of each non-boot processor is recorded here before
- * smp init.
+ * The primary thread of each non-boot processor was started from the OF client
+ * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
  */
 static cpumask_t of_spin_map;
 
@@ -208,11 +208,7 @@
 	/* Mark threads which are still spinning in hold loops. */
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) {
-			if (i % 2 == 0)
-				/*
-				 * Even-numbered logical cpus correspond to
-				 * primary threads.
-				 */
+			if (cpu_thread_in_core(i) == 0)
 				cpu_set(i, of_spin_map);
 		}
 	} else {
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 010a51f..b73c369 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -548,6 +548,11 @@
 	int ret;
 	struct spu_context *ctx = file->private_data;
 
+	/* pre-check for file position: if we'd return EOF, there's no point
+	 * causing a deschedule */
+	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
+		return 0;
+
 	ret = spu_acquire_saved(ctx);
 	if (ret)
 		return ret;
@@ -2426,38 +2431,49 @@
 static int spufs_switch_log_open(struct inode *inode, struct file *file)
 {
 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+	int rc;
 
-	/*
-	 * We (ab-)use the mapping_lock here because it serves the similar
-	 * purpose for synchronizing open/close elsewhere.  Maybe it should
-	 * be renamed eventually.
-	 */
-	mutex_lock(&ctx->mapping_lock);
+	rc = spu_acquire(ctx);
+	if (rc)
+		return rc;
+
 	if (ctx->switch_log) {
-		spin_lock(&ctx->switch_log->lock);
-		ctx->switch_log->head = 0;
-		ctx->switch_log->tail = 0;
-		spin_unlock(&ctx->switch_log->lock);
-	} else {
-		/*
-		 * We allocate the switch log data structures on first open.
-		 * They will never be free because we assume a context will
-		 * be traced until it goes away.
-		 */
-		ctx->switch_log = kzalloc(sizeof(struct switch_log) +
-			SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
-			GFP_KERNEL);
-		if (!ctx->switch_log)
-			goto out;
-		spin_lock_init(&ctx->switch_log->lock);
-		init_waitqueue_head(&ctx->switch_log->wait);
+		rc = -EBUSY;
+		goto out;
 	}
-	mutex_unlock(&ctx->mapping_lock);
+
+	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
+		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+		GFP_KERNEL);
+
+	if (!ctx->switch_log) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ctx->switch_log->head = ctx->switch_log->tail = 0;
+	init_waitqueue_head(&ctx->switch_log->wait);
+	rc = 0;
+
+out:
+	spu_release(ctx);
+	return rc;
+}
+
+static int spufs_switch_log_release(struct inode *inode, struct file *file)
+{
+	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+	int rc;
+
+	rc = spu_acquire(ctx);
+	if (rc)
+		return rc;
+
+	kfree(ctx->switch_log);
+	ctx->switch_log = NULL;
+	spu_release(ctx);
 
 	return 0;
- out:
-	mutex_unlock(&ctx->mapping_lock);
-	return -ENOMEM;
 }
 
 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
@@ -2485,42 +2501,54 @@
 	if (!buf || len < 0)
 		return -EINVAL;
 
+	error = spu_acquire(ctx);
+	if (error)
+		return error;
+
 	while (cnt < len) {
 		char tbuf[128];
 		int width;
 
-		if (file->f_flags & O_NONBLOCK) {
-			if (spufs_switch_log_used(ctx) <= 0)
-				return cnt ? cnt : -EAGAIN;
-		} else {
-			/* Wait for data in buffer */
-			error = wait_event_interruptible(ctx->switch_log->wait,
-					spufs_switch_log_used(ctx) > 0);
-			if (error)
+		if (spufs_switch_log_used(ctx) == 0) {
+			if (cnt > 0) {
+				/* If there's data ready to go, we can
+				 * just return straight away */
 				break;
-		}
 
-		spin_lock(&ctx->switch_log->lock);
-		if (ctx->switch_log->head == ctx->switch_log->tail) {
-			/* multiple readers race? */
-			spin_unlock(&ctx->switch_log->lock);
-			continue;
+			} else if (file->f_flags & O_NONBLOCK) {
+				error = -EAGAIN;
+				break;
+
+			} else {
+				/* spufs_wait will drop the mutex and
+				 * re-acquire, but since we're in read(), the
+				 * file cannot be _released (and so
+				 * ctx->switch_log is stable).
+				 */
+				error = spufs_wait(ctx->switch_log->wait,
+						spufs_switch_log_used(ctx) > 0);
+
+				/* On error, spufs_wait returns without the
+				 * state mutex held */
+				if (error)
+					return error;
+
+				/* We may have had entries read from underneath
+				 * us while we dropped the mutex in spufs_wait,
+				 * so re-check */
+				if (spufs_switch_log_used(ctx) == 0)
+					continue;
+			}
 		}
 
 		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
-		if (width < len) {
+		if (width < len)
 			ctx->switch_log->tail =
 				(ctx->switch_log->tail + 1) %
 				 SWITCH_LOG_BUFSIZE;
-		}
-
-		spin_unlock(&ctx->switch_log->lock);
-
-		/*
-		 * If the record is greater than space available return
-		 * partial buffer (so far)
-		 */
-		if (width >= len)
+		else
+			/* If the record is larger than the space available,
+			 * return the partial buffer read so far */
 			break;
 
 		error = copy_to_user(buf + cnt, tbuf, width);
@@ -2529,6 +2557,8 @@
 		cnt += width;
 	}
 
+	spu_release(ctx);
+
 	return cnt == 0 ? error : cnt;
 }
 
@@ -2537,29 +2567,41 @@
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
 	unsigned int mask = 0;
+	int rc;
 
 	poll_wait(file, &ctx->switch_log->wait, wait);
 
+	rc = spu_acquire(ctx);
+	if (rc)
+		return rc;
+
 	if (spufs_switch_log_used(ctx) > 0)
 		mask |= POLLIN;
 
+	spu_release(ctx);
+
 	return mask;
 }
 
 static const struct file_operations spufs_switch_log_fops = {
-	.owner	= THIS_MODULE,
-	.open	= spufs_switch_log_open,
-	.read	= spufs_switch_log_read,
-	.poll	= spufs_switch_log_poll,
+	.owner		= THIS_MODULE,
+	.open		= spufs_switch_log_open,
+	.read		= spufs_switch_log_read,
+	.poll		= spufs_switch_log_poll,
+	.release	= spufs_switch_log_release,
 };
 
+/**
+ * Log a context switch event to a switch log reader.
+ *
+ * Must be called with ctx->state_mutex held.
+ */
 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
 		u32 type, u32 val)
 {
 	if (!ctx->switch_log)
 		return;
 
-	spin_lock(&ctx->switch_log->lock);
 	if (spufs_switch_log_avail(ctx) > 1) {
 		struct switch_log_entry *p;
 
@@ -2573,7 +2615,6 @@
 		ctx->switch_log->head =
 			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
 	}
-	spin_unlock(&ctx->switch_log->lock);
 
 	wake_up(&ctx->switch_log->wait);
 }
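
The switch_log rework above drops the per-log spinlock and serializes open, read, release and notify on the context state mutex instead, with head and tail indices wrapping modulo SWITCH_LOG_BUFSIZE. The standalone C sketch below models only that head/tail ring arithmetic; demo_used() and demo_avail() are illustrative assumptions, not the real spufs_switch_log_used()/spufs_switch_log_avail() helpers, which are not part of this hunk.

/*
 * Standalone sketch (not kernel code) of the head/tail ring-buffer
 * arithmetic the switch_log paths above rely on.
 */
#include <stdio.h>

#define LOG_BUFSIZE 32			/* stand-in for SWITCH_LOG_BUFSIZE */

struct demo_log {
	unsigned long head;		/* next slot the producer fills */
	unsigned long tail;		/* next slot the consumer reads */
};

static unsigned long demo_used(const struct demo_log *log)
{
	return (log->head - log->tail + LOG_BUFSIZE) % LOG_BUFSIZE;
}

static unsigned long demo_avail(const struct demo_log *log)
{
	return LOG_BUFSIZE - demo_used(log);
}

int main(void)
{
	struct demo_log log = { .head = 0, .tail = 0 };
	int i;

	/* producer side, like spu_switch_log_notify() advancing the head */
	for (i = 0; i < 5; i++)
		log.head = (log.head + 1) % LOG_BUFSIZE;
	printf("used=%lu avail=%lu\n", demo_used(&log), demo_avail(&log));

	/* consumer side, like the read() path advancing the tail */
	log.tail = (log.tail + 1) % LOG_BUFSIZE;
	printf("used=%lu avail=%lu\n", demo_used(&log), demo_avail(&log));
	return 0;
}
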
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c9bb7cf..c58bd36 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -249,6 +249,7 @@
 
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
+	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
 	spu_release(ctx);
 
 	if (signal_pending(current))
@@ -417,8 +418,6 @@
 	ret = spu_run_fini(ctx, npc, &status);
 	spu_yield(ctx);
 
-	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
-
 	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
 	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
 		ctx->stats.libassist++;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 67595bc..2ad914c 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -312,6 +312,15 @@
 	 */
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
+		/*
+		 * "available_spus" counts how many spus are not potentially
+		 * going to be used by other affinity gangs whose reference
+		 * context is already in place. Although this code tries to
+		 * avoid affinity gangs whose total number of contexts exceeds
+		 * the number of spus in the node, that can still happen
+		 * sporadically. In that case, available_spus becomes
+		 * negative, which is harmless.
+		 */
 		int available_spus;
 
 		node = (node < MAX_NUMNODES) ? node : 0;
@@ -321,12 +330,10 @@
 		available_spus = 0;
 		mutex_lock(&cbe_spu_info[node].list_mutex);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
-			if (spu->ctx && spu->ctx->gang
-					&& spu->ctx->aff_offset == 0)
-				available_spus -=
-					(spu->ctx->gang->contexts - 1);
-			else
-				available_spus++;
+			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
+					&& spu->ctx->gang->aff_ref_spu)
+				available_spus -= spu->ctx->gang->contexts;
+			available_spus++;
 		}
 		if (available_spus < ctx->gang->contexts) {
 			mutex_unlock(&cbe_spu_info[node].list_mutex);
@@ -437,6 +444,11 @@
 		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 
 	if (ctx->gang)
+		/*
+		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
+		 * being considered in this gang. Using atomic_dec_if_positive
+		 * allows us to skip an explicit check for affinity in this
+		 * gang.
+		 */
 		atomic_dec_if_positive(&ctx->gang->aff_sched_count);
 
 	spu_switch_notify(spu, NULL);
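
The comment added above explains how available_spus is accumulated while walking a node's spu list: every spu counts as one, but an spu whose context anchors an already-placed affinity gang reserves that whole gang, so the gang's context count is subtracted and the total may go negative. The standalone sketch below reproduces just that accounting with made-up numbers; struct demo_spu and the eight-spu example are illustrative assumptions, not kernel data structures.

/*
 * Standalone sketch (not kernel code) of the "available_spus" accounting
 * described in the comment above.
 */
#include <stdio.h>

struct demo_spu {
	int has_aff_ref;	/* ctx && ctx->gang && !aff_offset && aff_ref_spu */
	int gang_contexts;	/* that gang's ctx->gang->contexts */
};

static int count_available(const struct demo_spu *spus, int nr_spus)
{
	int available = 0, i;

	for (i = 0; i < nr_spus; i++) {
		if (spus[i].has_aff_ref)
			available -= spus[i].gang_contexts;
		available++;	/* every spu in the node still counts as one */
	}
	return available;
}

int main(void)
{
	/* eight spus; one of them anchors a four-context affinity gang */
	struct demo_spu spus[8] = { [2] = { .has_aff_ref = 1, .gang_contexts = 4 } };
	int available = count_available(spus, 8);

	printf("available_spus = %d\n", available);	/* 8 - 4 = 4 */
	if (available < 5)
		printf("a five-context gang would not fit in this node\n");
	return 0;
}
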
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 8ae8ef9..15c62d3 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -65,7 +65,6 @@
 };
 
 struct switch_log {
-	spinlock_t		lock;
 	wait_queue_head_t	wait;
 	unsigned long		head;
 	unsigned long		tail;
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 92d20e9..d0b1f3f 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -40,6 +40,7 @@
 static ktime_t sputrace_start;
 static unsigned long sputrace_head, sputrace_tail;
 static struct sputrace *sputrace_log;
+static int sputrace_logging;
 
 static int sputrace_used(void)
 {
@@ -79,6 +80,11 @@
 		char tbuf[128];
 		int width;
 
+		/* If we have data ready to return, don't block waiting
+		 * for more */
+		if (cnt > 0 && sputrace_used() == 0)
+			break;
+
 		error = wait_event_interruptible(sputrace_wait,
 						 sputrace_used() > 0);
 		if (error)
@@ -109,24 +115,49 @@
 
 static int sputrace_open(struct inode *inode, struct file *file)
 {
+	int rc;
+
 	spin_lock(&sputrace_lock);
+	if (sputrace_logging) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	sputrace_logging = 1;
 	sputrace_head = sputrace_tail = 0;
 	sputrace_start = ktime_get();
-	spin_unlock(&sputrace_lock);
+	rc = 0;
 
+out:
+	spin_unlock(&sputrace_lock);
+	return rc;
+}
+
+static int sputrace_release(struct inode *inode, struct file *file)
+{
+	spin_lock(&sputrace_lock);
+	sputrace_logging = 0;
+	spin_unlock(&sputrace_lock);
 	return 0;
 }
 
 static const struct file_operations sputrace_fops = {
-	.owner	= THIS_MODULE,
-	.open	= sputrace_open,
-	.read	= sputrace_read,
+	.owner   = THIS_MODULE,
+	.open    = sputrace_open,
+	.read    = sputrace_read,
+	.release = sputrace_release,
 };
 
 static void sputrace_log_item(const char *name, struct spu_context *ctx,
 		struct spu *spu)
 {
 	spin_lock(&sputrace_lock);
+
+	if (!sputrace_logging) {
+		spin_unlock(&sputrace_lock);
+		return;
+	}
+
 	if (sputrace_avail() > 1) {
 		struct sputrace *t = sputrace_log + sputrace_head;
 
@@ -232,6 +263,7 @@
 
 	remove_proc_entry("sputrace", NULL);
 	kfree(sputrace_log);
+	marker_synchronize_unregister();
 }
 
 module_init(sputrace_init);
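
The sputrace changes above gate the trace ring on a sputrace_logging flag: open() claims it under the lock or fails with -EBUSY, release() clears it, and the marker probes silently drop events while nobody is logging. The standalone sketch below mimics that open/release/log pattern in user space; the pthread mutex and the demo_* names are stand-ins for the kernel spinlock and the file operations, used purely for illustration.

/*
 * Standalone sketch (not kernel code) of the single-opener gating added to
 * sputrace above.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;
static int trace_logging;

static int demo_open(void)
{
	int rc = 0;

	pthread_mutex_lock(&trace_lock);
	if (trace_logging)
		rc = -EBUSY;		/* only one reader may own the log */
	else
		trace_logging = 1;
	pthread_mutex_unlock(&trace_lock);
	return rc;
}

static void demo_release(void)
{
	pthread_mutex_lock(&trace_lock);
	trace_logging = 0;
	pthread_mutex_unlock(&trace_lock);
}

static void demo_log_event(const char *name)
{
	pthread_mutex_lock(&trace_lock);
	if (trace_logging)
		printf("logged: %s\n", name);
	/* else: dropped, like sputrace_log_item() when logging is off */
	pthread_mutex_unlock(&trace_lock);
}

int main(void)
{
	demo_log_event("dropped - nobody is listening");
	printf("first open:  %d\n", demo_open());
	printf("second open: %d\n", demo_open());	/* -EBUSY */
	demo_log_event("spu_run enter");
	demo_release();
	return 0;
}
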
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index d0b25b8..32ba0fa0 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -116,10 +116,7 @@
 
 void c2k_show_cpuinfo(struct seq_file *m)
 {
-	uint memsize = total_memory;
-
 	seq_printf(m, "Vendor\t\t: GEFanuc\n");
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 	seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING);
 }
 
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index 5a19b9a..4c485e9 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -119,10 +119,7 @@
 
 void prpmc2800_show_cpuinfo(struct seq_file *m)
 {
-	uint memsize = total_memory;
-
 	seq_printf(m, "Vendor\t\t: Motorola\n");
-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
 	seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING);
 }
 
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 140d02a..a623ad2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -22,6 +22,12 @@
 	int ret;
 
 	start_pfn = base >> PAGE_SHIFT;
+
+	if (!pfn_valid(start_pfn)) {
+		lmb_remove(base, lmb_size);
+		return 0;
+	}
+
 	zone = page_zone(pfn_to_page(start_pfn));
 
 	/*
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a8c4466..d56491d 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -44,6 +44,7 @@
 #include <asm/tce.h>
 #include <asm/ppc-pci.h>
 #include <asm/udbg.h>
+#include <asm/kdump.h>
 
 #include "plpar_wrappers.h"
 
@@ -291,9 +292,8 @@
 
 	tbl->it_base = (unsigned long)__va(*basep);
 
-#ifndef CONFIG_CRASH_DUMP
-	memset((void *)tbl->it_base, 0, *sizep);
-#endif
+	if (!__kdump_flag)
+		memset((void *)tbl->it_base, 0, *sizep);
 
 	tbl->it_busno = phb->bus->number;
 
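
The hunk above replaces a compile-time CONFIG_CRASH_DUMP guard around the TCE-table memset with a runtime test of __kdump_flag, so a kernel built with crash-dump support still clears the table on a normal boot and only skips the clear when it is actually running as the kdump capture kernel. The standalone sketch below contrasts the two situations; kdump_flag and init_table() are illustrative stand-ins, not the kernel symbols.

/*
 * Standalone sketch (not kernel code) of the runtime kdump check that
 * replaces the #ifndef CONFIG_CRASH_DUMP guard above.
 */
#include <stdio.h>
#include <string.h>

static int kdump_flag;			/* stand-in for __kdump_flag */

static void init_table(char *tbl, size_t size)
{
	/* normal boot: start from a clean table; kdump capture kernel:
	 * preserve the crashed kernel's entries */
	if (!kdump_flag)
		memset(tbl, 0, size);
}

int main(void)
{
	char table[16] = "stale-entries";

	kdump_flag = 0;				/* normal boot */
	init_table(table, sizeof(table));
	printf("normal boot: \"%s\"\n", table);

	strcpy(table, "stale-entries");
	kdump_flag = 1;				/* booted as kdump capture kernel */
	init_table(table, sizeof(table));
	printf("kdump boot:  \"%s\"\n", table);
	return 0;
}
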
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index e00f96b..1a231c3 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -52,8 +52,8 @@
 
 
 /*
- * The primary thread of each non-boot processor is recorded here before
- * smp init.
+ * The primary thread of each non-boot processor was started from the OF client
+ * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
  */
 static cpumask_t of_spin_map;
 
@@ -161,8 +161,7 @@
 static int smp_pSeries_cpu_bootable(unsigned int nr)
 {
 	/* Special case - we inhibit secondary thread startup
-	 * during boot if the user requests it.  Odd-numbered
-	 * cpus are assumed to be secondary threads.
+	 * during boot if the user requests it.
 	 */
 	if (system_state < SYSTEM_RUNNING &&
 	    cpu_has_feature(CPU_FTR_SMT) &&
@@ -199,11 +198,7 @@
 	/* Mark threads which are still spinning in hold loops. */
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) { 
-			if (i % 2 == 0)
-				/*
-				 * Even-numbered logical cpus correspond to
-				 * primary threads.
-				 */
+			if (cpu_thread_in_core(i) == 0)
 				cpu_set(i, of_spin_map);
 		}
 	} else {
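
The change above stops assuming that even-numbered logical cpus are primary threads and asks cpu_thread_in_core() instead, which keeps working when a core exposes more than two threads. The standalone sketch below only illustrates the idea; the plain modulo formula and the SMT4 example are assumptions for demonstration, not the kernel's exact implementation.

/*
 * Standalone sketch (not kernel code): identify primary threads by their
 * position within the core instead of by even/odd cpu number.
 */
#include <stdio.h>

static int demo_thread_in_core(int cpu, int threads_per_core)
{
	return cpu % threads_per_core;
}

int main(void)
{
	int threads_per_core = 4;	/* e.g. an SMT4 core */
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d: thread %d%s\n", cpu,
		       demo_thread_in_core(cpu, threads_per_core),
		       demo_thread_in_core(cpu, threads_per_core) == 0 ?
		       " (primary)" : "");
	return 0;
}

With four threads per core, cpus 2 and 6 are even-numbered yet are not primary threads, which is exactly the case the old "i % 2 == 0" test got wrong.
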
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index a44709a..5afce11 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -37,6 +37,7 @@
 ifeq ($(CONFIG_PCI),y)
 obj-$(CONFIG_4xx)		+= ppc4xx_pci.o
 endif
+obj-$(CONFIG_PPC4xx_GPIO)	+= ppc4xx_gpio.o
 
 obj-$(CONFIG_CPM)		+= cpm_common.o
 obj-$(CONFIG_CPM2)		+= cpm2.o cpm2_pic.o
diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/sysdev/ppc4xx_gpio.c
new file mode 100644
index 0000000..110efe2
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_gpio.c
@@ -0,0 +1,217 @@
+/*
+ * PPC4xx gpio driver
+ *
+ * Copyright (c) 2008 Harris Corporation
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Steve Falco <sfalco@harris.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+
+#define GPIO_MASK(gpio)		(0x80000000 >> (gpio))
+#define GPIO_MASK2(gpio)	(0xc0000000 >> ((gpio) * 2))
+
+/* Physical GPIO register layout */
+struct ppc4xx_gpio {
+	__be32 or;
+	__be32 tcr;
+	__be32 osrl;
+	__be32 osrh;
+	__be32 tsrl;
+	__be32 tsrh;
+	__be32 odr;
+	__be32 ir;
+	__be32 rr1;
+	__be32 rr2;
+	__be32 rr3;
+	__be32 reserved1;
+	__be32 isr1l;
+	__be32 isr1h;
+	__be32 isr2l;
+	__be32 isr2h;
+	__be32 isr3l;
+	__be32 isr3h;
+};
+
+struct ppc4xx_gpio_chip {
+	struct of_mm_gpio_chip mm_gc;
+	spinlock_t lock;
+};
+
+/*
+ * GPIO LIB API implementation for GPIOs
+ *
+ * Each gpio controller provides a maximum of 32 gpios.
+ */
+
+static inline struct ppc4xx_gpio_chip *
+to_ppc4xx_gpiochip(struct of_mm_gpio_chip *mm_gc)
+{
+	return container_of(mm_gc, struct ppc4xx_gpio_chip, mm_gc);
+}
+
+static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
+
+	return in_be32(&regs->ir) & GPIO_MASK(gpio);
+}
+
+static inline void
+__ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
+
+	if (val)
+		setbits32(&regs->or, GPIO_MASK(gpio));
+	else
+		clrbits32(&regs->or, GPIO_MASK(gpio));
+}
+
+static void
+ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->lock, flags);
+
+	__ppc4xx_gpio_set(gc, gpio, val);
+
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+}
+
+static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
+	struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->lock, flags);
+
+	/* Disable open-drain function */
+	clrbits32(&regs->odr, GPIO_MASK(gpio));
+
+	/* Float the pin */
+	clrbits32(&regs->tcr, GPIO_MASK(gpio));
+
+	/* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */
+	if (gpio < 16) {
+		clrbits32(&regs->osrl, GPIO_MASK2(gpio));
+		clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
+	} else {
+		clrbits32(&regs->osrh, GPIO_MASK2(gpio));
+		clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
+	}
+
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int
+ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
+	struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->lock, flags);
+
+	/* First set initial value */
+	__ppc4xx_gpio_set(gc, gpio, val);
+
+	/* Disable open-drain function */
+	clrbits32(&regs->odr, GPIO_MASK(gpio));
+
+	/* Drive the pin */
+	setbits32(&regs->tcr, GPIO_MASK(gpio));
+
+	/* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */
+	if (gpio < 16) {
+		clrbits32(&regs->osrl, GPIO_MASK2(gpio));
+		clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
+	} else {
+		clrbits32(&regs->osrh, GPIO_MASK2(gpio));
+		clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
+	}
+
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+	return 0;
+}
+
+static int __init ppc4xx_add_gpiochips(void)
+{
+	struct device_node *np;
+
+	for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") {
+		int ret;
+		struct ppc4xx_gpio_chip *ppc4xx_gc;
+		struct of_mm_gpio_chip *mm_gc;
+		struct of_gpio_chip *of_gc;
+		struct gpio_chip *gc;
+
+		ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL);
+		if (!ppc4xx_gc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		spin_lock_init(&ppc4xx_gc->lock);
+
+		mm_gc = &ppc4xx_gc->mm_gc;
+		of_gc = &mm_gc->of_gc;
+		gc = &of_gc->gc;
+
+		of_gc->gpio_cells = 2;
+		gc->ngpio = 32;
+		gc->direction_input = ppc4xx_gpio_dir_in;
+		gc->direction_output = ppc4xx_gpio_dir_out;
+		gc->get = ppc4xx_gpio_get;
+		gc->set = ppc4xx_gpio_set;
+
+		ret = of_mm_gpiochip_add(np, mm_gc);
+		if (ret)
+			goto err;
+		continue;
+err:
+		pr_err("%s: registration failed with status %d\n",
+		       np->full_name, ret);
+		kfree(ppc4xx_gc);
+		/* try others anyway */
+	}
+	return 0;
+}
+arch_initcall(ppc4xx_add_gpiochips);
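
In the driver above the PPC4xx GPIO registers number bit 0 as the most significant bit, so GPIO_MASK() shifts 0x80000000 right by the gpio number and GPIO_MASK2() selects the matching two-bit field in the output/three-state select registers. The standalone sketch below only prints a few of those masks to make the bit layout visible; it is an illustration of the macro arithmetic, not part of the driver.

/*
 * Standalone sketch (not kernel code) of the GPIO_MASK()/GPIO_MASK2()
 * arithmetic used by the driver above.
 */
#include <stdio.h>

#define DEMO_GPIO_MASK(gpio)	(0x80000000u >> (gpio))
#define DEMO_GPIO_MASK2(gpio)	(0xc0000000u >> ((gpio) * 2))

int main(void)
{
	unsigned int gpio;

	for (gpio = 0; gpio < 4; gpio++)
		printf("gpio %2u: mask=0x%08x mask2=0x%08x\n", gpio,
		       DEMO_GPIO_MASK(gpio), DEMO_GPIO_MASK2(gpio));

	gpio = 15;	/* last gpio served by the "low" select registers */
	printf("gpio %2u: mask=0x%08x mask2=0x%08x\n", gpio,
	       DEMO_GPIO_MASK(gpio), DEMO_GPIO_MASK2(gpio));
	return 0;
}
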
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bc581d8..70b7645 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -78,6 +78,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Base setup"
 
 comment "Processor type and features"
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ea40a9d..de3fad6 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -99,6 +99,7 @@
 #define TIF_31BIT		18	/* 32bit process */ 
 #define TIF_MEMDIE		19
 #define TIF_RESTORE_SIGMASK	20	/* restore signal mask in do_signal() */
+#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -112,6 +113,7 @@
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 1169130..158b0d6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -189,14 +189,3 @@
 	return rc;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
-	unsigned long start_pfn, end_pfn;
-
-	start_pfn = PFN_DOWN(start);
-	end_pfn = start_pfn + PFN_DOWN(size);
-	return offline_pages(start_pfn, end_pfn, 120 * HZ);
-}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5131d50..cb2c87d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -12,6 +12,7 @@
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_DMA_COHERENT
+	select HAVE_IOREMAP_PROT if MMU
 	help
 	  The SuperH is a RISC processor targeted for use in embedded systems
 	  and consumer electronics; it was also used in the Sega Dreamcast
@@ -20,6 +21,10 @@
 
 config SUPERH32
 	def_bool !SUPERH64
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_FTRACE
 
 config SUPERH64
 	def_bool y if CPU_SH5
@@ -54,8 +59,11 @@
 config GENERIC_IRQ_PROBE
 	def_bool y
 
+config GENERIC_GPIO
+	def_bool n
+
 config GENERIC_CALIBRATE_DELAY
-	def_bool y
+	bool
 
 config GENERIC_IOMAP
 	bool
@@ -66,6 +74,9 @@
 config GENERIC_CLOCKEVENTS
 	def_bool n
 
+config GENERIC_CLOCKEVENTS_BROADCAST
+	bool
+
 config GENERIC_LOCKBREAK
 	def_bool y
 	depends on SMP && PREEMPT
@@ -92,6 +103,10 @@
 config LOCKDEP_SUPPORT
 	def_bool y
 
+config HAVE_LATENCYTOP_SUPPORT
+	def_bool y
+	depends on !SMP
+
 config ARCH_HAS_ILOG2_U32
 	def_bool n
 
@@ -106,6 +121,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "System type"
 
 #
@@ -323,6 +340,7 @@
 	select ARCH_SPARSEMEM_ENABLE
 	select SYS_SUPPORTS_NUMA
 	select SYS_SUPPORTS_SMP
+	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 
 # SH4AL-DSP Processor Support
 
@@ -490,7 +508,6 @@
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
-	default y
 	help
 	  This kernel feature is useful for number crunching applications
 	  that may need to compute untrusted bytecode during their
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 4d2d102..e6d2c8b 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -82,7 +82,7 @@
 
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
-	depends on DEBUG_KERNEL && (MMU || BROKEN)
+	depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
 	help
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 01d85c7..1f409bf 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -76,8 +76,10 @@
 # error messages during linking.
 #
 ifdef CONFIG_SUPERH32
+UTS_MACHINE	:= sh
 LDFLAGS_vmlinux	+= -e _stext
 else
+UTS_MACHINE	:= sh64
 LDFLAGS_vmlinux	+= --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
 		   --defsym phys_stext_shmedia=phys_stext+1 \
 		   -e phys_stext_shmedia
@@ -123,6 +125,9 @@
 	     $(filter-out ., $(patsubst %,%/,$(machdir-y))))
 endif
 
+# Common machine type headers. Not part of the arch/sh/boards/ hierarchy.
+machdir-y	+= mach-common
+
 # Companion chips
 core-$(CONFIG_HD6446X_SERIES)	+= arch/sh/cchips/hd6446x/
 
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index ae19486..50467f9 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -128,6 +128,7 @@
 
 config SH_RSK7203
 	bool "RSK7203"
+	select GENERIC_GPIO
 	depends on CPU_SUBTYPE_SH7203
 
 config SH_SDK7780
@@ -162,6 +163,7 @@
 config SH_MIGOR
 	bool "Migo-R"
 	depends on CPU_SUBTYPE_SH7722
+	select GENERIC_GPIO
 	help
 	  Select Migo-R if configuring for the SH7722 Migo-R platform
           by Renesas System Solutions Asia Pte. Ltd.
@@ -169,6 +171,7 @@
 config SH_AP325RXA
 	bool "AP-325RXA"
 	depends on CPU_SUBTYPE_SH7723
+	select GENERIC_GPIO
 	help
 	  Renesas "AP-325RXA" support.
 	  Compatible with ALGO SYSTEM CO.,LTD. "AP-320A"
@@ -184,6 +187,13 @@
 	bool "EDOSK7705"
 	depends on CPU_SUBTYPE_SH7705
 
+config SH_EDOSK7760
+	bool "EDOSK7760"
+	depends on CPU_SUBTYPE_SH7760
+	help
+	  Select if configuring for a Renesas EDOSK7760
+	  evaluation board.
+
 config SH_SH4202_MICRODEV
 	bool "SH4-202 MicroDev"
 	depends on CPU_SUBTYPE_SH4_202
@@ -228,6 +238,7 @@
 config SH_MAGIC_PANEL_R2
 	bool "Magic Panel R2"
 	depends on CPU_SUBTYPE_SH7720
+	select GENERIC_GPIO
 	help
 	  Select Magic Panel R2 if configuring for Magic Panel R2.
 
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 463022c..d9efa39 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -6,3 +6,4 @@
 obj-$(CONFIG_SH_RSK7203)	+= board-rsk7203.o
 obj-$(CONFIG_SH_SH7785LCR)	+= board-sh7785lcr.o
 obj-$(CONFIG_SH_SHMIN)		+= board-shmin.o
+obj-$(CONFIG_SH_EDOSK7760)	+= board-edosk7760.o
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index fd16125..7c7874e 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -18,11 +18,13 @@
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/smc911x.h>
+#include <linux/gpio.h>
 #include <media/soc_camera_platform.h>
 #include <media/sh_mobile_ceu.h>
-#include <asm/sh_mobile_lcdc.h>
+#include <video/sh_mobile_lcdc.h>
 #include <asm/io.h>
 #include <asm/clock.h>
+#include <cpu/sh7723.h>
 
 static struct smc911x_platdata smc911x_info = {
 	.flags = SMC911X_USE_32BIT,
@@ -52,20 +54,33 @@
 	},
 };
 
+/*
+ * The AP320 and AP325RXA keep CPLD data in NOR flash (0xA80000-0xABFFFF).
+ * If this area is erased, the board cannot boot.
+ */
 static struct mtd_partition ap325rxa_nor_flash_partitions[] = {
 	{
-		 .name = "uboot",
-		 .offset = 0,
-		 .size = (1 * 1024 * 1024),
-		 .mask_flags = MTD_WRITEABLE,	/* Read-only */
+		.name = "uboot",
+		.offset = 0,
+		.size = (1 * 1024 * 1024),
+		.mask_flags = MTD_WRITEABLE,	/* Read-only */
 	}, {
-		 .name = "kernel",
-		 .offset = MTDPART_OFS_APPEND,
-		 .size = (2 * 1024 * 1024),
+		.name = "kernel",
+		.offset = MTDPART_OFS_APPEND,
+		.size = (2 * 1024 * 1024),
 	}, {
-		 .name = "other",
-		 .offset = MTDPART_OFS_APPEND,
-		 .size = MTDPART_SIZ_FULL,
+		.name = "free-area0",
+		.offset = MTDPART_OFS_APPEND,
+		.size = ((7 * 1024 * 1024) + (512 * 1024)),
+	}, {
+		.name = "CPLD-Data",
+		.offset = MTDPART_OFS_APPEND,
+		.mask_flags = MTD_WRITEABLE,	/* Read-only */
+		.size = (1024 * 128 * 2),
+	}, {
+		.name = "free-area1",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
 	},
 };
 
@@ -96,17 +111,7 @@
 #define FPGA_LCDREG	0xB4100180
 #define FPGA_BKLREG	0xB4100212
 #define FPGA_LCDREG_VAL	0x0018
-#define PORT_PHCR	0xA405010E
-#define PORT_PLCR	0xA4050114
-#define PORT_PMCR	0xA4050116
-#define PORT_PRCR	0xA405011C
-#define PORT_PSCR	0xA405011E
-#define PORT_PZCR	0xA405014C
-#define PORT_HIZCRA	0xA4050158
 #define PORT_MSELCRB	0xA4050182
-#define PORT_PSDR	0xA405013E
-#define PORT_PZDR	0xA405016C
-#define PORT_PSELD	0xA4050154
 
 static void ap320_wvga_power_on(void *board_data)
 {
@@ -116,8 +121,7 @@
 	ctrl_outw(FPGA_LCDREG_VAL, FPGA_LCDREG);
 
 	/* backlight */
-	ctrl_outw((ctrl_inw(PORT_PSCR) & ~0x00C0) | 0x40, PORT_PSCR);
-	ctrl_outb(ctrl_inb(PORT_PSDR) & ~0x08, PORT_PSDR);
+	gpio_set_value(GPIO_PTS3, 0);
 	ctrl_outw(0x100, FPGA_BKLREG);
 }
 
@@ -281,12 +285,84 @@
 };
 
 static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("pcf8563", 0x51),
+	},
 };
 
 static int __init ap325rxa_devices_setup(void)
 {
-	clk_always_enable("mstp200"); /* LCDC */
-	clk_always_enable("mstp203"); /* CEU */
+	/* LD3 and LD4 LEDs */
+	gpio_request(GPIO_PTX5, NULL); /* RUN */
+	gpio_direction_output(GPIO_PTX5, 1);
+	gpio_export(GPIO_PTX5, 0);
+
+	gpio_request(GPIO_PTX4, NULL); /* INDICATOR */
+	gpio_direction_output(GPIO_PTX4, 0);
+	gpio_export(GPIO_PTX4, 0);
+
+	/* SW1 input */
+	gpio_request(GPIO_PTF7, NULL); /* MODE */
+	gpio_direction_input(GPIO_PTF7);
+	gpio_export(GPIO_PTF7, 0);
+
+	/* LCDC */
+	clk_always_enable("mstp200");
+	gpio_request(GPIO_FN_LCDD15, NULL);
+	gpio_request(GPIO_FN_LCDD14, NULL);
+	gpio_request(GPIO_FN_LCDD13, NULL);
+	gpio_request(GPIO_FN_LCDD12, NULL);
+	gpio_request(GPIO_FN_LCDD11, NULL);
+	gpio_request(GPIO_FN_LCDD10, NULL);
+	gpio_request(GPIO_FN_LCDD9, NULL);
+	gpio_request(GPIO_FN_LCDD8, NULL);
+	gpio_request(GPIO_FN_LCDD7, NULL);
+	gpio_request(GPIO_FN_LCDD6, NULL);
+	gpio_request(GPIO_FN_LCDD5, NULL);
+	gpio_request(GPIO_FN_LCDD4, NULL);
+	gpio_request(GPIO_FN_LCDD3, NULL);
+	gpio_request(GPIO_FN_LCDD2, NULL);
+	gpio_request(GPIO_FN_LCDD1, NULL);
+	gpio_request(GPIO_FN_LCDD0, NULL);
+	gpio_request(GPIO_FN_LCDLCLK_PTR, NULL);
+	gpio_request(GPIO_FN_LCDDCK, NULL);
+	gpio_request(GPIO_FN_LCDVEPWC, NULL);
+	gpio_request(GPIO_FN_LCDVCPWC, NULL);
+	gpio_request(GPIO_FN_LCDVSYN, NULL);
+	gpio_request(GPIO_FN_LCDHSYN, NULL);
+	gpio_request(GPIO_FN_LCDDISP, NULL);
+	gpio_request(GPIO_FN_LCDDON, NULL);
+
+	/* LCD backlight */
+	gpio_request(GPIO_PTS3, NULL);
+	gpio_direction_output(GPIO_PTS3, 1);
+
+	/* CEU */
+	clk_always_enable("mstp203");
+	gpio_request(GPIO_FN_VIO_CLK2, NULL);
+	gpio_request(GPIO_FN_VIO_VD2, NULL);
+	gpio_request(GPIO_FN_VIO_HD2, NULL);
+	gpio_request(GPIO_FN_VIO_FLD, NULL);
+	gpio_request(GPIO_FN_VIO_CKO, NULL);
+	gpio_request(GPIO_FN_VIO_D15, NULL);
+	gpio_request(GPIO_FN_VIO_D14, NULL);
+	gpio_request(GPIO_FN_VIO_D13, NULL);
+	gpio_request(GPIO_FN_VIO_D12, NULL);
+	gpio_request(GPIO_FN_VIO_D11, NULL);
+	gpio_request(GPIO_FN_VIO_D10, NULL);
+	gpio_request(GPIO_FN_VIO_D9, NULL);
+	gpio_request(GPIO_FN_VIO_D8, NULL);
+
+	gpio_request(GPIO_PTZ7, NULL);
+	gpio_direction_output(GPIO_PTZ7, 0); /* OE_CAM */
+	gpio_request(GPIO_PTZ6, NULL);
+	gpio_direction_output(GPIO_PTZ6, 0); /* STBY_CAM */
+	gpio_request(GPIO_PTZ5, NULL);
+	gpio_direction_output(GPIO_PTZ5, 1); /* RST_CAM */
+	gpio_request(GPIO_PTZ4, NULL);
+	gpio_direction_output(GPIO_PTZ4, 0); /* SADDR */
+
+	ctrl_outw(ctrl_inw(PORT_MSELCRB) & ~0x0001, PORT_MSELCRB);
 
 	platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);
 
@@ -300,18 +376,6 @@
 
 static void __init ap325rxa_setup(char **cmdline_p)
 {
-	/* LCDC configuration */
-	ctrl_outw(ctrl_inw(PORT_PHCR) & ~0xffff, PORT_PHCR);
-	ctrl_outw(ctrl_inw(PORT_PLCR) & ~0xffff, PORT_PLCR);
-	ctrl_outw(ctrl_inw(PORT_PMCR) & ~0xffff, PORT_PMCR);
-	ctrl_outw(ctrl_inw(PORT_PRCR) & ~0x03ff, PORT_PRCR);
-	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x01C0, PORT_HIZCRA);
-
-	/* CEU */
-	ctrl_outw(ctrl_inw(PORT_MSELCRB) & ~0x0001, PORT_MSELCRB);
-	ctrl_outw(ctrl_inw(PORT_PSELD) & ~0x0003, PORT_PSELD);
-	ctrl_outw((ctrl_inw(PORT_PZCR) & ~0xff00) | 0x5500, PORT_PZCR);
-	ctrl_outb((ctrl_inb(PORT_PZDR) & ~0xf0) | 0x20, PORT_PZDR);
 }
 
 static struct sh_machine_vector mv_ap325rxa __initmv = {
diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c
new file mode 100644
index 0000000..35dc099
--- /dev/null
+++ b/arch/sh/boards/board-edosk7760.c
@@ -0,0 +1,193 @@
+/*
+ * Renesas Europe EDOSK7760 Board Support
+ *
+ * Copyright (C) 2008 SPES Societa' Progettazione Elettronica e Software Ltd.
+ * Author: Luca Santini <luca.santini@spesonline.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/smc91x.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mtd/physmap.h>
+#include <asm/machvec.h>
+#include <asm/io.h>
+#include <asm/addrspace.h>
+#include <asm/delay.h>
+#include <asm/i2c-sh7760.h>
+#include <asm/sizes.h>
+
+/* Bus state controller registers for CS4 area */
+#define BSC_CS4BCR	0xA4FD0010
+#define BSC_CS4WCR	0xA4FD0030
+
+#define SMC_IOBASE	0xA2000000
+#define SMC_IO_OFFSET	0x300
+#define SMC_IOADDR	(SMC_IOBASE + SMC_IO_OFFSET)
+
+#define ETHERNET_IRQ	5
+
+/* NOR flash */
+static struct mtd_partition edosk7760_nor_flash_partitions[] = {
+	{
+		.name = "bootloader",
+		.offset = 0,
+		.size = SZ_256K,
+		.mask_flags = MTD_WRITEABLE,	/* Read-only */
+	}, {
+		.name = "kernel",
+		.offset = MTDPART_OFS_APPEND,
+		.size = SZ_2M,
+	}, {
+		.name = "fs",
+		.offset = MTDPART_OFS_APPEND,
+		.size = SZ_26M,
+	}, {
+		.name = "other",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static struct physmap_flash_data edosk7760_nor_flash_data = {
+	.width		= 4,
+	.parts		= edosk7760_nor_flash_partitions,
+	.nr_parts	= ARRAY_SIZE(edosk7760_nor_flash_partitions),
+};
+
+static struct resource edosk7760_nor_flash_resources[] = {
+	[0] = {
+		.name	= "NOR Flash",
+		.start	= 0x00000000,
+		.end	= 0x00000000 + SZ_32M - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device edosk7760_nor_flash_device = {
+	.name		= "physmap-flash",
+	.resource	= edosk7760_nor_flash_resources,
+	.num_resources	= ARRAY_SIZE(edosk7760_nor_flash_resources),
+	.dev		= {
+		.platform_data = &edosk7760_nor_flash_data,
+	},
+};
+
+/* i2c initialization functions */
+static struct sh7760_i2c_platdata i2c_pd = {
+	.speed_khz	= 400,
+};
+
+static struct resource sh7760_i2c1_res[] = {
+	{
+		.start	= SH7760_I2C1_MMIO,
+		.end	= SH7760_I2C1_MMIOEND,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.start	= SH7760_I2C1_IRQ,
+		.end	= SH7760_I2C1_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device sh7760_i2c1_dev = {
+	.dev    = {
+		.platform_data	= &i2c_pd,
+	},
+
+	.name		= SH7760_I2C_DEVNAME,
+	.id		= 1,
+	.resource	= sh7760_i2c1_res,
+	.num_resources	= ARRAY_SIZE(sh7760_i2c1_res),
+};
+
+static struct resource sh7760_i2c0_res[] = {
+	{
+		.start	= SH7760_I2C0_MMIO,
+		.end	= SH7760_I2C0_MMIOEND,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.start	= SH7760_I2C0_IRQ,
+		.end	= SH7760_I2C0_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device sh7760_i2c0_dev = {
+	.dev    = {
+		.platform_data	= &i2c_pd,
+	},
+	.name		= SH7760_I2C_DEVNAME,
+	.id		= 0,
+	.resource	= sh7760_i2c0_res,
+	.num_resources	= ARRAY_SIZE(sh7760_i2c0_res),
+};
+
+/* eth initialization functions */
+static struct smc91x_platdata smc91x_info = {
+	.flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
+};
+
+static struct resource smc91x_res[] = {
+	[0] = {
+		.start	= SMC_IOADDR,
+		.end	= SMC_IOADDR + SZ_32 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= ETHERNET_IRQ,
+		.end	= ETHERNET_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device smc91x_dev = {
+	.name		= "smc91x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smc91x_res),
+	.resource	= smc91x_res,
+
+	.dev	= {
+		.platform_data	= &smc91x_info,
+	},
+};
+
+/* platform init code */
+static struct platform_device *edosk7760_devices[] __initdata = {
+	&smc91x_dev,
+	&edosk7760_nor_flash_device,
+	&sh7760_i2c0_dev,
+	&sh7760_i2c1_dev,
+};
+
+static int __init init_edosk7760_devices(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ);
+
+	return platform_add_devices(edosk7760_devices,
+				    ARRAY_SIZE(edosk7760_devices));
+}
+__initcall(init_edosk7760_devices);
+
+/*
+ * The Machine Vector
+ */
+struct sh_machine_vector mv_edosk7760 __initmv = {
+	.mv_name	= "EDOSK7760",
+	.mv_nr_irqs	= 128,
+};
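
The new board file above describes the SMC91x Ethernet controller and the two I2C blocks purely as platform devices with resource tables, one MMIO window plus one IRQ each; the matching drivers later look those resources up by type and index. The standalone sketch below models that lookup with simplified types; demo_get_resource() is an illustrative stand-in, not the kernel's platform_get_resource().

/*
 * Standalone sketch (not kernel code) of looking up a resource by type and
 * index in a table like smc91x_res[] above.
 */
#include <stdio.h>
#include <stddef.h>

enum demo_res_type { DEMO_RES_MEM, DEMO_RES_IRQ };

struct demo_resource {
	unsigned long start, end;
	enum demo_res_type type;
};

static const struct demo_resource *
demo_get_resource(const struct demo_resource *res, size_t n,
		  enum demo_res_type type, unsigned int index)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (res[i].type == type && index-- == 0)
			return &res[i];
	return NULL;
}

int main(void)
{
	/* mirrors smc91x_res[] above: MMIO window at SMC_IOADDR, IRQ 5 */
	const struct demo_resource smc_res[] = {
		{ 0xA2000300, 0xA2000300 + 32 - 1, DEMO_RES_MEM },
		{ 5, 5, DEMO_RES_IRQ },
	};
	const struct demo_resource *mem =
		demo_get_resource(smc_res, 2, DEMO_RES_MEM, 0);
	const struct demo_resource *irq =
		demo_get_resource(smc_res, 2, DEMO_RES_IRQ, 0);

	if (mem && irq)
		printf("regs at 0x%08lx-0x%08lx, irq %lu\n",
		       mem->start, mem->end, irq->start);
	return 0;
}
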
diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c
index f3b8b07..3de22cc 100644
--- a/arch/sh/boards/board-magicpanelr2.c
+++ b/arch/sh/boards/board-magicpanelr2.c
@@ -13,12 +13,14 @@
 #include <linux/irq.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/map.h>
-#include <asm/magicpanelr2.h>
+#include <mach/magicpanelr2.h>
 #include <asm/heartbeat.h>
+#include <cpu/sh7720.h>
 
 #define LAN9115_READY	(ctrl_inl(0xA8000084UL) & 0x00000001UL)
 
@@ -170,7 +172,14 @@
 	/* R7 A25;	     R6 A24;	     R5 A23;		  R4 A22;
 	 * R3 A21;	     R2 A20;	     R1 A19;		  R0 A0;
 	 */
-	ctrl_outw(0x0000, PORT_PRCR);	/* 00 00 00 00 00 00 00 00 */
+	gpio_request(GPIO_FN_A25, NULL);
+	gpio_request(GPIO_FN_A24, NULL);
+	gpio_request(GPIO_FN_A23, NULL);
+	gpio_request(GPIO_FN_A22, NULL);
+	gpio_request(GPIO_FN_A21, NULL);
+	gpio_request(GPIO_FN_A20, NULL);
+	gpio_request(GPIO_FN_A19, NULL);
+	gpio_request(GPIO_FN_A0, NULL);
 
 	/* S7 (x);		S6 (x);        S5 (x);	     S4 GPO(EEPROM_CS2);
 	 * S3 GPO(EEPROM_CS1);  S2 SIOF0_TXD;  S1 SIOF0_RXD; S0 SIOF0_SCK;
diff --git a/arch/sh/boards/board-rsk7203.c b/arch/sh/boards/board-rsk7203.c
index ffbedc5..ded799c 100644
--- a/arch/sh/boards/board-rsk7203.c
+++ b/arch/sh/boards/board-rsk7203.c
@@ -16,8 +16,10 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/map.h>
 #include <linux/smc911x.h>
+#include <linux/gpio.h>
 #include <asm/machvec.h>
 #include <asm/io.h>
+#include <asm/sh7203.h>
 
 static struct smc911x_platdata smc911x_info = {
 	.flags		= SMC911X_USE_16BIT,
@@ -122,6 +124,15 @@
 
 static int __init rsk7203_devices_setup(void)
 {
+	/* Select pins for SCIF0 */
+	gpio_request(GPIO_FN_TXD0, NULL);
+	gpio_request(GPIO_FN_RXD0, NULL);
+
+	/* Lit LED0 */
+	gpio_request(GPIO_PE10, NULL);
+	gpio_direction_output(GPIO_PE10, 0);
+	gpio_export(GPIO_PE10, 0);
+
 	set_mtd_partitions();
 	return platform_add_devices(rsk7203_devices,
 				    ARRAY_SIZE(rsk7203_devices));
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index b95d674..408bbdd 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -19,7 +19,7 @@
 #include <linux/i2c-pca-platform.h>
 #include <linux/i2c-algo-pca.h>
 #include <asm/heartbeat.h>
-#include <asm/sh7785lcr.h>
+#include <mach/sh7785lcr.h>
 
 /*
  * NOTE: This board has 2 physical memory maps.
diff --git a/arch/sh/boards/board-shmin.c b/arch/sh/boards/board-shmin.c
index 16e5dae..5cc0867d 100644
--- a/arch/sh/boards/board-shmin.c
+++ b/arch/sh/boards/board-shmin.c
@@ -8,7 +8,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/machvec.h>
-#include <asm/shmin.h>
+#include <mach/shmin.h>
 #include <asm/clock.h>
 #include <asm/io.h>
 
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
index 541cea2..7d153e5 100644
--- a/arch/sh/boards/mach-edosk7705/io.c
+++ b/arch/sh/boards/mach-edosk7705/io.c
@@ -11,7 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <asm/io.h>
-#include <asm/edosk7705/io.h>
+#include <mach/edosk7705.h>
 #include <asm/addrspace.h>
 
 #define SMC_IOADDR	0xA2000000
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
index f076c45..ab3f47b 100644
--- a/arch/sh/boards/mach-edosk7705/setup.c
+++ b/arch/sh/boards/mach-edosk7705/setup.c
@@ -10,7 +10,7 @@
  */
 #include <linux/init.h>
 #include <asm/machvec.h>
-#include <asm/edosk7705/io.h>
+#include <mach/edosk7705.h>
 
 static void __init sh_edosk7705_init_irq(void)
 {
diff --git a/arch/sh/boards/mach-highlander/irq-r7780mp.c b/arch/sh/boards/mach-highlander/irq-r7780mp.c
index ae1cfcb..83c28bc 100644
--- a/arch/sh/boards/mach-highlander/irq-r7780mp.c
+++ b/arch/sh/boards/mach-highlander/irq-r7780mp.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/r7780rp.h>
+#include <mach/highlander.h>
 
 enum {
 	UNUSED = 0,
diff --git a/arch/sh/boards/mach-highlander/irq-r7780rp.c b/arch/sh/boards/mach-highlander/irq-r7780rp.c
index 9d3921f..b721e86 100644
--- a/arch/sh/boards/mach-highlander/irq-r7780rp.c
+++ b/arch/sh/boards/mach-highlander/irq-r7780rp.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/r7780rp.h>
+#include <mach/highlander.h>
 
 enum {
 	UNUSED = 0,
diff --git a/arch/sh/boards/mach-highlander/irq-r7785rp.c b/arch/sh/boards/mach-highlander/irq-r7785rp.c
index 896c045..3811b06 100644
--- a/arch/sh/boards/mach-highlander/irq-r7785rp.c
+++ b/arch/sh/boards/mach-highlander/irq-r7785rp.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/r7780rp.h>
+#include <mach/highlander.h>
 
 enum {
 	UNUSED = 0,
diff --git a/arch/sh/boards/mach-highlander/psw.c b/arch/sh/boards/mach-highlander/psw.c
index be8d547..37b1a2e 100644
--- a/arch/sh/boards/mach-highlander/psw.c
+++ b/arch/sh/boards/mach-highlander/psw.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
-#include <asm/r7780rp.h>
+#include <mach/highlander.h>
 #include <asm/push-switch.h>
 
 static irqreturn_t psw_irq_handler(int irq, void *arg)
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
index bc79afb..c5a40f7 100644
--- a/arch/sh/boards/mach-highlander/setup.c
+++ b/arch/sh/boards/mach-highlander/setup.c
@@ -20,7 +20,7 @@
 #include <linux/i2c.h>
 #include <net/ax88796.h>
 #include <asm/machvec.h>
-#include <asm/r7780rp.h>
+#include <mach/highlander.h>
 #include <asm/clock.h>
 #include <asm/heartbeat.h>
 #include <asm/io.h>
diff --git a/arch/sh/boards/mach-hp6xx/hp6xx_apm.c b/arch/sh/boards/mach-hp6xx/hp6xx_apm.c
index 177f4f0..e85212f 100644
--- a/arch/sh/boards/mach-hp6xx/hp6xx_apm.c
+++ b/arch/sh/boards/mach-hp6xx/hp6xx_apm.c
@@ -14,7 +14,7 @@
 #include <linux/apm-emulation.h>
 #include <linux/io.h>
 #include <asm/adc.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 
 /* percentage values */
 #define APM_CRITICAL			10
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index e96684d..64af1f2 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -12,7 +12,7 @@
 #include <linux/time.h>
 #include <asm/io.h>
 #include <asm/hd64461.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 #include <cpu/dac.h>
 #include <asm/pm.h>
 
diff --git a/arch/sh/boards/mach-hp6xx/setup.c b/arch/sh/boards/mach-hp6xx/setup.c
index 475b46c..48fece7 100644
--- a/arch/sh/boards/mach-hp6xx/setup.c
+++ b/arch/sh/boards/mach-hp6xx/setup.c
@@ -15,7 +15,7 @@
 #include <asm/hd64461.h>
 #include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 #include <cpu/dac.h>
 
 #define	SCPCR	0xa4000116
diff --git a/arch/sh/boards/mach-lboxre2/irq.c b/arch/sh/boards/mach-lboxre2/irq.c
index 5a1c3bb..8aa171a 100644
--- a/arch/sh/boards/mach-lboxre2/irq.c
+++ b/arch/sh/boards/mach-lboxre2/irq.c
@@ -15,7 +15,7 @@
 #include <linux/irq.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/lboxre2.h>
+#include <mach/lboxre2.h>
 
 /*
  * Initialize IRQ setting
diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c
index c74440d..2b0b581 100644
--- a/arch/sh/boards/mach-lboxre2/setup.c
+++ b/arch/sh/boards/mach-lboxre2/setup.c
@@ -16,7 +16,7 @@
 #include <linux/ata_platform.h>
 #include <asm/machvec.h>
 #include <asm/addrspace.h>
-#include <asm/lboxre2.h>
+#include <mach/lboxre2.h>
 #include <asm/io.h>
 
 static struct resource cf_ide_resources[] = {
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 9f8a540..52dd748 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -15,7 +15,7 @@
 #include <linux/pci.h>
 #include <linux/wait.h>
 #include <asm/io.h>
-#include <asm/microdev.h>
+#include <mach/microdev.h>
 
 	/*
 	 *	we need to have a 'safe' address to re-direct all I/O requests
diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c
index 4d33507..702753c 100644
--- a/arch/sh/boards/mach-microdev/irq.c
+++ b/arch/sh/boards/mach-microdev/irq.c
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <asm/system.h>
 #include <asm/io.h>
-#include <asm/microdev.h>
+#include <mach/microdev.h>
 
 #define NUM_EXTERNAL_IRQS 16	/* IRL0 .. IRL15 */
 
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index fc8cd06..a9202fe 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -14,7 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/ioport.h>
 #include <video/s1d13xxxfb.h>
-#include <asm/microdev.h>
+#include <mach/microdev.h>
 #include <asm/io.h>
 #include <asm/machvec.h>
 
diff --git a/arch/sh/boards/mach-migor/lcd_qvga.c b/arch/sh/boards/mach-migor/lcd_qvga.c
index 6e96095..de9014a 100644
--- a/arch/sh/boards/mach-migor/lcd_qvga.c
+++ b/arch/sh/boards/mach-migor/lcd_qvga.c
@@ -17,8 +17,10 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/sh_mobile_lcdc.h>
-#include <asm/migor.h>
+#include <linux/gpio.h>
+#include <video/sh_mobile_lcdc.h>
+#include <cpu/sh7722.h>
+#include <mach/migor.h>
 
 /* LCD Module is a PH240320T according to board schematics. This module
  * is made up of a 240x320 LCD hooked up to a R61505U (or HX8347-A01?)
@@ -30,9 +32,9 @@
 
 static void reset_lcd_module(void)
 {
-	ctrl_outb(ctrl_inb(PORT_PHDR) & ~0x04, PORT_PHDR);
+	gpio_set_value(GPIO_PTH2, 0);
 	mdelay(2);
-	ctrl_outb(ctrl_inb(PORT_PHDR) | 0x04, PORT_PHDR);
+	gpio_set_value(GPIO_PTH2, 1);
 	mdelay(1);
 }
 
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 714dce9..769d630 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -17,14 +17,16 @@
 #include <linux/smc91x.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
+#include <linux/gpio.h>
 #include <media/soc_camera_platform.h>
 #include <media/sh_mobile_ceu.h>
+#include <video/sh_mobile_lcdc.h>
 #include <asm/clock.h>
 #include <asm/machvec.h>
 #include <asm/io.h>
 #include <asm/sh_keysc.h>
-#include <asm/sh_mobile_lcdc.h>
-#include <asm/migor.h>
+#include <mach/migor.h>
+#include <cpu/sh7722.h>
 
 /* Address     IRQ  Size  Bus  Description
  * 0x00000000       64MB  16   NOR Flash (SP29PL256N)
@@ -35,7 +37,7 @@
  */
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_16BIT,
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
 };
 
 static struct resource smc91x_eth_resources[] = {
@@ -169,7 +171,7 @@
 
 static int migor_nand_flash_ready(struct mtd_info *mtd)
 {
-	return ctrl_inb(PORT_PADR) & 0x02; /* PTA1 */
+	return gpio_get_value(GPIO_PTA1); /* NAND_RBn */
 }
 
 struct platform_nand_data migor_nand_flash_data = {
@@ -286,22 +288,15 @@
 
 static void camera_power_on(void)
 {
-	unsigned char value;
-
 	camera_clk = clk_get(NULL, "video_clk");
 	clk_set_rate(camera_clk, 24000000);
 	clk_enable(camera_clk);	/* start VIO_CKO */
 
+	/* use VIO_RST to take camera out of reset */
 	mdelay(10);
-	value = ctrl_inb(PORT_PTDR);
-	value &= ~0x09;
-#ifndef CONFIG_SH_MIGOR_RTA_WVGA
-	value |= 0x01;
-#endif
-	ctrl_outb(value, PORT_PTDR);
+	gpio_set_value(GPIO_PTT3, 0);
 	mdelay(10);
-
-	ctrl_outb(value | 8, PORT_PTDR);
+	gpio_set_value(GPIO_PTT3, 1);
 }
 
 static void camera_power_off(void)
@@ -309,7 +304,7 @@
 	clk_disable(camera_clk); /* stop VIO_CKO */
 	clk_put(camera_clk);
 
-	ctrl_outb(ctrl_inb(PORT_PTDR) & ~0x08, PORT_PTDR);
+	gpio_set_value(GPIO_PTT3, 0);
 }
 
 #ifdef CONFIG_I2C
@@ -458,75 +453,135 @@
 
 static int __init migor_devices_setup(void)
 {
+	/* Lit D11 LED */
+	gpio_request(GPIO_PTJ7, NULL);
+	gpio_direction_output(GPIO_PTJ7, 1);
+	gpio_export(GPIO_PTJ7, 0);
+
+	/* Lit D12 LED */
+	gpio_request(GPIO_PTJ5, NULL);
+	gpio_direction_output(GPIO_PTJ5, 1);
+	gpio_export(GPIO_PTJ5, 0);
+
+	/* SMC91C111 - Enable IRQ0, Setup CS4 for 16-bit fast access */
+	gpio_request(GPIO_FN_IRQ0, NULL);
+	ctrl_outl(0x00003400, BSC_CS4BCR);
+	ctrl_outl(0x00110080, BSC_CS4WCR);
+
+	/* KEYSC */
 	clk_always_enable("mstp214"); /* KEYSC */
+	gpio_request(GPIO_FN_KEYOUT0, NULL);
+	gpio_request(GPIO_FN_KEYOUT1, NULL);
+	gpio_request(GPIO_FN_KEYOUT2, NULL);
+	gpio_request(GPIO_FN_KEYOUT3, NULL);
+	gpio_request(GPIO_FN_KEYOUT4_IN6, NULL);
+	gpio_request(GPIO_FN_KEYIN1, NULL);
+	gpio_request(GPIO_FN_KEYIN2, NULL);
+	gpio_request(GPIO_FN_KEYIN3, NULL);
+	gpio_request(GPIO_FN_KEYIN4, NULL);
+	gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);
+
+	/* NAND Flash */
+	gpio_request(GPIO_FN_CS6A_CE2B, NULL);
+	ctrl_outl((ctrl_inl(BSC_CS6ABCR) & ~0x0600) | 0x0200, BSC_CS6ABCR);
+	gpio_request(GPIO_PTA1, NULL);
+	gpio_direction_input(GPIO_PTA1);
+
+	/* Touch Panel */
+	gpio_request(GPIO_FN_IRQ6, NULL);
+
+	/* LCD Panel */
 	clk_always_enable("mstp200"); /* LCDC */
+#ifdef CONFIG_SH_MIGOR_QVGA /* LCDC - QVGA - Enable SYS Interface signals */
+	gpio_request(GPIO_FN_LCDD17, NULL);
+	gpio_request(GPIO_FN_LCDD16, NULL);
+	gpio_request(GPIO_FN_LCDD15, NULL);
+	gpio_request(GPIO_FN_LCDD14, NULL);
+	gpio_request(GPIO_FN_LCDD13, NULL);
+	gpio_request(GPIO_FN_LCDD12, NULL);
+	gpio_request(GPIO_FN_LCDD11, NULL);
+	gpio_request(GPIO_FN_LCDD10, NULL);
+	gpio_request(GPIO_FN_LCDD8, NULL);
+	gpio_request(GPIO_FN_LCDD7, NULL);
+	gpio_request(GPIO_FN_LCDD6, NULL);
+	gpio_request(GPIO_FN_LCDD5, NULL);
+	gpio_request(GPIO_FN_LCDD4, NULL);
+	gpio_request(GPIO_FN_LCDD3, NULL);
+	gpio_request(GPIO_FN_LCDD2, NULL);
+	gpio_request(GPIO_FN_LCDD1, NULL);
+	gpio_request(GPIO_FN_LCDRS, NULL);
+	gpio_request(GPIO_FN_LCDCS, NULL);
+	gpio_request(GPIO_FN_LCDRD, NULL);
+	gpio_request(GPIO_FN_LCDWR, NULL);
+	gpio_request(GPIO_PTH2, NULL); /* LCD_DON */
+	gpio_direction_output(GPIO_PTH2, 1);
+#endif
+#ifdef CONFIG_SH_MIGOR_RTA_WVGA /* LCDC - WVGA - Enable RGB Interface signals */
+	gpio_request(GPIO_FN_LCDD15, NULL);
+	gpio_request(GPIO_FN_LCDD14, NULL);
+	gpio_request(GPIO_FN_LCDD13, NULL);
+	gpio_request(GPIO_FN_LCDD12, NULL);
+	gpio_request(GPIO_FN_LCDD11, NULL);
+	gpio_request(GPIO_FN_LCDD10, NULL);
+	gpio_request(GPIO_FN_LCDD9, NULL);
+	gpio_request(GPIO_FN_LCDD8, NULL);
+	gpio_request(GPIO_FN_LCDD7, NULL);
+	gpio_request(GPIO_FN_LCDD6, NULL);
+	gpio_request(GPIO_FN_LCDD5, NULL);
+	gpio_request(GPIO_FN_LCDD4, NULL);
+	gpio_request(GPIO_FN_LCDD3, NULL);
+	gpio_request(GPIO_FN_LCDD2, NULL);
+	gpio_request(GPIO_FN_LCDD1, NULL);
+	gpio_request(GPIO_FN_LCDD0, NULL);
+	gpio_request(GPIO_FN_LCDLCLK, NULL);
+	gpio_request(GPIO_FN_LCDDCK, NULL);
+	gpio_request(GPIO_FN_LCDVEPWC, NULL);
+	gpio_request(GPIO_FN_LCDVCPWC, NULL);
+	gpio_request(GPIO_FN_LCDVSYN, NULL);
+	gpio_request(GPIO_FN_LCDHSYN, NULL);
+	gpio_request(GPIO_FN_LCDDISP, NULL);
+	gpio_request(GPIO_FN_LCDDON, NULL);
+#endif
+
+	/* CEU */
 	clk_always_enable("mstp203"); /* CEU */
+	gpio_request(GPIO_FN_VIO_CLK2, NULL);
+	gpio_request(GPIO_FN_VIO_VD2, NULL);
+	gpio_request(GPIO_FN_VIO_HD2, NULL);
+	gpio_request(GPIO_FN_VIO_FLD, NULL);
+	gpio_request(GPIO_FN_VIO_CKO, NULL);
+	gpio_request(GPIO_FN_VIO_D15, NULL);
+	gpio_request(GPIO_FN_VIO_D14, NULL);
+	gpio_request(GPIO_FN_VIO_D13, NULL);
+	gpio_request(GPIO_FN_VIO_D12, NULL);
+	gpio_request(GPIO_FN_VIO_D11, NULL);
+	gpio_request(GPIO_FN_VIO_D10, NULL);
+	gpio_request(GPIO_FN_VIO_D9, NULL);
+	gpio_request(GPIO_FN_VIO_D8, NULL);
+
+	gpio_request(GPIO_PTT3, NULL); /* VIO_RST */
+	gpio_direction_output(GPIO_PTT3, 0);
+	gpio_request(GPIO_PTT2, NULL); /* TV_IN_EN */
+	gpio_direction_output(GPIO_PTT2, 1);
+	gpio_request(GPIO_PTT0, NULL); /* CAM_EN */
+#ifdef CONFIG_SH_MIGOR_RTA_WVGA
+	gpio_direction_output(GPIO_PTT0, 0);
+#else
+	gpio_direction_output(GPIO_PTT0, 1);
+#endif
+	ctrl_outw(ctrl_inw(PORT_MSELCRB) | 0x2000, PORT_MSELCRB); /* D15->D8 */
 
 	platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20);
 
 	i2c_register_board_info(0, migor_i2c_devices,
 				ARRAY_SIZE(migor_i2c_devices));
- 
+
 	return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices));
 }
 __initcall(migor_devices_setup);
 
 static void __init migor_setup(char **cmdline_p)
 {
-	/* SMC91C111 - Enable IRQ0 */
-	ctrl_outw(ctrl_inw(PORT_PJCR) & ~0x0003, PORT_PJCR);
-
-	/* KEYSC */
-	ctrl_outw(ctrl_inw(PORT_PYCR) & ~0x0fff, PORT_PYCR);
-	ctrl_outw(ctrl_inw(PORT_PZCR) & ~0x0ff0, PORT_PZCR);
-	ctrl_outw(ctrl_inw(PORT_PSELA) & ~0x4100, PORT_PSELA);
-	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x4000, PORT_HIZCRA);
-	ctrl_outw(ctrl_inw(PORT_HIZCRC) & ~0xc000, PORT_HIZCRC);
-
-	/* NAND Flash */
-	ctrl_outw(ctrl_inw(PORT_PXCR) & 0x0fff, PORT_PXCR);
-	ctrl_outl((ctrl_inl(BSC_CS6ABCR) & ~0x00000600) | 0x00000200,
-		  BSC_CS6ABCR);
-
-	/* Touch Panel - Enable IRQ6 */
-	ctrl_outw(ctrl_inw(PORT_PZCR) & ~0xc, PORT_PZCR);
-	ctrl_outw((ctrl_inw(PORT_PSELA) | 0x8000), PORT_PSELA);
-	ctrl_outw((ctrl_inw(PORT_HIZCRC) & ~0x4000), PORT_HIZCRC);
-
-#ifdef CONFIG_SH_MIGOR_RTA_WVGA
-	/* LCDC - WVGA - Enable RGB Interface signals */
-	ctrl_outw(ctrl_inw(PORT_PACR) & ~0x0003, PORT_PACR);
-	ctrl_outw(0x0000, PORT_PHCR);
-	ctrl_outw(0x0000, PORT_PLCR);
-	ctrl_outw(0x0000, PORT_PMCR);
-	ctrl_outw(ctrl_inw(PORT_PRCR) & ~0x000f, PORT_PRCR);
-	ctrl_outw((ctrl_inw(PORT_PSELD) & ~0x000d) | 0x0400, PORT_PSELD);
-	ctrl_outw(ctrl_inw(PORT_MSELCRB) & ~0x0100, PORT_MSELCRB);
-	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x01e0, PORT_HIZCRA);
-#endif
-#ifdef CONFIG_SH_MIGOR_QVGA
-	/* LCDC - QVGA - Enable SYS Interface signals */
-	ctrl_outw(ctrl_inw(PORT_PACR) & ~0x0003, PORT_PACR);
-	ctrl_outw((ctrl_inw(PORT_PHCR) & ~0xcfff) | 0x0010, PORT_PHCR);
-	ctrl_outw(0x0000, PORT_PLCR);
-	ctrl_outw(0x0000, PORT_PMCR);
-	ctrl_outw(ctrl_inw(PORT_PRCR) & ~0x030f, PORT_PRCR);
-	ctrl_outw((ctrl_inw(PORT_PSELD) & ~0x0001) | 0x0420, PORT_PSELD);
-	ctrl_outw(ctrl_inw(PORT_MSELCRB) | 0x0100, PORT_MSELCRB);
-	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x01e0, PORT_HIZCRA);
-#endif
-
-	/* CEU */
-	ctrl_outw((ctrl_inw(PORT_PTCR) & ~0x03c3) | 0x0051, PORT_PTCR);
-	ctrl_outw(ctrl_inw(PORT_PUCR) & ~0x03ff, PORT_PUCR);
-	ctrl_outw(ctrl_inw(PORT_PVCR) & ~0x03ff, PORT_PVCR);
-	ctrl_outw(ctrl_inw(PORT_PWCR) & ~0x3c00, PORT_PWCR);
-	ctrl_outw(ctrl_inw(PORT_PSELC) | 0x0001, PORT_PSELC);
-	ctrl_outw(ctrl_inw(PORT_PSELD) & ~0x2000, PORT_PSELD);
-	ctrl_outw(ctrl_inw(PORT_PSELE) | 0x000f, PORT_PSELE);
-	ctrl_outw(ctrl_inw(PORT_MSELCRB) | 0x2200, PORT_MSELCRB);
-	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x0a00, PORT_HIZCRA);
-	ctrl_outw(ctrl_inw(PORT_HIZCRB) & ~0x0003, PORT_HIZCRB);
 }
 
 static struct sh_machine_vector mv_migor __initmv = {
diff --git a/arch/sh/boards/mach-r2d/irq.c b/arch/sh/boards/mach-r2d/irq.c
index 8e49f6e..c70fece 100644
--- a/arch/sh/boards/mach-r2d/irq.c
+++ b/arch/sh/boards/mach-r2d/irq.c
@@ -13,7 +13,7 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <asm/rts7751r2d.h>
+#include <mach/r2d.h>
 
 #define R2D_NR_IRL 13
 
diff --git a/arch/sh/boards/mach-r2d/setup.c b/arch/sh/boards/mach-r2d/setup.c
index 2308e87..c585be0 100644
--- a/arch/sh/boards/mach-r2d/setup.c
+++ b/arch/sh/boards/mach-r2d/setup.c
@@ -18,7 +18,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
 #include <asm/machvec.h>
-#include <asm/rts7751r2d.h>
+#include <mach/r2d.h>
 #include <asm/io.h>
 #include <asm/io_trapped.h>
 #include <asm/spi.h>
diff --git a/arch/sh/boards/mach-sdk7780/irq.c b/arch/sh/boards/mach-sdk7780/irq.c
index 87cdc57..8555581 100644
--- a/arch/sh/boards/mach-sdk7780/irq.c
+++ b/arch/sh/boards/mach-sdk7780/irq.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/sdk7780.h>
+#include <mach/sdk7780.h>
 
 enum {
 	UNUSED = 0,
diff --git a/arch/sh/boards/mach-sdk7780/setup.c b/arch/sh/boards/mach-sdk7780/setup.c
index acc5932..aad94a7 100644
--- a/arch/sh/boards/mach-sdk7780/setup.c
+++ b/arch/sh/boards/mach-sdk7780/setup.c
@@ -13,7 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 #include <asm/machvec.h>
-#include <asm/sdk7780.h>
+#include <mach/sdk7780.h>
 #include <asm/heartbeat.h>
 #include <asm/io.h>
 #include <asm/addrspace.h>
diff --git a/arch/sh/boards/mach-sh7763rdp/irq.c b/arch/sh/boards/mach-sh7763rdp/irq.c
index fd850ba..d8ebfa7 100644
--- a/arch/sh/boards/mach-sh7763rdp/irq.c
+++ b/arch/sh/boards/mach-sh7763rdp/irq.c
@@ -15,7 +15,7 @@
 #include <linux/irq.h>
 #include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/sh7763rdp.h>
+#include <mach/sh7763rdp.h>
 
 #define INTC_BASE		(0xFFD00000)
 #define INTC_INT2PRI7   (INTC_BASE+0x4001C)
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 23850da..6f926fd 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -17,7 +17,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/fb.h>
 #include <linux/io.h>
-#include <asm/sh7763rdp.h>
+#include <mach/sh7763rdp.h>
 #include <asm/sh_eth.h>
 #include <asm/sh7760fb.h>
 
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/mach-snapgear/setup.c
index a5e349d..a3277a2 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/mach-snapgear/setup.c
@@ -19,7 +19,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <asm/machvec.h>
-#include <asm/snapgear.h>
+#include <mach/snapgear.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <cpu/timer.h>
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
index 1b767e1..dec3db0 100644
--- a/arch/sh/boards/mach-systemh/io.c
+++ b/arch/sh/boards/mach-systemh/io.c
@@ -9,7 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/pci.h>
-#include <asm/systemh7751.h>
+#include <mach/systemh7751.h>
 #include <asm/addrspace.h>
 #include <asm/io.h>
 
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
index 601c9c8..5384068 100644
--- a/arch/sh/boards/mach-systemh/irq.c
+++ b/arch/sh/boards/mach-systemh/irq.c
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 
 #include <asm/io.h>
-#include <asm/systemh7751.h>
+#include <mach/systemh7751.h>
 #include <asm/smc37c93x.h>
 
 /* address of external interrupt mask register
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
index ee78af8..219fd80 100644
--- a/arch/sh/boards/mach-systemh/setup.c
+++ b/arch/sh/boards/mach-systemh/setup.c
@@ -16,7 +16,7 @@
  */
 #include <linux/init.h>
 #include <asm/machvec.h>
-#include <asm/systemh7751.h>
+#include <mach/systemh7751.h>
 
 extern void make_systemh_irq(unsigned int irq);
 
diff --git a/arch/sh/boards/mach-titan/io.c b/arch/sh/boards/mach-titan/io.c
index 4730c1d..4badad4 100644
--- a/arch/sh/boards/mach-titan/io.c
+++ b/arch/sh/boards/mach-titan/io.c
@@ -4,7 +4,7 @@
 #include <linux/pci.h>
 #include <asm/machvec.h>
 #include <asm/addrspace.h>
-#include <asm/titan.h>
+#include <mach/titan.h>
 #include <asm/io.h>
 
 static inline unsigned int port2adr(unsigned int port)
diff --git a/arch/sh/boards/mach-titan/setup.c b/arch/sh/boards/mach-titan/setup.c
index 5de3b2a..81e7e0f 100644
--- a/arch/sh/boards/mach-titan/setup.c
+++ b/arch/sh/boards/mach-titan/setup.c
@@ -9,7 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/irq.h>
-#include <asm/titan.h>
+#include <mach/titan.h>
 #include <asm/io.h>
 
 static void __init init_titan_irq(void)
diff --git a/arch/sh/boot/.gitignore b/arch/sh/boot/.gitignore
index b6718de..aad5edd 100644
--- a/arch/sh/boot/.gitignore
+++ b/arch/sh/boot/.gitignore
@@ -1 +1,4 @@
 zImage
+vmlinux.srec
+uImage
+uImage.srec
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 5b54965..c16ccd4 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -33,10 +33,16 @@
 $(obj)/compressed/vmlinux: FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
+ifeq ($(CONFIG_32BIT),y)
+KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
+		     $$[$(CONFIG_PAGE_OFFSET)  + \
+			$(CONFIG_ZERO_PAGE_OFFSET)]')
+else
 KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET)  + \
 			$(CONFIG_MEMORY_START) + \
 			$(CONFIG_ZERO_PAGE_OFFSET)]')
+endif
 
 KERNEL_ENTRY	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET)  + \
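
For reference, with the values in the edosk7760 defconfig added later in this series (CONFIG_PAGE_OFFSET=0x80000000, CONFIG_MEMORY_START=0x0c000000, CONFIG_ZERO_PAGE_OFFSET=0x00001000), the existing expression computes a load address of 0x8c001000, while the new CONFIG_32BIT branch omits CONFIG_MEMORY_START and would yield 0x80001000. A minimal C sketch of the same arithmetic, illustrative only, with the macro values borrowed from that defconfig rather than from this Makefile:

#include <stdio.h>

/* Example Kconfig values, borrowed from the edosk7760 defconfig below. */
#define CONFIG_PAGE_OFFSET      0x80000000UL
#define CONFIG_MEMORY_START     0x0c000000UL
#define CONFIG_ZERO_PAGE_OFFSET 0x00001000UL

int main(void)
{
	/* Legacy 29-bit layout: the physical memory start is part of the link address. */
	unsigned long load_29bit = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START +
				   CONFIG_ZERO_PAGE_OFFSET;
	/* CONFIG_32BIT=y: the new branch leaves CONFIG_MEMORY_START out. */
	unsigned long load_32bit = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;

	printf("KERNEL_LOAD (29-bit): 0x%08lx\n", load_29bit);	/* 0x8c001000 */
	printf("KERNEL_LOAD (32-bit): 0x%08lx\n", load_32bit);	/* 0x80001000 */
	return 0;
}
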
diff --git a/arch/sh/boot/compressed/Makefile_32 b/arch/sh/boot/compressed/Makefile_32
index 47685f6..301e6d5 100644
--- a/arch/sh/boot/compressed/Makefile_32
+++ b/arch/sh/boot/compressed/Makefile_32
@@ -23,6 +23,11 @@
 
 LIBGCC	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
+ifeq ($(CONFIG_FTRACE),y)
+ORIG_CFLAGS := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
+endif
+
 LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds
 
 $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c
index f386997..efdba6b 100644
--- a/arch/sh/boot/compressed/misc_32.c
+++ b/arch/sh/boot/compressed/misc_32.c
@@ -191,7 +191,7 @@
 
 void decompress_kernel(void)
 {
-	output_data = 0;
+	output_data = NULL;
 	output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
 #ifdef CONFIG_29BIT
 	output_ptr |= P2SEG;
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
new file mode 100644
index 0000000..bef07fa
--- /dev/null
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -0,0 +1,1050 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.26
+# Tue Aug 26 11:36:09 2008
+#
+CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="_edosk7760"
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_KPROBES is not set
+# CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# System type
+#
+CONFIG_CPU_SH4=y
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
+# CONFIG_CPU_SUBTYPE_MXG is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+CONFIG_CPU_SUBTYPE_SH7760=y
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+# CONFIG_CPU_SUBTYPE_SH7723 is not set
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
+# CONFIG_CPU_SUBTYPE_SHX3 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+# CONFIG_CPU_SUBTYPE_SH7722 is not set
+# CONFIG_CPU_SUBTYPE_SH7366 is not set
+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+
+#
+# Memory management options
+#
+CONFIG_QUICKLIST=y
+CONFIG_MMU=y
+CONFIG_PAGE_OFFSET=0x80000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x04000000
+CONFIG_29BIT=y
+CONFIG_VSYSCALL=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_MAX_ACTIVE_REGIONS=1
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+CONFIG_CACHE_WRITEBACK=y
+# CONFIG_CACHE_WRITETHROUGH is not set
+# CONFIG_CACHE_OFF is not set
+
+#
+# Processor features
+#
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_SH_FPU=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_CPU_HAS_INTEVT=y
+CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
+CONFIG_CPU_HAS_FPU=y
+
+#
+# Board support
+#
+CONFIG_SH_EDOSK7760=y
+
+#
+# Timer and clock configuration
+#
+CONFIG_SH_TMU=y
+CONFIG_SH_TIMER_IRQ=16
+CONFIG_SH_PCLK_FREQ=33333333
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+CONFIG_SH_DMA_API=y
+CONFIG_SH_DMA=y
+CONFIG_NR_ONCHIP_DMA_CHANNELS=4
+# CONFIG_NR_DMA_CHANNELS_BOOL is not set
+# CONFIG_SH_DMABRG is not set
+
+#
+# Companion Chips
+#
+
+#
+# Additional SuperH Device Drivers
+#
+# CONFIG_HEARTBEAT is not set
+# CONFIG_PUSH_SWITCH is not set
+
+#
+# Kernel features
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_GUSA=y
+# CONFIG_GUSA_RB is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x02000000
+# CONFIG_UBC_WAKEUP is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="mem=64M console=ttySC2,115200 root=/dev/nfs rw nfsroot=192.168.0.3:/scripts/filesys ip=192.168.0.4"
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+CONFIG_DEBUG_DRIVER=y
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=0
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_MAP_BANK_WIDTH_8=y
+CONFIG_MTD_MAP_BANK_WIDTH_16=y
+CONFIG_MTD_MAP_BANK_WIDTH_32=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_I8=y
+# CONFIG_MTD_OTP is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0xffffffff
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=4
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=26000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_STNIC is not set
+# CONFIG_SMC9194 is not set
+CONFIG_SMC91X=y
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=3
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+CONFIG_I2C_SH7760=y
+# CONFIG_I2C_SH_MOBILE is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+CONFIG_I2C_DEBUG_CORE=y
+CONFIG_I2C_DEBUG_ALGO=y
+CONFIG_I2C_DEBUG_BUS=y
+CONFIG_I2C_DEBUG_CHIP=y
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=m
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_MB86290_640X480_16BPP is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+
+#
+# Sound
+#
+CONFIG_SOUND=y
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_VERBOSE_PRINTK=y
+# CONFIG_SND_DEBUG is not set
+
+#
+# Generic devices
+#
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
+#
+# SUPERH devices
+#
+
+#
+# System on Chip audio support
+#
+CONFIG_SND_SOC=y
+
+#
+# SoC Audio support for SuperH
+#
+
+#
+# ALSA SoC audio for Freescale SOCs
+#
+
+#
+# SoC Audio for the Texas Instruments OMAP
+#
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+CONFIG_EXT2_FS_XIP=y
+CONFIG_FS_XIP=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=y
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UNUSED_SYMBOLS=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+CONFIG_EARLY_SCIF_CONSOLE=y
+CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_BOOTMEM is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_4KSTACKS is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_SH_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/sh/configs/rts7751r2dplus_qemu_defconfig b/arch/sh/configs/rts7751r2dplus_qemu_defconfig
new file mode 100644
index 0000000..a72796c
--- /dev/null
+++ b/arch/sh/configs/rts7751r2dplus_qemu_defconfig
@@ -0,0 +1,909 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-rc2
+# Mon Aug 18 22:17:44 2008
+#
+CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
+CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig"
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_SYS_SUPPORTS_PCI=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_IO_TRAPPED=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
+# CONFIG_HAVE_KPROBES is not set
+# CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+CONFIG_HAVE_CLK=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+# CONFIG_MODULE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+
+#
+# System type
+#
+CONFIG_CPU_SH4=y
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
+# CONFIG_CPU_SUBTYPE_MXG is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+CONFIG_CPU_SUBTYPE_SH7751R=y
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+# CONFIG_CPU_SUBTYPE_SH7723 is not set
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
+# CONFIG_CPU_SUBTYPE_SHX3 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+# CONFIG_CPU_SUBTYPE_SH7722 is not set
+# CONFIG_CPU_SUBTYPE_SH7366 is not set
+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+
+#
+# Memory management options
+#
+CONFIG_QUICKLIST=y
+CONFIG_MMU=y
+CONFIG_PAGE_OFFSET=0x80000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x04000000
+CONFIG_29BIT=y
+CONFIG_VSYSCALL=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_MAX_ACTIVE_REGIONS=1
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_ENTRY_OFFSET=0x00001000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+CONFIG_CACHE_WRITEBACK=y
+# CONFIG_CACHE_WRITETHROUGH is not set
+# CONFIG_CACHE_OFF is not set
+
+#
+# Processor features
+#
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_SH_FPU=y
+# CONFIG_SH_STORE_QUEUES is not set
+CONFIG_CPU_HAS_INTEVT=y
+CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
+CONFIG_CPU_HAS_FPU=y
+
+#
+# Board support
+#
+# CONFIG_SH_7751_SYSTEMH is not set
+# CONFIG_SH_SECUREEDGE5410 is not set
+CONFIG_SH_RTS7751R2D=y
+# CONFIG_SH_LANDISK is not set
+# CONFIG_SH_TITAN is not set
+# CONFIG_SH_LBOX_RE2 is not set
+
+#
+# RTS7751R2D Board Revision
+#
+CONFIG_RTS7751R2D_PLUS=y
+# CONFIG_RTS7751R2D_1 is not set
+
+#
+# Timer and clock configuration
+#
+CONFIG_SH_TMU=y
+CONFIG_SH_TIMER_IRQ=16
+CONFIG_SH_PCLK_FREQ=60000000
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+
+#
+# Additional SuperH Device Drivers
+#
+CONFIG_HEARTBEAT=y
+# CONFIG_PUSH_SWITCH is not set
+
+#
+# Kernel features
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_SECCOMP=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_GUSA=y
+# CONFIG_GUSA_RB is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00010000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+# CONFIG_UBC_WAKEUP is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
+
+#
+# Bus options
+#
+# CONFIG_PCI is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_CONSOLE is not set
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=1
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+# CONFIG_SPI_SH_SCI is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+CONFIG_MFD_SM501=y
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_SH_MOBILE_LCDC=m
+CONFIG_FB_SM501=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_LOGO_SUPERH_MONO is not set
+# CONFIG_LOGO_SUPERH_VGA16 is not set
+CONFIG_LOGO_SUPERH_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SND=m
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_SUPERH=y
+# CONFIG_SND_SOC is not set
+CONFIG_SOUND_PRIME=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+CONFIG_RTC_DRV_R9701=y
+# CONFIG_RTC_DRV_RS5C348 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_SH is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+CONFIG_NLS_CODEPAGE_932=y
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+CONFIG_EARLY_SCIF_CONSOLE=y
+CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_BOOTMEM is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_4KSTACKS is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_SH_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/sh/drivers/pci/ops-lboxre2.c b/arch/sh/drivers/pci/ops-lboxre2.c
index a13cb76..86c0b6f 100644
--- a/arch/sh/drivers/pci/ops-lboxre2.c
+++ b/arch/sh/drivers/pci/ops-lboxre2.c
@@ -10,7 +10,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/io.h>
-#include <asm/lboxre2.h>
+#include <mach/lboxre2.h>
 #include "pci-sh4.h"
 
 static char lboxre2_irq_tab[] __initdata = {
diff --git a/arch/sh/drivers/pci/ops-r7780rp.c b/arch/sh/drivers/pci/ops-r7780rp.c
index 5fdadae..8555238 100644
--- a/arch/sh/drivers/pci/ops-r7780rp.c
+++ b/arch/sh/drivers/pci/ops-r7780rp.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
-#include <asm/r7780rp.h>
+#include <mach/highlander.h>
 #include <asm/io.h>
 #include "pci-sh4.h"
 
diff --git a/arch/sh/drivers/pci/ops-rts7751r2d.c b/arch/sh/drivers/pci/ops-rts7751r2d.c
index b3fa3e2..d6ca74b 100644
--- a/arch/sh/drivers/pci/ops-rts7751r2d.c
+++ b/arch/sh/drivers/pci/ops-rts7751r2d.c
@@ -15,7 +15,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/io.h>
-#include <asm/rts7751r2d.h>
+#include <mach/r2d.h>
 #include "pci-sh4.h"
 
 static u8 rts7751r2d_irq_tab[] __initdata = {
diff --git a/arch/sh/drivers/pci/ops-sdk7780.c b/arch/sh/drivers/pci/ops-sdk7780.c
index 66a9b40..4dcc641 100644
--- a/arch/sh/drivers/pci/ops-sdk7780.c
+++ b/arch/sh/drivers/pci/ops-sdk7780.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
-#include <asm/sdk7780.h>
+#include <mach/sdk7780.h>
 #include <asm/io.h>
 #include "pci-sh4.h"
 
diff --git a/arch/sh/drivers/pci/ops-titan.c b/arch/sh/drivers/pci/ops-titan.c
index ac8ee23..a8f7801 100644
--- a/arch/sh/drivers/pci/ops-titan.c
+++ b/arch/sh/drivers/pci/ops-titan.c
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/io.h>
-#include <asm/titan.h>
+#include <mach/titan.h>
 #include "pci-sh4.h"
 
 static char titan_irq_tab[] __initdata = {
diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h
new file mode 100644
index 0000000..43b8e1a
--- /dev/null
+++ b/arch/sh/include/asm/bitops-llsc.h
@@ -0,0 +1,144 @@
+#ifndef __ASM_SH_BITOPS_LLSC_H
+#define __ASM_SH_BITOPS_LLSC_H
+
+static inline void set_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! set_bit		\n\t"
+		"or		%3, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=r" (a)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+}
+
+static inline void clear_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! clear_bit		\n\t"
+		"and		%3, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=r" (a)
+		: "1" (a), "r" (~mask)
+		: "t", "memory"
+	);
+}
+
+static inline void change_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! change_bit		\n\t"
+		"xor		%3, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=r" (a)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+}
+
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! test_and_set_bit	\n\t"
+		"mov		%0, %2				\n\t"
+		"or		%4, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"and		%4, %2				\n\t"
+		: "=&z" (tmp), "=r" (a), "=&r" (retval)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+
+	return retval != 0;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! test_and_clear_bit	\n\t"
+		"mov		%0, %2				\n\t"
+		"and		%5, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"and		%4, %2				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=r" (a), "=&r" (retval)
+		: "1" (a), "r" (mask), "r" (~mask)
+		: "t", "memory"
+	);
+
+	return retval != 0;
+}
+
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! test_and_change_bit	\n\t"
+		"mov		%0, %2				\n\t"
+		"xor		%4, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"and		%4, %2				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=r" (a), "=&r" (retval)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+
+	return retval != 0;
+}
+
+#endif /* __ASM_SH_BITOPS_LLSC_H */
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index d7d382f..367930d 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -13,6 +13,8 @@
 
 #ifdef CONFIG_GUSA_RB
 #include <asm/bitops-grb.h>
+#elif defined(CONFIG_CPU_SH4A)
+#include <asm/bitops-llsc.h>
 #else
 #include <asm/bitops-irq.h>
 #endif
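
The movli.l/movco.l pairs in the new bitops-llsc.h implement the usual load-locked/store-conditional retry loop: read the word, modify the copy, attempt the conditional store, and branch back if the reservation was lost to another CPU. A rough portable analogue of test_and_set_bit, written with GCC's __atomic builtins purely for illustration (the helper name and the compare-and-swap formulation are assumptions, not part of the patch):

#include <stdbool.h>

/* Sketch only: mirrors the retry structure of the SH-4A movli.l/movco.l
 * sequence with a compare-and-swap loop on the containing 32-bit word. */
static inline bool demo_test_and_set_bit(int nr, volatile unsigned int *addr)
{
	volatile unsigned int *word = addr + (nr >> 5);	/* a += nr >> 5     */
	unsigned int mask = 1u << (nr & 0x1f);		/* 1 << (nr & 0x1f) */
	unsigned int old, val;

	do {
		old = __atomic_load_n(word, __ATOMIC_RELAXED);	/* movli.l */
		val = old | mask;				/* or      */
	} while (!__atomic_compare_exchange_n(word, &old, val,	/* movco.l + bf */
					      false, __ATOMIC_ACQ_REL,
					      __ATOMIC_RELAXED));

	return (old & mask) != 0;	/* "and %4, %2": report the previous bit */
}
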
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index 720dfab..f9c8858 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -39,6 +39,7 @@
 
 /* Should be defined by processor-specific code */
 void arch_init_clk_ops(struct clk_ops **, int type);
+int __init arch_clk_init(void);
 
 /* arch/sh/kernel/cpu/clock.c */
 int clk_init(void);
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
new file mode 100644
index 0000000..aee3bf2
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_CMPXCHG_LLSC_H
+#define __ASM_SH_CMPXCHG_LLSC_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long retval;
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		"1:					\n\t"
+		"movli.l	@%1, %0	! xchg_u32	\n\t"
+		"mov		%0, %2			\n\t"
+		"mov		%4, %0			\n\t"
+		"movco.l	%0, @%1			\n\t"
+		"bf		1b			\n\t"
+		"synco					\n\t"
+		: "=&z"(tmp), "=r" (m), "=&r" (retval)
+		: "1" (m), "r" (val)
+		: "t", "memory"
+	);
+
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long retval;
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		"1:					\n\t"
+		"movli.l	@%1, %0	! xchg_u8	\n\t"
+		"mov		%0, %2			\n\t"
+		"mov		%4, %0			\n\t"
+		"movco.l	%0, @%1			\n\t"
+		"bf		1b			\n\t"
+		"synco					\n\t"
+		: "=&z"(tmp), "=r" (m), "=&r" (retval)
+		: "1" (m), "r" (val & 0xff)
+		: "t", "memory"
+	);
+
+	return retval;
+}
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+{
+	unsigned long retval;
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! __cmpxchg_u32		\n\t"
+		"mov		%0, %2				\n\t"
+		"cmp/eq		%2, %4				\n\t"
+		"bf		2f				\n\t"
+		"mov		%5, %0				\n\t"
+		"2:						\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=r" (m), "=&r" (retval)
+		: "1" (m), "r" (old), "r" (new)
+		: "t", "memory"
+	);
+
+	return retval;
+}
+
+#endif /* __ASM_SH_CMPXCHG_LLSC_H */
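__cmpxchg_u32() returns the value it found at the address, so a caller can tell whether its compare succeeded and retry otherwise. A sketch of that usual calling pattern, written as a self-contained C11 analogue rather than against the kernel primitive itself (the helper name is invented for the example):

#include <stdatomic.h>

static unsigned int add_to_counter(atomic_uint *counter, unsigned int delta)
{
	unsigned int old = atomic_load(counter);
	unsigned int new;

	do {
		new = old + delta;
		/* kernel equivalent: __cmpxchg_u32(counter, old, new) == old */
	} while (!atomic_compare_exchange_weak(counter, &old, new));

	return new;
}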
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ee02db1..9eb9036 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -108,6 +108,14 @@
 #define elf_check_fdpic(x)		((x)->e_flags & EF_SH_FDPIC)
 #define elf_check_const_displacement(x)	((x)->e_flags & EF_SH_PIC)
 
+#ifdef CONFIG_SUPERH32
+/*
+ * Enable dump using regset.
+ * This covers all of general/DSP/FPU regs.
+ */
+#define CORE_DUMP_USE_REGSET
+#endif
+
 #define USE_ELF_CORE_DUMP
 #define ELF_FDPIC_CORE_EFLAGS	EF_SH_FDPIC
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
@@ -190,12 +198,6 @@
 #endif
 
 #define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
-struct task_struct;
-extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 
 #ifdef CONFIG_VSYSCALL
 /* vDSO has arch_setup_additional_pages */
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 91462fe..1d3aee0 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -30,8 +30,15 @@
 }
 #endif
 
+struct user_regset;
+
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 
+extern int fpregs_get(struct task_struct *target,
+		      const struct user_regset *regset,
+		      unsigned int pos, unsigned int count,
+		      void *kbuf, void __user *ubuf);
+
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 {
 	preempt_disable();
@@ -50,6 +57,18 @@
 	preempt_enable();
 }
 
+static inline int init_fpu(struct task_struct *tsk)
+{
+	if (tsk_used_math(tsk)) {
+		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+			unlazy_fpu(tsk, task_pt_regs(tsk));
+		return 0;
+	}
+
+	set_stopped_child_used_math(tsk);
+	return 0;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_SH_FPU_H */
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
new file mode 100644
index 0000000..3aed362
--- /dev/null
+++ b/arch/sh/include/asm/ftrace.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_SH_FTRACE_H
+#define __ASM_SH_FTRACE_H
+
+#ifndef __ASSEMBLY__
+extern void mcount(void);
+#endif
+
+#endif /* __ASM_SH_FTRACE_H */
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index cf32bd2..9650e7c 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -1,9 +1,9 @@
 /*
  *  include/asm-sh/gpio.h
  *
- *  Copyright (C) 2007 Markus Brunner, Mark Jonas
+ * Generic GPIO API and pinmux table support for SuperH.
  *
- *  Addresses for the Pin Function Controller
+ * Copyright (c) 2008 Magnus Damm
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -16,4 +16,92 @@
 #include <cpu/gpio.h>
 #endif
 
+typedef unsigned short pinmux_enum_t;
+typedef unsigned char pinmux_flag_t;
+
+#define PINMUX_TYPE_NONE            0
+#define PINMUX_TYPE_FUNCTION        1
+#define PINMUX_TYPE_GPIO            2
+#define PINMUX_TYPE_OUTPUT          3
+#define PINMUX_TYPE_INPUT           4
+#define PINMUX_TYPE_INPUT_PULLUP    5
+#define PINMUX_TYPE_INPUT_PULLDOWN  6
+
+#define PINMUX_FLAG_TYPE            (0x7)
+#define PINMUX_FLAG_WANT_PULLUP     (1 << 3)
+#define PINMUX_FLAG_WANT_PULLDOWN   (1 << 4)
+
+struct pinmux_gpio {
+	pinmux_enum_t enum_id;
+	pinmux_flag_t flags;
+};
+
+#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark }
+#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
+
+struct pinmux_cfg_reg {
+	unsigned long reg, reg_width, field_width;
+	unsigned long *cnt;
+	pinmux_enum_t *enum_ids;
+};
+
+#define PINMUX_CFG_REG(name, r, r_width, f_width) \
+	.reg = r, .reg_width = r_width, .field_width = f_width,		\
+	.cnt = (unsigned long [r_width / f_width]) {}, \
+	.enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
+
+struct pinmux_data_reg {
+	unsigned long reg, reg_width;
+	pinmux_enum_t *enum_ids;
+};
+
+#define PINMUX_DATA_REG(name, r, r_width) \
+	.reg = r, .reg_width = r_width,	\
+	.enum_ids = (pinmux_enum_t [r_width]) \
+
+struct pinmux_range {
+	pinmux_enum_t begin;
+	pinmux_enum_t end;
+};
+
+struct pinmux_info {
+	char *name;
+	pinmux_enum_t reserved_id;
+	struct pinmux_range data;
+	struct pinmux_range input;
+	struct pinmux_range input_pd;
+	struct pinmux_range input_pu;
+	struct pinmux_range output;
+	struct pinmux_range mark;
+	struct pinmux_range function;
+
+	unsigned first_gpio, last_gpio;
+
+	struct pinmux_gpio *gpios;
+	struct pinmux_cfg_reg *cfg_regs;
+	struct pinmux_data_reg *data_regs;
+
+	pinmux_enum_t *gpio_data;
+	unsigned int gpio_data_size;
+
+	unsigned long *gpio_in_use;
+};
+
+int register_pinmux(struct pinmux_info *pip);
+
+int __gpio_request(unsigned gpio);
+static inline int gpio_request(unsigned gpio, const char *label)
+{
+	return __gpio_request(gpio);
+}
+void gpio_free(unsigned gpio);
+int gpio_direction_input(unsigned gpio);
+int gpio_direction_output(unsigned gpio, int value);
+int gpio_get_value(unsigned gpio);
+void gpio_set_value(unsigned gpio, int value);
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+	return 0;
+}
+
 #endif /* __ASM_SH_GPIO_H */
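The block above both declares the generic GPIO calls (gpio_request() and friends, with the label simply dropped) and the pinmux table types consumed by register_pinmux(). A minimal sketch of how board or driver code might drive one pin through this API, assuming a pin enum such as GPIO_PA0 from the cpu/sh7203.h header added later in this patch and a pinmux table that has already been registered:

#include <linux/errno.h>
#include <asm/gpio.h>
#include <cpu/sh7203.h>

static int example_led_setup(void)
{
	int ret;

	ret = gpio_request(GPIO_PA0, "example-led");	/* label is unused */
	if (ret < 0)
		return ret;

	ret = gpio_direction_output(GPIO_PA0, 0);	/* start driven low */
	if (ret < 0) {
		gpio_free(GPIO_PA0);
		return ret;
	}

	gpio_set_value(GPIO_PA0, 1);			/* and switch it on */
	return 0;
}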
diff --git a/arch/sh/include/asm/hw_irq.h b/arch/sh/include/asm/hw_irq.h
index d557b00..603cdde 100644
--- a/arch/sh/include/asm/hw_irq.h
+++ b/arch/sh/include/asm/hw_irq.h
@@ -2,6 +2,7 @@
 #define __ASM_SH_HW_IRQ_H
 
 #include <linux/init.h>
+#include <linux/sh_intc.h>
 #include <asm/atomic.h>
 
 extern atomic_t irq_err_count;
@@ -23,101 +24,12 @@
 
 void register_ipr_controller(struct ipr_desc *);
 
-typedef unsigned char intc_enum;
-
-struct intc_vect {
-	intc_enum enum_id;
-	unsigned short vect;
-};
-
-#define INTC_VECT(enum_id, vect) { enum_id, vect }
-#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq))
-
-struct intc_group {
-	intc_enum enum_id;
-	intc_enum enum_ids[32];
-};
-
-#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } }
-
-struct intc_mask_reg {
-	unsigned long set_reg, clr_reg, reg_width;
-	intc_enum enum_ids[32];
-#ifdef CONFIG_SMP
-	unsigned long smp;
-#endif
-};
-
-struct intc_prio_reg {
-	unsigned long set_reg, clr_reg, reg_width, field_width;
-	intc_enum enum_ids[16];
-#ifdef CONFIG_SMP
-	unsigned long smp;
-#endif
-};
-
-struct intc_sense_reg {
-	unsigned long reg, reg_width, field_width;
-	intc_enum enum_ids[16];
-};
-
-#ifdef CONFIG_SMP
-#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
-#else
-#define INTC_SMP(stride, nr)
-#endif
-
-struct intc_desc {
-	struct intc_vect *vectors;
-	unsigned int nr_vectors;
-	struct intc_group *groups;
-	unsigned int nr_groups;
-	struct intc_mask_reg *mask_regs;
-	unsigned int nr_mask_regs;
-	struct intc_prio_reg *prio_regs;
-	unsigned int nr_prio_regs;
-	struct intc_sense_reg *sense_regs;
-	unsigned int nr_sense_regs;
-	char *name;
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
-	struct intc_mask_reg *ack_regs;
-	unsigned int nr_ack_regs;
-#endif
-};
-
-#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
-#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups,		\
-	mask_regs, prio_regs, sense_regs)				\
-struct intc_desc symbol __initdata = {					\
-	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),			\
-	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),			\
-	_INTC_ARRAY(sense_regs),					\
-	chipname,							\
-}
-
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
-#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups,	\
-	mask_regs, prio_regs, sense_regs, ack_regs)			\
-struct intc_desc symbol __initdata = {					\
-	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),			\
-	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),			\
-	_INTC_ARRAY(sense_regs),					\
-	chipname,							\
-	_INTC_ARRAY(ack_regs),						\
-}
-#endif
-
-void __init register_intc_controller(struct intc_desc *desc);
-int intc_set_priority(unsigned int irq, unsigned int prio);
-
 void __init plat_irq_setup(void);
-#ifdef CONFIG_CPU_SH3
 void __init plat_irq_setup_sh3(void);
-#endif
+void __init plat_irq_setup_pins(int mode);
 
 enum { IRQ_MODE_IRQ, IRQ_MODE_IRQ7654, IRQ_MODE_IRQ3210,
        IRQ_MODE_IRL7654_MASK, IRQ_MODE_IRL3210_MASK,
        IRQ_MODE_IRL7654, IRQ_MODE_IRL3210 };
-void __init plat_irq_setup_pins(int mode);
 
 #endif /* __ASM_SH_HW_IRQ_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index a4fbf0c..436c285 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -1,27 +1,26 @@
 #ifndef __ASM_SH_IO_H
 #define __ASM_SH_IO_H
-
 /*
  * Convention:
- *    read{b,w,l}/write{b,w,l} are for PCI,
+ *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
  *    while in{b,w,l}/out{b,w,l} are for ISA
- * These may (will) be platform specific function.
+ *
  * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
  * and 'string' versions: ins{b,w,l}/outs{b,w,l}
- * For read{b,w,l} and write{b,w,l} there are also __raw versions, which
- * do not have a memory barrier after them.
  *
- * In addition, we have
- *   ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O.
- *   which are processor specific.
- */
-
-/*
- * We follow the Alpha convention here:
- *  __inb expands to an inline function call (which calls via the mv)
- *  _inb  is a real function call (note ___raw fns are _ version of __raw)
- *  inb   by default expands to _inb, but the machine specific code may
- *        define it to __inb if it chooses.
+ * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
+ * automatically, there are also __raw versions, which do not.
+ *
+ * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
+ * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
+ * these have the same semantics as the __raw variants, and as such, all
+ * new code should be using the __raw versions.
+ *
+ * All ISA I/O routines are wrapped through the machine vector. If a
+ * board does not provide overrides, a generic set that is copied in
+ * from the default machine vector is used instead. These are largely
+ * for old compat code for I/O offsetting to SuperIOs, all of which are
+ * better handled through the machvec ioport mapping routines these days.
  */
 #include <asm/cache.h>
 #include <asm/system.h>
@@ -31,7 +30,6 @@
 #include <asm-generic/iomap.h>
 
 #ifdef __KERNEL__
-
 /*
  * Depending on which platform we are running on, we need different
  * I/O functions.
@@ -40,105 +38,68 @@
 #include <asm/io_generic.h>
 #include <asm/io_trapped.h>
 
-#define maybebadio(port) \
-  printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
-	 __FUNCTION__, __LINE__, (port), (u32)__builtin_return_address(0))
+#define inb(p)			sh_mv.mv_inb((p))
+#define inw(p)			sh_mv.mv_inw((p))
+#define inl(p)			sh_mv.mv_inl((p))
+#define outb(x,p)		sh_mv.mv_outb((x),(p))
+#define outw(x,p)		sh_mv.mv_outw((x),(p))
+#define outl(x,p)		sh_mv.mv_outl((x),(p))
 
-/*
- * Since boards are able to define their own set of I/O routines through
- * their respective machine vector, we always wrap through the mv.
- *
- * Also, in the event that a board hasn't provided its own definition for
- * a given routine, it will be wrapped to generic code at run-time.
- */
+#define inb_p(p)		sh_mv.mv_inb_p((p))
+#define inw_p(p)		sh_mv.mv_inw_p((p))
+#define inl_p(p)		sh_mv.mv_inl_p((p))
+#define outb_p(x,p)		sh_mv.mv_outb_p((x),(p))
+#define outw_p(x,p)		sh_mv.mv_outw_p((x),(p))
+#define outl_p(x,p)		sh_mv.mv_outl_p((x),(p))
 
-#define __inb(p)	sh_mv.mv_inb((p))
-#define __inw(p)	sh_mv.mv_inw((p))
-#define __inl(p)	sh_mv.mv_inl((p))
-#define __outb(x,p)	sh_mv.mv_outb((x),(p))
-#define __outw(x,p)	sh_mv.mv_outw((x),(p))
-#define __outl(x,p)	sh_mv.mv_outl((x),(p))
+#define insb(p,b,c)		sh_mv.mv_insb((p), (b), (c))
+#define insw(p,b,c)		sh_mv.mv_insw((p), (b), (c))
+#define insl(p,b,c)		sh_mv.mv_insl((p), (b), (c))
+#define outsb(p,b,c)		sh_mv.mv_outsb((p), (b), (c))
+#define outsw(p,b,c)		sh_mv.mv_outsw((p), (b), (c))
+#define outsl(p,b,c)		sh_mv.mv_outsl((p), (b), (c))
 
-#define __inb_p(p)	sh_mv.mv_inb_p((p))
-#define __inw_p(p)	sh_mv.mv_inw_p((p))
-#define __inl_p(p)	sh_mv.mv_inl_p((p))
-#define __outb_p(x,p)	sh_mv.mv_outb_p((x),(p))
-#define __outw_p(x,p)	sh_mv.mv_outw_p((x),(p))
-#define __outl_p(x,p)	sh_mv.mv_outl_p((x),(p))
+#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
+#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
+#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
+#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))
 
-#define __insb(p,b,c)	sh_mv.mv_insb((p), (b), (c))
-#define __insw(p,b,c)	sh_mv.mv_insw((p), (b), (c))
-#define __insl(p,b,c)	sh_mv.mv_insl((p), (b), (c))
-#define __outsb(p,b,c)	sh_mv.mv_outsb((p), (b), (c))
-#define __outsw(p,b,c)	sh_mv.mv_outsw((p), (b), (c))
-#define __outsl(p,b,c)	sh_mv.mv_outsl((p), (b), (c))
+#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
+#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
+#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
+#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))
 
-#define __readb(a)	sh_mv.mv_readb((a))
-#define __readw(a)	sh_mv.mv_readw((a))
-#define __readl(a)	sh_mv.mv_readl((a))
-#define __writeb(v,a)	sh_mv.mv_writeb((v),(a))
-#define __writew(v,a)	sh_mv.mv_writew((v),(a))
-#define __writel(v,a)	sh_mv.mv_writel((v),(a))
+#define readb(a)		({ u8  r_ = __raw_readb(a); mb(); r_; })
+#define readw(a)		({ u16 r_ = __raw_readw(a); mb(); r_; })
+#define readl(a)		({ u32 r_ = __raw_readl(a); mb(); r_; })
+#define readq(a)		({ u64 r_ = __raw_readq(a); mb(); r_; })
 
-#define inb		__inb
-#define inw		__inw
-#define inl		__inl
-#define outb		__outb
-#define outw		__outw
-#define outl		__outl
+#define writeb(v,a)		({ __raw_writeb((v),(a)); mb(); })
+#define writew(v,a)		({ __raw_writew((v),(a)); mb(); })
+#define writel(v,a)		({ __raw_writel((v),(a)); mb(); })
+#define writeq(v,a)		({ __raw_writeq((v),(a)); mb(); })
 
-#define inb_p		__inb_p
-#define inw_p		__inw_p
-#define inl_p		__inl_p
-#define outb_p		__outb_p
-#define outw_p		__outw_p
-#define outl_p		__outl_p
+/* SuperH on-chip I/O functions */
+#define ctrl_inb		__raw_readb
+#define ctrl_inw		__raw_readw
+#define ctrl_inl		__raw_readl
+#define ctrl_inq		__raw_readq
 
-#define insb		__insb
-#define insw		__insw
-#define insl		__insl
-#define outsb		__outsb
-#define outsw		__outsw
-#define outsl		__outsl
+#define ctrl_outb		__raw_writeb
+#define ctrl_outw		__raw_writew
+#define ctrl_outl		__raw_writel
+#define ctrl_outq		__raw_writeq
 
-#define __raw_readb(a)		__readb((void __iomem *)(a))
-#define __raw_readw(a)		__readw((void __iomem *)(a))
-#define __raw_readl(a)		__readl((void __iomem *)(a))
-#define __raw_writeb(v, a)	__writeb(v, (void __iomem *)(a))
-#define __raw_writew(v, a)	__writew(v, (void __iomem *)(a))
-#define __raw_writel(v, a)	__writel(v, (void __iomem *)(a))
-
-void __raw_writesl(unsigned long addr, const void *data, int longlen);
-void __raw_readsl(unsigned long addr, void *data, int longlen);
-
-/*
- * The platform header files may define some of these macros to use
- * the inlined versions where appropriate.  These macros may also be
- * redefined by userlevel programs.
- */
-#ifdef __readb
-# define readb(a)	({ unsigned int r_ = __raw_readb(a); mb(); r_; })
+static inline void ctrl_delay(void)
+{
+#ifdef P2SEG
+	__raw_readw(P2SEG);
 #endif
-#ifdef __raw_readw
-# define readw(a)	({ unsigned int r_ = __raw_readw(a); mb(); r_; })
-#endif
-#ifdef __raw_readl
-# define readl(a)	({ unsigned int r_ = __raw_readl(a); mb(); r_; })
-#endif
-
-#ifdef __raw_writeb
-# define writeb(v,a)	({ __raw_writeb((v),(a)); mb(); })
-#endif
-#ifdef __raw_writew
-# define writew(v,a)	({ __raw_writew((v),(a)); mb(); })
-#endif
-#ifdef __raw_writel
-# define writel(v,a)	({ __raw_writel((v),(a)); mb(); })
-#endif
+}
 
 #define __BUILD_MEMORY_STRING(bwlq, type)				\
 									\
-static inline void writes##bwlq(volatile void __iomem *mem,		\
+static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
 				const void *addr, unsigned int count)	\
 {									\
 	const volatile type *__addr = addr;				\
@@ -149,8 +110,8 @@
 	}								\
 }									\
 									\
-static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
-			       unsigned int count)			\
+static inline void __raw_reads##bwlq(volatile void __iomem *mem,	\
+			       void *addr, unsigned int count)		\
 {									\
 	volatile type *__addr = addr;					\
 									\
@@ -162,106 +123,71 @@
 
 __BUILD_MEMORY_STRING(b, u8)
 __BUILD_MEMORY_STRING(w, u16)
-#define writesl __raw_writesl
-#define readsl  __raw_readsl
+__BUILD_MEMORY_STRING(q, u64)
 
-#define readb_relaxed(a) readb(a)
-#define readw_relaxed(a) readw(a)
-#define readl_relaxed(a) readl(a)
+void __raw_writesl(void __iomem *addr, const void *data, int longlen);
+void __raw_readsl(const void __iomem *addr, void *data, int longlen);
+
+#define writesb			__raw_writesb
+#define writesw			__raw_writesw
+#define writesl			__raw_writesl
+
+#define readsb			__raw_readsb
+#define readsw			__raw_readsw
+#define readsl			__raw_readsl
+
+#define readb_relaxed(a)	readb(a)
+#define readw_relaxed(a)	readw(a)
+#define readl_relaxed(a)	readl(a)
+#define readq_relaxed(a)	readq(a)
 
 /* Simple MMIO */
-#define ioread8(a)		readb(a)
-#define ioread16(a)		readw(a)
+#define ioread8(a)		__raw_readb(a)
+#define ioread16(a)		__raw_readw(a)
 #define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
-#define ioread32(a)		readl(a)
+#define ioread32(a)		__raw_readl(a)
 #define ioread32be(a)		be32_to_cpu(__raw_readl((a)))
 
-#define iowrite8(v,a)		writeb((v),(a))
-#define iowrite16(v,a)		writew((v),(a))
+#define iowrite8(v,a)		__raw_writeb((v),(a))
+#define iowrite16(v,a)		__raw_writew((v),(a))
 #define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
-#define iowrite32(v,a)		writel((v),(a))
+#define iowrite32(v,a)		__raw_writel((v),(a))
 #define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))
 
-#define ioread8_rep(a, d, c)	readsb((a), (d), (c))
-#define ioread16_rep(a, d, c)	readsw((a), (d), (c))
-#define ioread32_rep(a, d, c)	readsl((a), (d), (c))
+#define ioread8_rep(a, d, c)	__raw_readsb((a), (d), (c))
+#define ioread16_rep(a, d, c)	__raw_readsw((a), (d), (c))
+#define ioread32_rep(a, d, c)	__raw_readsl((a), (d), (c))
 
-#define iowrite8_rep(a, s, c)	writesb((a), (s), (c))
-#define iowrite16_rep(a, s, c)	writesw((a), (s), (c))
-#define iowrite32_rep(a, s, c)	writesl((a), (s), (c))
+#define iowrite8_rep(a, s, c)	__raw_writesb((a), (s), (c))
+#define iowrite16_rep(a, s, c)	__raw_writesw((a), (s), (c))
+#define iowrite32_rep(a, s, c)	__raw_writesl((a), (s), (c))
 
-#define mmiowb()	wmb()	/* synco on SH-4A, otherwise a nop */
+/* synco on SH-4A, otherwise a nop */
+#define mmiowb()		wmb()
 
 #define IO_SPACE_LIMIT 0xffffffff
 
+extern unsigned long generic_io_base;
+
 /*
- * This function provides a method for the generic case where a board-specific
- * ioport_map simply needs to return the port + some arbitrary port base.
+ * This function provides a method for the generic case where a
+ * board-specific ioport_map simply needs to return the port + some
+ * arbitrary port base.
  *
  * We use this at board setup time to implicitly set the port base, and
  * as a result, we can use the generic ioport_map.
  */
 static inline void __set_io_port_base(unsigned long pbase)
 {
-	extern unsigned long generic_io_base;
-
 	generic_io_base = pbase;
 }
 
 #define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
 
 /* We really want to try and get these to memcpy etc */
-extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
-extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
-extern void memset_io(volatile void __iomem *, int, unsigned long);
-
-/* SuperH on-chip I/O functions */
-static inline unsigned char ctrl_inb(unsigned long addr)
-{
-	return *(volatile unsigned char*)addr;
-}
-
-static inline unsigned short ctrl_inw(unsigned long addr)
-{
-	return *(volatile unsigned short*)addr;
-}
-
-static inline unsigned int ctrl_inl(unsigned long addr)
-{
-	return *(volatile unsigned long*)addr;
-}
-
-static inline unsigned long long ctrl_inq(unsigned long addr)
-{
-	return *(volatile unsigned long long*)addr;
-}
-
-static inline void ctrl_outb(unsigned char b, unsigned long addr)
-{
-	*(volatile unsigned char*)addr = b;
-}
-
-static inline void ctrl_outw(unsigned short b, unsigned long addr)
-{
-	*(volatile unsigned short*)addr = b;
-}
-
-static inline void ctrl_outl(unsigned int b, unsigned long addr)
-{
-        *(volatile unsigned long*)addr = b;
-}
-
-static inline void ctrl_outq(unsigned long long b, unsigned long addr)
-{
-	*(volatile unsigned long long*)addr = b;
-}
-
-static inline void ctrl_delay(void)
-{
-#ifdef P2SEG
-	ctrl_inw(P2SEG);
-#endif
-}
+void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
+void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
+void memset_io(volatile void __iomem *, int, unsigned long);
 
 /* Quad-word real-mode I/O, don't ask.. */
 unsigned long long peek_real_address_q(unsigned long long addr);
@@ -347,9 +273,15 @@
 	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
 #define p3_ioremap(offset, size, flags)			\
 	__ioremap((offset), (size), (flags))
+#define ioremap_prot(offset, size, flags)		\
+	__ioremap_mode((offset), (size), (flags))
 #define iounmap(addr)					\
 	__iounmap((addr))
 
+#define maybebadio(port) \
+	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
+	       __func__, __LINE__, (port), (u32)__builtin_return_address(0))
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
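To summarize the reworked convention: readl()/writel() and the other sized accessors now expand to the __raw forms plus an explicit mb(), the ctrl_in*/ctrl_out* names survive only as aliases for the __raw accessors, and the ISA-style inb()/outb() family goes straight through the machine vector. A minimal sketch of a caller, with the iomem cookie and register offsets invented for the example:

#include <asm/io.h>

static void example_mmio(void __iomem *regs)
{
	u32 status = readl(regs);		/* load + implied mb() */

	writel(status | 0x1, regs);		/* store + implied mb() */

	/* raw variants skip the barrier; the old ctrl_inl()/ctrl_outl()
	 * names are now plain aliases for these */
	__raw_writel(0x1, regs + 0x10);
	(void)__raw_readl(regs + 0x10);
}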
diff --git a/arch/sh/include/asm/io_generic.h b/arch/sh/include/asm/io_generic.h
index 92fc607..1e5d375 100644
--- a/arch/sh/include/asm/io_generic.h
+++ b/arch/sh/include/asm/io_generic.h
@@ -33,13 +33,6 @@
 void IO_CONCAT(__IO_PREFIX,outsw)(unsigned long, const void *src, unsigned long count);
 void IO_CONCAT(__IO_PREFIX,outsl)(unsigned long, const void *src, unsigned long count);
 
-u8 IO_CONCAT(__IO_PREFIX,readb)(void __iomem *);
-u16 IO_CONCAT(__IO_PREFIX,readw)(void __iomem *);
-u32 IO_CONCAT(__IO_PREFIX,readl)(void __iomem *);
-void IO_CONCAT(__IO_PREFIX,writeb)(u8, void __iomem *);
-void IO_CONCAT(__IO_PREFIX,writew)(u16, void __iomem *);
-void IO_CONCAT(__IO_PREFIX,writel)(u32, void __iomem *);
-
 void *IO_CONCAT(__IO_PREFIX,ioremap)(unsigned long offset, unsigned long size);
 void IO_CONCAT(__IO_PREFIX,iounmap)(void *addr);
 
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 6195a53..d319baa 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -41,6 +41,9 @@
 #define irq_canonicalize(irq)	(irq)
 #define irq_demux(irq)		sh_mv.mv_irq_demux(irq)
 
+void init_IRQ(void);
+asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
+
 #ifdef CONFIG_IRQSTACKS
 extern void irq_ctx_init(int cpu);
 extern void irq_ctx_exit(int cpu);
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
new file mode 100644
index 0000000..6078d8e
--- /dev/null
+++ b/arch/sh/include/asm/kprobes.h
@@ -0,0 +1,58 @@
+#ifndef __ASM_SH_KPROBES_H
+#define __ASM_SH_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0xc33a
+
+#define MAX_INSN_SIZE 16
+#define MAX_STACK_SIZE 64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define regs_return_value(regs)		((regs)->regs[0])
+#define flush_insn_slot(p)		do { } while (0)
+#define kretprobe_blacklist_size	0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *);
+void kretprobe_trampoline(void);
+void jprobe_return_end(void);
+
+/* Architecture specific copy of original instruction*/
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long jprobe_saved_r15;
+	struct pt_regs jprobe_saved_regs;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+	struct prev_kprobe prev_kprobe;
+};
+
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+extern int kprobe_handle_illslot(unsigned long pc);
+#else
+
+#define kprobe_handle_illslot(pc)	(-1)
+
+#endif /* CONFIG_KPROBES */
+#endif /* __ASM_SH_KPROBES_H */
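These definitions only provide the architecture glue (breakpoint opcode, saved-state layout, fault hooks); probes themselves are still registered through the generic kprobes API. A hedged sketch of a module using it, with the probed symbol and messages chosen purely for illustration:

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "hit %s, sp=%08lx\n", p->symbol_name, regs->regs[15]);
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",		/* probed function, for illustration */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");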
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index b2e4124..f1bae02 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -42,13 +42,6 @@
 	void (*mv_outsw)(unsigned long, const void *src, unsigned long count);
 	void (*mv_outsl)(unsigned long, const void *src, unsigned long count);
 
-	u8 (*mv_readb)(void __iomem *);
-	u16 (*mv_readw)(void __iomem *);
-	u32 (*mv_readl)(void __iomem *);
-	void (*mv_writeb)(u8, void __iomem *);
-	void (*mv_writew)(u16, void __iomem *);
-	void (*mv_writel)(u32, void __iomem *);
-
 	int (*mv_irq_demux)(int irq);
 
 	void (*mv_init_irq)(void);
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 2969253..7f5363b 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -4,6 +4,8 @@
 #ifdef __KERNEL__
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
+#include <linux/numa.h>
+
 extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)		(node_data[nid])
 
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 77fb8bf..5871d78 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -104,6 +104,8 @@
 
 typedef struct page *pgtable_t;
 
+#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK)
+
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index a4a8f8b..52220d7 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -76,6 +76,7 @@
 #endif
 
 #define PTE_PHYS_MASK		(PHYS_ADDR_MASK & PAGE_MASK)
+#define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
 
 #ifdef CONFIG_SUPERH32
 #define VMALLOC_START	(P3SEG)
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 15d9f92..693364a 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -3,6 +3,7 @@
 
 #include <asm/cpu-features.h>
 #include <asm/segment.h>
+#include <asm/cache.h>
 
 #ifndef __ASSEMBLY__
 /*
@@ -43,11 +44,52 @@
 	CPU_SH_NONE
 };
 
+/*
+ * TLB information structure
+ *
+ * Defined for both I and D tlb, per-processor.
+ */
+struct tlb_info {
+	unsigned long long next;
+	unsigned long long first;
+	unsigned long long last;
+
+	unsigned int entries;
+	unsigned int step;
+
+	unsigned long flags;
+};
+
+struct sh_cpuinfo {
+	unsigned int type;
+	int cut_major, cut_minor;
+	unsigned long loops_per_jiffy;
+	unsigned long asid_cache;
+
+	struct cache_info icache;	/* Primary I-cache */
+	struct cache_info dcache;	/* Primary D-cache */
+	struct cache_info scache;	/* Secondary cache */
+
+	/* TLB info */
+	struct tlb_info itlb;
+	struct tlb_info dtlb;
+
+	unsigned long flags;
+} __attribute__ ((aligned(L1_CACHE_BYTES)));
+
+extern struct sh_cpuinfo cpu_data[];
+#define boot_cpu_data cpu_data[0]
+#define current_cpu_data cpu_data[smp_processor_id()]
+#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+
 /* Forward decl */
-struct sh_cpuinfo;
+struct seq_operations;
+
+extern struct pt_regs fake_swapper_regs;
 
 /* arch/sh/kernel/setup.c */
 const char *get_cpu_subtype(struct sh_cpuinfo *c);
+extern const struct seq_operations cpuinfo_op;
 
 #ifdef CONFIG_VSYSCALL
 int vsyscall_init(void);
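With sh_cpuinfo now shared between the 32- and 64-bit ports, consumers keep using the boot_cpu_data/current_cpu_data accessors defined above. A minimal sketch, relying only on fields visible in this header and on CPU_HAS_FPU from asm/cpu-features.h (already pulled in here):

#include <linux/kernel.h>
#include <asm/processor.h>

static void example_report_cpu(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		printk(KERN_INFO "boot CPU has an FPU\n");

	printk(KERN_INFO "loops_per_jiffy: %lu\n",
	       current_cpu_data.loops_per_jiffy);
}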
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 0dadd75..a46a020 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -10,9 +10,9 @@
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/types.h>
-#include <asm/cache.h>
 #include <asm/ptrace.h>
 
 /*
@@ -26,23 +26,7 @@
 #define CCN_CVR		0xff000040
 #define CCN_PRR		0xff000044
 
-struct sh_cpuinfo {
-	unsigned int type;
-	int cut_major, cut_minor;
-	unsigned long loops_per_jiffy;
-	unsigned long asid_cache;
-
-	struct cache_info icache;	/* Primary I-cache */
-	struct cache_info dcache;	/* Primary D-cache */
-	struct cache_info scache;	/* Secondary cache */
-
-	unsigned long flags;
-} __attribute__ ((aligned(L1_CACHE_BYTES)));
-
-extern struct sh_cpuinfo cpu_data[];
-#define boot_cpu_data cpu_data[0]
-#define current_cpu_data cpu_data[smp_processor_id()]
-#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+asmlinkage void __init sh_cpu_init(void);
 
 /*
  * User space process size: 2GB.
@@ -196,6 +180,8 @@
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
+#define user_stack_pointer(regs)	((regs)->regs[15])
+
 #define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()	barrier()
 
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 770d516..b0b4824 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -17,7 +17,6 @@
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/types.h>
-#include <asm/cache.h>
 #include <asm/ptrace.h>
 #include <cpu/registers.h>
 
@@ -36,46 +35,6 @@
 	: "1" (__dummy)); \
 pc; })
 
-/*
- * TLB information structure
- *
- * Defined for both I and D tlb, per-processor.
- */
-struct tlb_info {
-	unsigned long long next;
-	unsigned long long first;
-	unsigned long long last;
-
-	unsigned int entries;
-	unsigned int step;
-
-	unsigned long flags;
-};
-
-struct sh_cpuinfo {
-	enum cpu_type type;
-	unsigned long loops_per_jiffy;
-	unsigned long asid_cache;
-
-	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
-
-	/* Cache info */
-	struct cache_info icache;
-	struct cache_info dcache;
-	struct cache_info scache;
-
-	/* TLB info */
-	struct tlb_info itlb;
-	struct tlb_info dtlb;
-
-	unsigned long flags;
-};
-
-extern struct sh_cpuinfo cpu_data[];
-#define boot_cpu_data cpu_data[0]
-#define current_cpu_data cpu_data[smp_processor_id()]
-#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
-
 #endif
 
 /*
@@ -169,8 +128,6 @@
 #define INIT_MMAP \
 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
 
-extern  struct pt_regs fake_swapper_regs;
-
 #define INIT_THREAD  {				\
 	.sp		= sizeof(init_stack) +	\
 			  (long) &init_stack,	\
@@ -269,6 +226,8 @@
 #define KSTK_EIP(tsk)  ((tsk)->thread.pc)
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
+#define user_stack_pointer(regs)	((regs)->sp)
+
 #define cpu_relax()	barrier()
 
 #endif	/* __ASSEMBLY__ */
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index b86aeab..3ad18e9 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -87,12 +87,18 @@
 	unsigned long	mod;
 };
 
+#define PTRACE_GETREGS		12	/* General registers */
+#define PTRACE_SETREGS		13
+
+#define PTRACE_GETFPREGS	14	/* FPU registers */
+#define PTRACE_SETFPREGS	15
+
 #define PTRACE_GETFDPIC		31	/* get the ELF fdpic loadmap address */
 
 #define PTRACE_GETFDPIC_EXEC	0	/* [addr] request the executable loadmap */
 #define PTRACE_GETFDPIC_INTERP	1	/* [addr] request the interpreter loadmap */
 
-#define	PTRACE_GETDSPREGS	55
+#define	PTRACE_GETDSPREGS	55	/* DSP registers */
 #define	PTRACE_SETDSPREGS	56
 #endif
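For reference, the new GETREGS/SETREGS requests (and the FP variants) follow the usual ptrace calling convention, with the register block passed through the data argument. A hedged userspace sketch, assuming the tracee is already stopped under ptrace and that struct pt_regs from this header describes the block returned by PTRACE_GETREGS:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_regs; include after sys/ptrace.h */

static void example_dump_pc(pid_t pid)
{
	struct pt_regs regs;

	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
		printf("pc = 0x%08lx\n", regs.pc);
}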
 
@@ -117,6 +123,9 @@
 #define task_pt_regs(task) \
 	((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
 		 - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+#define task_pt_dspregs(task) \
+	((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE \
+		 - sizeof(unsigned long)) - 1)
 #else
 #define task_pt_regs(task) \
 	((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
diff --git a/arch/sh/include/asm/rtc.h b/arch/sh/include/asm/rtc.h
index 1813f42..f7b010d 100644
--- a/arch/sh/include/asm/rtc.h
+++ b/arch/sh/include/asm/rtc.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_RTC_H
 #define _ASM_RTC_H
 
+void time_init(void);
 extern void (*board_time_init)(void);
 extern void (*rtc_sh_get_time)(struct timespec *);
 extern int (*rtc_sh_set_time)(const time_t);
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index 55a2bd3..d450bcf 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -4,7 +4,6 @@
 #define COMMAND_LINE_SIZE 256
 
 #ifdef __KERNEL__
-
 /*
  * This is set up by the setup-routine at boot-time
  */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
new file mode 100644
index 0000000..3a1fb97
--- /dev/null
+++ b/arch/sh/include/asm/sizes.h
@@ -0,0 +1,61 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/* DO NOT EDIT!! - this file automatically generated
+ *                 from .s file by awk -f s2h.awk
+ */
+/*  Size definitions
+ *  Copyright (C) ARM Limited 1998. All rights reserved.
+ */
+
+#ifndef __sizes_h
+#define __sizes_h                       1
+
+/* handy sizes */
+#define SZ_16				0x00000010
+#define SZ_32				0x00000020
+#define SZ_64				0x00000040
+#define SZ_128				0x00000080
+#define SZ_256				0x00000100
+#define SZ_512				0x00000200
+
+#define SZ_1K                           0x00000400
+#define SZ_4K                           0x00001000
+#define SZ_8K                           0x00002000
+#define SZ_16K                          0x00004000
+#define SZ_32K				0x00008000
+#define SZ_64K                          0x00010000
+#define SZ_128K                         0x00020000
+#define SZ_256K                         0x00040000
+#define SZ_512K                         0x00080000
+
+#define SZ_1M                           0x00100000
+#define SZ_2M                           0x00200000
+#define SZ_4M                           0x00400000
+#define SZ_8M                           0x00800000
+#define SZ_16M                          0x01000000
+#define SZ_26M				0x01a00000
+#define SZ_32M                          0x02000000
+#define SZ_64M                          0x04000000
+#define SZ_128M                         0x08000000
+#define SZ_256M                         0x10000000
+#define SZ_512M                         0x20000000
+
+#define SZ_1G                           0x40000000
+#define SZ_2G                           0x80000000
+
+#endif
+
+/*         END */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 593343c..85b660c 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -21,25 +21,29 @@
 extern int __cpu_logical_map[NR_CPUS];
 #define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
 
-/* I've no idea what the real meaning of this is */
-#define PROC_CHANGE_PENALTY	20
+enum {
+	SMP_MSG_FUNCTION,
+	SMP_MSG_RESCHEDULE,
+	SMP_MSG_FUNCTION_SINGLE,
+	SMP_MSG_TIMER,
 
-#define NO_PROC_ID	(-1)
+	SMP_MSG_NR,	/* must be last */
+};
 
-#define SMP_MSG_FUNCTION	0
-#define SMP_MSG_RESCHEDULE	1
-#define SMP_MSG_FUNCTION_SINGLE	2
-#define SMP_MSG_NR		3
+void smp_message_recv(unsigned int msg);
+void smp_timer_broadcast(cpumask_t mask);
+
+void local_timer_interrupt(void);
+void local_timer_setup(unsigned int cpu);
 
 void plat_smp_setup(void);
 void plat_prepare_cpus(unsigned int max_cpus);
 int plat_smp_processor_id(void);
 void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
 void plat_send_ipi(unsigned int cpu, unsigned int message);
-int plat_register_ipi_handler(unsigned int message,
-			      void (*handler)(void *), void *arg);
-extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi(cpumask_t mask);
 
 #else
 
diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h
new file mode 100644
index 0000000..6a38142
--- /dev/null
+++ b/arch/sh/include/asm/syscall.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_SH_SYSCALL_H
+#define __ASM_SH_SYSCALL_H
+
+#ifdef CONFIG_SUPERH32
+# include "syscall_32.h"
+#else
+# include "syscall_64.h"
+#endif
+
+#endif /* __ASM_SH_SYSCALL_H */
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
new file mode 100644
index 0000000..54773f2
--- /dev/null
+++ b/arch/sh/include/asm/syscall_32.h
@@ -0,0 +1,110 @@
+#ifndef __ASM_SH_SYSCALL_32_H
+#define __ASM_SH_SYSCALL_32_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/* The system call number is given by the user in R3 (see regs->regs[3]) */
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	return (regs->tra >= 0) ? regs->regs[3] : -1L;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	/*
+	 * XXX: This needs some thought. On SH we don't
+	 * save away the original r0 value anywhere.
+	 */
+}
+
+static inline bool syscall_has_error(struct pt_regs *regs)
+{
+	return (regs->sr & 0x1) ? true : false;
+}
+static inline void syscall_set_error(struct pt_regs *regs)
+{
+	regs->sr |= 0x1;
+}
+static inline void syscall_clear_error(struct pt_regs *regs)
+{
+	regs->sr &= ~0x1;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	return syscall_has_error(regs) ? regs->regs[0] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->regs[0];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	if (error) {
+		syscall_set_error(regs);
+		regs->regs[0] = -error;
+	} else {
+		syscall_clear_error(regs);
+		regs->regs[0] = val;
+	}
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	/*
+	 * Do this simply for now. If we need to start supporting
+	 * fetching arguments from arbitrary indices, this will need some
+	 * extra logic. Presently there are no in-tree users that depend
+	 * on this behaviour.
+	 */
+	BUG_ON(i);
+
+	/* Argument pattern is: R4, R5, R6, R7, R0, R1 */
+	switch (n) {
+	case 6: args[5] = regs->regs[1];
+	case 5: args[4] = regs->regs[0];
+	case 4: args[3] = regs->regs[7];
+	case 3: args[2] = regs->regs[6];
+	case 2: args[1] = regs->regs[5];
+	case 1:	args[0] = regs->regs[4];
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	/* Same note as above applies */
+	BUG_ON(i);
+
+	switch (n) {
+	case 6: regs->regs[1] = args[5];
+	case 5: regs->regs[0] = args[4];
+	case 4: regs->regs[7] = args[3];
+	case 3: regs->regs[6] = args[2];
+	case 2: regs->regs[5] = args[1];
+	case 1: regs->regs[4] = args[0];
+		break;
+	default:
+		BUG();
+	}
+}
+
+#endif /* __ASM_SH_SYSCALL_32_H */
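Together these helpers give generic code (tracing hooks, for instance) a uniform view of the SH-4 syscall ABI: number in R3, arguments in R4-R7 then R0/R1, and the error condition carried in the low bit of SR. A minimal sketch of a caller, with the function name and printk purely illustrative:

#include <asm/syscall.h>

static void example_trace_entry(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr < 0)
		return;				/* not inside a system call */

	/* i must be 0; fetching from other offsets is BUG()ed out above */
	syscall_get_arguments(task, regs, 0, 6, args);
	printk(KERN_DEBUG "sys_%ld(%lx, %lx, ...)\n", nr, args[0], args[1]);
}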
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
new file mode 100644
index 0000000..bcaaa8c
--- /dev/null
+++ b/arch/sh/include/asm/syscall_64.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH_SYSCALL_64_H
+#define __ASM_SH_SYSCALL_64_H
+
+#include <asm-generic/syscall.h>
+
+#endif /* __ASM_SH_SYSCALL_64_H */
diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h
new file mode 100644
index 0000000..c1e2b8d
--- /dev/null
+++ b/arch/sh/include/asm/syscalls.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_SH_SYSCALLS_H
+#define __ASM_SH_SYSCALLS_H
+
+#ifdef __KERNEL__
+
+struct old_utsname;
+
+asmlinkage int old_mmap(unsigned long addr, unsigned long len,
+			unsigned long prot, unsigned long flags,
+			int fd, unsigned long off);
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+			  unsigned long prot, unsigned long flags,
+			  unsigned long fd, unsigned long pgoff);
+asmlinkage int sys_ipc(uint call, int first, int second,
+		       int third, void __user *ptr, long fifth);
+asmlinkage int sys_uname(struct old_utsname __user *name);
+
+#ifdef CONFIG_SUPERH32
+# include "syscalls_32.h"
+#else
+# include "syscalls_64.h"
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_SYSCALLS_H */
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
new file mode 100644
index 0000000..104c5e6
--- /dev/null
+++ b/arch/sh/include/asm/syscalls_32.h
@@ -0,0 +1,56 @@
+#ifndef __ASM_SH_SYSCALLS_32_H
+#define __ASM_SH_SYSCALLS_32_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
+			unsigned long r6, unsigned long r7,
+			struct pt_regs __regs);
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+			 unsigned long parent_tidptr,
+			 unsigned long child_tidptr,
+			 struct pt_regs __regs);
+asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7,
+			 struct pt_regs __regs);
+asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
+			  char __user * __user *uenvp, unsigned long r7,
+			  struct pt_regs __regs);
+asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5,
+			      unsigned long r6, unsigned long r7,
+			      struct pt_regs __regs);
+asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act,
+			     struct old_sigaction __user *oact);
+asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+			       unsigned long r6, unsigned long r7,
+			       struct pt_regs __regs);
+asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
+			     unsigned long r6, unsigned long r7,
+			     struct pt_regs __regs);
+asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs);
+asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+			unsigned long r6, unsigned long r7,
+			struct pt_regs __regs);
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
+				     size_t count, long dummy, loff_t pos);
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
+				      size_t count, long dummy, loff_t pos);
+asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+					u32 len0, u32 len1, int advice);
+
+/* Misc syscall related bits */
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
+				 unsigned long thread_info_flags);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_SYSCALLS_32_H */
diff --git a/arch/sh/include/asm/syscalls_64.h b/arch/sh/include/asm/syscalls_64.h
new file mode 100644
index 0000000..751fd88
--- /dev/null
+++ b/arch/sh/include/asm/syscalls_64.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_SH_SYSCALLS_64_H
+#define __ASM_SH_SYSCALLS_64_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
+			unsigned long r4, unsigned long r5,
+			unsigned long r6, unsigned long r7,
+			struct pt_regs *pregs);
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+			 unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7,
+			 struct pt_regs *pregs);
+asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
+			 unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7,
+			 struct pt_regs *pregs);
+asmlinkage int sys_execve(char *ufilename, char **uargv,
+			  char **uenvp, unsigned long r5,
+			  unsigned long r6, unsigned long r7,
+			  struct pt_regs *pregs);
+
+/* Misc syscall related bits */
+asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_SYSCALLS_64_H */
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 056d68c..6160fe4 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -70,6 +70,8 @@
 
 #ifdef CONFIG_GUSA_RB
 #include <asm/cmpxchg-grb.h>
+#elif defined(CONFIG_CPU_SH4A)
+#include <asm/cmpxchg-llsc.h>
 #else
 #include <asm/cmpxchg-irq.h>
 #endif
@@ -125,6 +127,8 @@
   })
 
 extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
+void free_initmem(void);
+void free_initrd_mem(unsigned long start, unsigned long end);
 
 extern void *set_exception_table_vec(unsigned int vec, void *handler);
 
@@ -177,8 +181,8 @@
 #define arch_align_stack(x) (x)
 
 struct mem_access {
-	unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
-	unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
+	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
+	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
 };
 
 #ifdef CONFIG_SUPERH32
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index f11bcf0..a726d5d 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -58,7 +58,8 @@
 	last = __last;							\
 } while (0)
 
-#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
+#define __uses_jump_to_uncached \
+	noinline __attribute__ ((__section__ (".uncached.text")))
 
 /*
  * Jump to uncached area.
@@ -96,7 +97,48 @@
 		: "=&r" (__dummy));			\
 } while (0)
 
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector()	\
+({					\
+	unsigned long _vec;		\
+					\
+	__asm__ __volatile__ (		\
+		"stc r2_bank, %0\n\t"	\
+		: "=r" (_vec)		\
+	);				\
+					\
+	_vec;				\
+})
+#else
+#define lookup_exception_vector()	\
+({					\
+	unsigned long _vec;		\
+	__asm__ __volatile__ (		\
+		"mov r4, %0\n\t"	\
+		: "=r" (_vec)		\
+	);				\
+					\
+	_vec;				\
+})
+#endif
+
 int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
 			    struct mem_access *ma);
 
+asmlinkage void do_address_error(struct pt_regs *regs,
+				 unsigned long writeaccess,
+				 unsigned long address);
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs);
+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs);
+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs);
+asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
+				   unsigned long r6, unsigned long r7,
+				   struct pt_regs __regs);
+
 #endif /* __ASM_SH_SYSTEM_32_H */
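lookup_exception_vector() hands C-level trap handlers the vector number recorded by the low-level exception entry code (read from r2_bank on parts with register banks, from r4 otherwise). A minimal sketch of a caller, assuming it runs in exception context; the handler name and message are illustrative only:

#include <linux/kernel.h>
#include <asm/system.h>

static void example_report_trap(void)
{
	unsigned long vec = lookup_exception_vector();

	printk(KERN_ERR "unhandled exception, vector 0x%04lx\n", vec);
}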
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 0a894ca..f09ac48 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -33,20 +33,12 @@
 #define PREEMPT_ACTIVE		0x10000000
 
 #if defined(CONFIG_4KSTACKS)
-#define THREAD_SIZE_ORDER	(0)
-#elif defined(CONFIG_PAGE_SIZE_4KB)
-#define THREAD_SIZE_ORDER	(1)
-#elif defined(CONFIG_PAGE_SIZE_8KB)
-#define THREAD_SIZE_ORDER	(1)
-#elif defined(CONFIG_PAGE_SIZE_16KB)
-#define THREAD_SIZE_ORDER	(0)
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-#define THREAD_SIZE_ORDER	(0)
+#define THREAD_SHIFT	12
 #else
-#error "Unknown thread size"
+#define THREAD_SHIFT	13
 #endif
 
-#define THREAD_SIZE	(PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SIZE	(1 << THREAD_SHIFT)
 #define STACK_WARN	(THREAD_SIZE >> 3)
 
 /*
@@ -94,15 +86,19 @@
 	return ti;
 }
 
+/* thread information allocation */
+#if THREAD_SHIFT >= PAGE_SHIFT
+
+#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
+
+#else /* THREAD_SHIFT < PAGE_SHIFT */
+
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(ti)	kzalloc(THREAD_SIZE, GFP_KERNEL)
-#else
-#define alloc_thread_info(ti)	kmalloc(THREAD_SIZE, GFP_KERNEL)
-#endif
-#define free_thread_info(ti)	kfree(ti)
+extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
+extern void free_thread_info(struct thread_info *ti);
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
 
 #endif /* __ASSEMBLY__ */
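For scale: THREAD_SIZE is now simply 1 << THREAD_SHIFT, i.e. 8 KiB by default or 4 KiB with CONFIG_4KSTACKS. With 4 KiB pages (PAGE_SHIFT = 12) the default case gives THREAD_SIZE_ORDER = 13 - 12 = 1, a two-page stack; with 64 KiB pages THREAD_SHIFT drops below PAGE_SHIFT, the order-based path is compiled out, and stacks come from the out-of-line alloc_thread_info()/free_thread_info() pair declared above so that each one no longer has to consume a whole page.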
 
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 5580fd4..56fd20b 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -26,16 +26,20 @@
 	retval = 0;						\
 	switch (size) {						\
 	case 1:							\
-		retval = __get_user_asm_b(x, ptr);		\
+		retval = __get_user_asm_b((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	case 2:							\
-		retval = __get_user_asm_w(x, ptr);		\
+		retval = __get_user_asm_w((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	case 4:							\
-		retval = __get_user_asm_l(x, ptr);		\
+		retval = __get_user_asm_l((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	case 8:							\
-		retval = __get_user_asm_q(x, ptr);		\
+		retval = __get_user_asm_q((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	default:						\
 		__get_user_unknown();				\
@@ -54,16 +58,20 @@
 	retval = 0;						\
 	switch (size) {						\
 	case 1:							\
-		retval = __put_user_asm_b(x, ptr);		\
+		retval = __put_user_asm_b((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	case 2:							\
-		retval = __put_user_asm_w(x, ptr);		\
+		retval = __put_user_asm_w((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	case 4:							\
-		retval = __put_user_asm_l(x, ptr);		\
+		retval = __put_user_asm_l((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	case 8:							\
-		retval = __put_user_asm_q(x, ptr);		\
+		retval = __put_user_asm_q((void *)&x,		\
+					  (long)ptr);		\
 		break;						\
 	default:						\
 		__put_user_unknown();				\
@@ -77,5 +85,7 @@
 extern void __put_user_unknown(void);
 
 extern long __strnlen_user(const char *__s, long __n);
+extern int __strncpy_from_user(unsigned long __dest,
+	       unsigned long __user __src, int __count);
 
 #endif /* __ASM_SH_UACCESS_64_H */
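The asm helpers above are only the size-dispatch layer; normal kernel code keeps going through get_user()/put_user(), which pick the right helper from sizeof(*ptr). A minimal sketch of a typical caller (the function shape is invented for the example):

#include <linux/errno.h>
#include <asm/uaccess.h>

static int example_read_word(int __user *uptr, int *out)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;		/* faulted on the user pointer */

	*out = val;
	return 0;
}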
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7203.h b/arch/sh/include/cpu-sh2a/cpu/sh7203.h
new file mode 100644
index 0000000..79f9315
--- /dev/null
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7203.h
@@ -0,0 +1,143 @@
+#ifndef __ASM_SH7203_H__
+#define __ASM_SH7203_H__
+
+enum {
+	/* PA */
+	GPIO_PA7, GPIO_PA6, GPIO_PA5, GPIO_PA4,
+	GPIO_PA3, GPIO_PA2, GPIO_PA1, GPIO_PA0,
+
+	/* PB */
+	GPIO_PB12,
+	GPIO_PB11, GPIO_PB10, GPIO_PB9, GPIO_PB8,
+	GPIO_PB7, GPIO_PB6, GPIO_PB5, GPIO_PB4,
+	GPIO_PB3, GPIO_PB2, GPIO_PB1, GPIO_PB0,
+
+	/* PC */
+	GPIO_PC14, GPIO_PC13, GPIO_PC12,
+	GPIO_PC11, GPIO_PC10, GPIO_PC9, GPIO_PC8,
+	GPIO_PC7, GPIO_PC6, GPIO_PC5, GPIO_PC4,
+	GPIO_PC3, GPIO_PC2, GPIO_PC1, GPIO_PC0,
+
+	/* PD */
+	GPIO_PD15, GPIO_PD14, GPIO_PD13, GPIO_PD12,
+	GPIO_PD11, GPIO_PD10, GPIO_PD9, GPIO_PD8,
+	GPIO_PD7, GPIO_PD6, GPIO_PD5, GPIO_PD4,
+	GPIO_PD3, GPIO_PD2, GPIO_PD1, GPIO_PD0,
+
+	/* PE */
+	GPIO_PE15, GPIO_PE14, GPIO_PE13, GPIO_PE12,
+	GPIO_PE11, GPIO_PE10, GPIO_PE9, GPIO_PE8,
+	GPIO_PE7, GPIO_PE6, GPIO_PE5, GPIO_PE4,
+	GPIO_PE3, GPIO_PE2, GPIO_PE1, GPIO_PE0,
+
+	/* PF */
+	GPIO_PF30, GPIO_PF29, GPIO_PF28,
+	GPIO_PF27, GPIO_PF26, GPIO_PF25, GPIO_PF24,
+	GPIO_PF23, GPIO_PF22, GPIO_PF21, GPIO_PF20,
+	GPIO_PF19, GPIO_PF18, GPIO_PF17, GPIO_PF16,
+	GPIO_PF15, GPIO_PF14, GPIO_PF13, GPIO_PF12,
+	GPIO_PF11, GPIO_PF10, GPIO_PF9, GPIO_PF8,
+	GPIO_PF7, GPIO_PF6, GPIO_PF5, GPIO_PF4,
+	GPIO_PF3, GPIO_PF2, GPIO_PF1, GPIO_PF0,
+
+	/* INTC: IRQ and PINT on PB/PD/PE */
+	GPIO_FN_PINT7_PB, GPIO_FN_PINT6_PB, GPIO_FN_PINT5_PB, GPIO_FN_PINT4_PB,
+	GPIO_FN_PINT3_PB, GPIO_FN_PINT2_PB, GPIO_FN_PINT1_PB, GPIO_FN_PINT0_PB,
+	GPIO_FN_PINT7_PD, GPIO_FN_PINT6_PD, GPIO_FN_PINT5_PD, GPIO_FN_PINT4_PD,
+	GPIO_FN_PINT3_PD, GPIO_FN_PINT2_PD, GPIO_FN_PINT1_PD, GPIO_FN_PINT0_PD,
+	GPIO_FN_IRQ7_PB, GPIO_FN_IRQ6_PB, GPIO_FN_IRQ5_PB, GPIO_FN_IRQ4_PB,
+	GPIO_FN_IRQ3_PB, GPIO_FN_IRQ2_PB, GPIO_FN_IRQ1_PB, GPIO_FN_IRQ0_PB,
+	GPIO_FN_IRQ7_PD, GPIO_FN_IRQ6_PD, GPIO_FN_IRQ5_PD, GPIO_FN_IRQ4_PD,
+	GPIO_FN_IRQ3_PD, GPIO_FN_IRQ2_PD, GPIO_FN_IRQ1_PD, GPIO_FN_IRQ0_PD,
+	GPIO_FN_IRQ7_PE, GPIO_FN_IRQ6_PE, GPIO_FN_IRQ5_PE, GPIO_FN_IRQ4_PE,
+	GPIO_FN_IRQ3_PE, GPIO_FN_IRQ2_PE, GPIO_FN_IRQ1_PE, GPIO_FN_IRQ0_PE,
+
+	GPIO_FN_WDTOVF, GPIO_FN_IRQOUT, GPIO_FN_REFOUT, GPIO_FN_IRQOUT_REFOUT,
+	GPIO_FN_UBCTRG,
+
+	/* CAN */
+	GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
+	GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1,
+
+	/* IIC3 */
+	GPIO_FN_SDA3, GPIO_FN_SCL3,
+	GPIO_FN_SDA2, GPIO_FN_SCL2,
+	GPIO_FN_SDA1, GPIO_FN_SCL1,
+	GPIO_FN_SDA0, GPIO_FN_SCL0,
+
+	/* DMAC */
+	GPIO_FN_TEND0_PD, GPIO_FN_TEND0_PE, GPIO_FN_DACK0_PD,
+	GPIO_FN_DACK0_PE, GPIO_FN_DREQ0_PD, GPIO_FN_DREQ0_PE,
+	GPIO_FN_TEND1_PD, GPIO_FN_TEND1_PE, GPIO_FN_DACK1_PD,
+	GPIO_FN_DACK1_PE, GPIO_FN_DREQ1_PD, GPIO_FN_DREQ1_PE,
+	GPIO_FN_DACK2, GPIO_FN_DREQ2,
+	GPIO_FN_DACK3, GPIO_FN_DREQ3,
+
+	/* ADC */
+	GPIO_FN_ADTRG_PD, GPIO_FN_ADTRG_PE,
+
+	/* BSC */
+	GPIO_FN_D31, GPIO_FN_D30, GPIO_FN_D29, GPIO_FN_D28,
+	GPIO_FN_D27, GPIO_FN_D26, GPIO_FN_D25, GPIO_FN_D24,
+	GPIO_FN_D23, GPIO_FN_D22, GPIO_FN_D21, GPIO_FN_D20,
+	GPIO_FN_D19, GPIO_FN_D18, GPIO_FN_D17, GPIO_FN_D16,
+	GPIO_FN_A25, GPIO_FN_A24, GPIO_FN_A23, GPIO_FN_A22,
+	GPIO_FN_A21, GPIO_FN_CS4, GPIO_FN_MRES, GPIO_FN_BS,
+	GPIO_FN_IOIS16, GPIO_FN_CS1, GPIO_FN_CS6_CE1B,
+	GPIO_FN_CE2B, GPIO_FN_CS5_CE1A, GPIO_FN_CE2A,
+	GPIO_FN_FRAME, GPIO_FN_WAIT, GPIO_FN_RDWR,
+	GPIO_FN_CKE, GPIO_FN_CASU, GPIO_FN_BREQ, GPIO_FN_RASU,
+	GPIO_FN_BACK, GPIO_FN_CASL, GPIO_FN_RASL,
+	GPIO_FN_WE3_DQMUU_AH_ICIO_WR, GPIO_FN_WE2_DQMUL_ICIORD,
+	GPIO_FN_WE1_DQMLU_WE, GPIO_FN_WE0_DQMLL,
+	GPIO_FN_CS3, GPIO_FN_CS2, GPIO_FN_A1, GPIO_FN_A0, GPIO_FN_CS7,
+
+	/* TMU */
+	GPIO_FN_TIOC4D, GPIO_FN_TIOC4C, GPIO_FN_TIOC4B, GPIO_FN_TIOC4A,
+	GPIO_FN_TIOC3D, GPIO_FN_TIOC3C, GPIO_FN_TIOC3B, GPIO_FN_TIOC3A,
+	GPIO_FN_TIOC2B, GPIO_FN_TIOC1B, GPIO_FN_TIOC2A, GPIO_FN_TIOC1A,
+	GPIO_FN_TIOC0D, GPIO_FN_TIOC0C, GPIO_FN_TIOC0B, GPIO_FN_TIOC0A,
+	GPIO_FN_TCLKD_PD, GPIO_FN_TCLKC_PD, GPIO_FN_TCLKB_PD, GPIO_FN_TCLKA_PD,
+	GPIO_FN_TCLKD_PF, GPIO_FN_TCLKC_PF, GPIO_FN_TCLKB_PF, GPIO_FN_TCLKA_PF,
+
+	/* SSU */
+	GPIO_FN_SCS0_PD, GPIO_FN_SSO0_PD, GPIO_FN_SSI0_PD, GPIO_FN_SSCK0_PD,
+	GPIO_FN_SCS0_PF, GPIO_FN_SSO0_PF, GPIO_FN_SSI0_PF, GPIO_FN_SSCK0_PF,
+	GPIO_FN_SCS1_PD, GPIO_FN_SSO1_PD, GPIO_FN_SSI1_PD, GPIO_FN_SSCK1_PD,
+	GPIO_FN_SCS1_PF, GPIO_FN_SSO1_PF, GPIO_FN_SSI1_PF, GPIO_FN_SSCK1_PF,
+
+	/* SCIF */
+	GPIO_FN_TXD0, GPIO_FN_RXD0, GPIO_FN_SCK0,
+	GPIO_FN_TXD1, GPIO_FN_RXD1, GPIO_FN_SCK1,
+	GPIO_FN_TXD2, GPIO_FN_RXD2, GPIO_FN_SCK2,
+	GPIO_FN_RTS3, GPIO_FN_CTS3, GPIO_FN_TXD3, GPIO_FN_RXD3, GPIO_FN_SCK3,
+
+	/* SSI */
+	GPIO_FN_AUDIO_CLK,
+	GPIO_FN_SSIDATA3, GPIO_FN_SSIWS3, GPIO_FN_SSISCK3,
+	GPIO_FN_SSIDATA2, GPIO_FN_SSIWS2, GPIO_FN_SSISCK2,
+	GPIO_FN_SSIDATA1, GPIO_FN_SSIWS1, GPIO_FN_SSISCK1,
+	GPIO_FN_SSIDATA0, GPIO_FN_SSIWS0, GPIO_FN_SSISCK0,
+
+	/* FLCTL */
+	GPIO_FN_FCE, GPIO_FN_FRB,
+	GPIO_FN_NAF7, GPIO_FN_NAF6, GPIO_FN_NAF5, GPIO_FN_NAF4,
+	GPIO_FN_NAF3, GPIO_FN_NAF2, GPIO_FN_NAF1, GPIO_FN_NAF0,
+	GPIO_FN_FSC, GPIO_FN_FOE, GPIO_FN_FCDE, GPIO_FN_FWE,
+
+	/* LCDC */
+	GPIO_FN_LCD_VEPWC, GPIO_FN_LCD_VCPWC,
+	GPIO_FN_LCD_CLK, GPIO_FN_LCD_FLM,
+	GPIO_FN_LCD_M_DISP, GPIO_FN_LCD_CL2,
+	GPIO_FN_LCD_CL1, GPIO_FN_LCD_DON,
+	GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14,
+	GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12,
+	GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10,
+	GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8,
+	GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6,
+	GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4,
+	GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2,
+	GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0,
+};
+
+#endif /* __ASM_SH7203_H__ */
diff --git a/arch/sh/include/cpu-sh3/cpu/sh7720.h b/arch/sh/include/cpu-sh3/cpu/sh7720.h
new file mode 100644
index 0000000..41c1406
--- /dev/null
+++ b/arch/sh/include/cpu-sh3/cpu/sh7720.h
@@ -0,0 +1,174 @@
+#ifndef __ASM_SH7720_H__
+#define __ASM_SH7720_H__
+
+enum {
+	/* PTA */
+	GPIO_PTA7, GPIO_PTA6, GPIO_PTA5, GPIO_PTA4,
+	GPIO_PTA3, GPIO_PTA2, GPIO_PTA1, GPIO_PTA0,
+
+	/* PTB */
+	GPIO_PTB7, GPIO_PTB6, GPIO_PTB5, GPIO_PTB4,
+	GPIO_PTB3, GPIO_PTB2, GPIO_PTB1, GPIO_PTB0,
+
+	/* PTC */
+	GPIO_PTC7, GPIO_PTC6, GPIO_PTC5, GPIO_PTC4,
+	GPIO_PTC3, GPIO_PTC2, GPIO_PTC1, GPIO_PTC0,
+
+	/* PTD */
+	GPIO_PTD7, GPIO_PTD6, GPIO_PTD5, GPIO_PTD4,
+	GPIO_PTD3, GPIO_PTD2, GPIO_PTD1, GPIO_PTD0,
+
+	/* PTE */
+	GPIO_PTE6, GPIO_PTE5, GPIO_PTE4, GPIO_PTE3,
+	GPIO_PTE2, GPIO_PTE1, GPIO_PTE0,
+
+	/* PTF */
+	GPIO_PTF6, GPIO_PTF5, GPIO_PTF4, GPIO_PTF3,
+	GPIO_PTF2, GPIO_PTF1, GPIO_PTF0,
+
+	/* PTG */
+	GPIO_PTG6, GPIO_PTG5, GPIO_PTG4, GPIO_PTG3, GPIO_PTG2,
+	GPIO_PTG1, GPIO_PTG0,
+
+	/* PTH */
+	GPIO_PTH6, GPIO_PTH5, GPIO_PTH4, GPIO_PTH3,
+	GPIO_PTH2, GPIO_PTH1, GPIO_PTH0,
+
+	/* PTJ */
+	GPIO_PTJ6, GPIO_PTJ5, GPIO_PTJ4, GPIO_PTJ3,
+	GPIO_PTJ2, GPIO_PTJ1, GPIO_PTJ0,
+
+	/* PTK */
+	GPIO_PTK3, GPIO_PTK2, GPIO_PTK1, GPIO_PTK0,
+
+	/* PTL */
+	GPIO_PTL7, GPIO_PTL6, GPIO_PTL5, GPIO_PTL4, GPIO_PTL3,
+
+	/* PTM */
+	GPIO_PTM7, GPIO_PTM6, GPIO_PTM5, GPIO_PTM4,
+	GPIO_PTM3, GPIO_PTM2, GPIO_PTM1, GPIO_PTM0,
+
+	/* PTP */
+	GPIO_PTP4, GPIO_PTP3, GPIO_PTP2, GPIO_PTP1, GPIO_PTP0,
+
+	/* PTR */
+	GPIO_PTR7, GPIO_PTR6, GPIO_PTR5, GPIO_PTR4,
+	GPIO_PTR3, GPIO_PTR2, GPIO_PTR1, GPIO_PTR0,
+
+	/* PTS */
+	GPIO_PTS4, GPIO_PTS3, GPIO_PTS2, GPIO_PTS1, GPIO_PTS0,
+
+	/* PTT */
+	GPIO_PTT4, GPIO_PTT3, GPIO_PTT2, GPIO_PTT1, GPIO_PTT0,
+
+	/* PTU */
+	GPIO_PTU4, GPIO_PTU3, GPIO_PTU2, GPIO_PTU1, GPIO_PTU0,
+
+	/* PTV */
+	GPIO_PTV4, GPIO_PTV3, GPIO_PTV2, GPIO_PTV1, GPIO_PTV0,
+
+	/* BSC */
+	GPIO_FN_D31, GPIO_FN_D30, GPIO_FN_D29, GPIO_FN_D28,
+	GPIO_FN_D27, GPIO_FN_D26, GPIO_FN_D25, GPIO_FN_D24,
+	GPIO_FN_D23, GPIO_FN_D22, GPIO_FN_D21, GPIO_FN_D20,
+	GPIO_FN_D19, GPIO_FN_D18, GPIO_FN_D17, GPIO_FN_D16,
+	GPIO_FN_IOIS16, GPIO_FN_RAS, GPIO_FN_CAS, GPIO_FN_CKE,
+	GPIO_FN_CS5B_CE1A, GPIO_FN_CS6B_CE1B,
+	GPIO_FN_A25, GPIO_FN_A24, GPIO_FN_A23, GPIO_FN_A22,
+	GPIO_FN_A21, GPIO_FN_A20, GPIO_FN_A19, GPIO_FN_A0,
+	GPIO_FN_REFOUT, GPIO_FN_IRQOUT,
+
+	/* LCDC */
+	GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14,
+	GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12,
+	GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10,
+	GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8,
+	GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6,
+	GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4,
+	GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2,
+	GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0,
+	GPIO_FN_LCD_M_DISP,
+	GPIO_FN_LCD_CL1, GPIO_FN_LCD_CL2,
+	GPIO_FN_LCD_DON, GPIO_FN_LCD_FLM,
+	GPIO_FN_LCD_VEPWC, GPIO_FN_LCD_VCPWC,
+
+	/* AFEIF */
+	GPIO_FN_AFE_RXIN, GPIO_FN_AFE_RDET,
+	GPIO_FN_AFE_FS, GPIO_FN_AFE_TXOUT,
+	GPIO_FN_AFE_SCLK, GPIO_FN_AFE_RLYCNT,
+	GPIO_FN_AFE_HC1,
+
+	/* IIC */
+	GPIO_FN_IIC_SCL, GPIO_FN_IIC_SDA,
+
+	/* DAC */
+	GPIO_FN_DA1, GPIO_FN_DA0,
+
+	/* ADC */
+	GPIO_FN_AN3, GPIO_FN_AN2, GPIO_FN_AN1, GPIO_FN_AN0, GPIO_FN_ADTRG,
+
+	/* USB */
+	GPIO_FN_USB1D_RCV, GPIO_FN_USB1D_TXSE0,
+	GPIO_FN_USB1D_TXDPLS, GPIO_FN_USB1D_DMNS,
+	GPIO_FN_USB1D_DPLS, GPIO_FN_USB1D_SPEED,
+	GPIO_FN_USB1D_TXENL, GPIO_FN_USB2_PWR_EN,
+	GPIO_FN_USB1_PWR_EN_USBF_UPLUP, GPIO_FN_USB1D_SUSPEND,
+
+	/* INTC */
+	GPIO_FN_IRQ5, GPIO_FN_IRQ4,
+	GPIO_FN_IRQ3_IRL3, GPIO_FN_IRQ2_IRL2,
+	GPIO_FN_IRQ1_IRL1, GPIO_FN_IRQ0_IRL0,
+
+	/* PCC */
+	GPIO_FN_PCC_REG, GPIO_FN_PCC_DRV,
+	GPIO_FN_PCC_BVD2, GPIO_FN_PCC_BVD1,
+	GPIO_FN_PCC_CD2, GPIO_FN_PCC_CD1,
+	GPIO_FN_PCC_RESET, GPIO_FN_PCC_RDY,
+	GPIO_FN_PCC_VS2, GPIO_FN_PCC_VS1,
+
+	/* HUDI */
+	GPIO_FN_AUDATA3, GPIO_FN_AUDATA2, GPIO_FN_AUDATA1, GPIO_FN_AUDATA0,
+	GPIO_FN_AUDCK, GPIO_FN_AUDSYNC, GPIO_FN_ASEBRKAK, GPIO_FN_TRST,
+	GPIO_FN_TMS, GPIO_FN_TDO, GPIO_FN_TDI, GPIO_FN_TCK,
+
+	/* DMAC */
+	GPIO_FN_DACK1, GPIO_FN_DREQ1, GPIO_FN_DACK0, GPIO_FN_DREQ0,
+	GPIO_FN_TEND1, GPIO_FN_TEND0,
+
+	/* SIOF0 */
+	GPIO_FN_SIOF0_SYNC, GPIO_FN_SIOF0_MCLK,
+	GPIO_FN_SIOF0_TXD, GPIO_FN_SIOF0_RXD,
+	GPIO_FN_SIOF0_SCK,
+
+	/* SIOF1 */
+	GPIO_FN_SIOF1_SYNC, GPIO_FN_SIOF1_MCLK,
+	GPIO_FN_SIOF1_TXD, GPIO_FN_SIOF1_RXD,
+	GPIO_FN_SIOF1_SCK,
+
+	/* SCIF0 */
+	GPIO_FN_SCIF0_TXD, GPIO_FN_SCIF0_RXD,
+	GPIO_FN_SCIF0_RTS, GPIO_FN_SCIF0_CTS, GPIO_FN_SCIF0_SCK,
+
+	/* SCIF1 */
+	GPIO_FN_SCIF1_TXD, GPIO_FN_SCIF1_RXD,
+	GPIO_FN_SCIF1_RTS, GPIO_FN_SCIF1_CTS, GPIO_FN_SCIF1_SCK,
+
+	/* TPU */
+	GPIO_FN_TPU_TO1, GPIO_FN_TPU_TO0,
+	GPIO_FN_TPU_TI3B, GPIO_FN_TPU_TI3A,
+	GPIO_FN_TPU_TI2B, GPIO_FN_TPU_TI2A,
+	GPIO_FN_TPU_TO3, GPIO_FN_TPU_TO2,
+
+	/* SIM */
+	GPIO_FN_SIM_D, GPIO_FN_SIM_CLK, GPIO_FN_SIM_RST,
+
+	/* MMC */
+	GPIO_FN_MMC_DAT, GPIO_FN_MMC_CMD,
+	GPIO_FN_MMC_CLK, GPIO_FN_MMC_VDDON,
+	GPIO_FN_MMC_ODMOD,
+
+	/* SYSC */
+	GPIO_FN_STATUS0, GPIO_FN_STATUS1,
+};
+
+#endif /* __ASM_SH7720_H__ */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
new file mode 100644
index 0000000..4b3096f
--- /dev/null
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -0,0 +1,210 @@
+#ifndef __ASM_SH7722_H__
+#define __ASM_SH7722_H__
+
+enum {
+	/* PTA */
+	GPIO_PTA7, GPIO_PTA6, GPIO_PTA5, GPIO_PTA4,
+	GPIO_PTA3, GPIO_PTA2, GPIO_PTA1, GPIO_PTA0,
+
+	/* PTB */
+	GPIO_PTB7, GPIO_PTB6, GPIO_PTB5, GPIO_PTB4,
+	GPIO_PTB3, GPIO_PTB2, GPIO_PTB1, GPIO_PTB0,
+
+	/* PTC */
+	GPIO_PTC7, GPIO_PTC5, GPIO_PTC4, GPIO_PTC3,
+	GPIO_PTC2, GPIO_PTC0,
+
+	/* PTD */
+	GPIO_PTD7, GPIO_PTD6, GPIO_PTD5, GPIO_PTD4,
+	GPIO_PTD3, GPIO_PTD2, GPIO_PTD1, GPIO_PTD0,
+
+	/* PTE */
+	GPIO_PTE7, GPIO_PTE6, GPIO_PTE5, GPIO_PTE4,
+	GPIO_PTE1, GPIO_PTE0,
+
+	/* PTF */
+	GPIO_PTF6, GPIO_PTF5, GPIO_PTF4, GPIO_PTF3,
+	GPIO_PTF2, GPIO_PTF1, GPIO_PTF0,
+
+	/* PTG */
+	GPIO_PTG4, GPIO_PTG3, GPIO_PTG2, GPIO_PTG1, GPIO_PTG0,
+
+	/* PTH */
+	GPIO_PTH7, GPIO_PTH6, GPIO_PTH5, GPIO_PTH4,
+	GPIO_PTH3, GPIO_PTH2, GPIO_PTH1, GPIO_PTH0,
+
+	/* PTJ */
+	GPIO_PTJ7, GPIO_PTJ6, GPIO_PTJ5, GPIO_PTJ1, GPIO_PTJ0,
+
+	/* PTK */
+	GPIO_PTK6, GPIO_PTK5, GPIO_PTK4, GPIO_PTK3,
+	GPIO_PTK2, GPIO_PTK1, GPIO_PTK0,
+
+	/* PTL */
+	GPIO_PTL7, GPIO_PTL6, GPIO_PTL5, GPIO_PTL4,
+	GPIO_PTL3, GPIO_PTL2, GPIO_PTL1, GPIO_PTL0,
+
+	/* PTM */
+	GPIO_PTM7, GPIO_PTM6, GPIO_PTM5, GPIO_PTM4,
+	GPIO_PTM3, GPIO_PTM2, GPIO_PTM1, GPIO_PTM0,
+
+	/* PTN */
+	GPIO_PTN7, GPIO_PTN6, GPIO_PTN5, GPIO_PTN4,
+	GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0,
+
+	/* PTQ */
+	GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
+	GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0,
+
+	/* PTR */
+	GPIO_PTR4, GPIO_PTR3, GPIO_PTR2, GPIO_PTR1, GPIO_PTR0,
+
+	/* PTS */
+	GPIO_PTS4, GPIO_PTS3, GPIO_PTS2, GPIO_PTS1, GPIO_PTS0,
+
+	/* PTT */
+	GPIO_PTT4, GPIO_PTT3, GPIO_PTT2, GPIO_PTT1, GPIO_PTT0,
+
+	/* PTU */
+	GPIO_PTU4, GPIO_PTU3, GPIO_PTU2, GPIO_PTU1, GPIO_PTU0,
+
+	/* PTV */
+	GPIO_PTV4, GPIO_PTV3, GPIO_PTV2, GPIO_PTV1, GPIO_PTV0,
+
+	/* PTW */
+	GPIO_PTW6, GPIO_PTW5, GPIO_PTW4, GPIO_PTW3,
+	GPIO_PTW2, GPIO_PTW1, GPIO_PTW0,
+
+	/* PTX */
+	GPIO_PTX6, GPIO_PTX5, GPIO_PTX4, GPIO_PTX3,
+	GPIO_PTX2, GPIO_PTX1, GPIO_PTX0,
+
+	/* PTY */
+	GPIO_PTY5, GPIO_PTY4, GPIO_PTY3, GPIO_PTY2,
+	GPIO_PTY1, GPIO_PTY0,
+
+	/* PTZ */
+	GPIO_PTZ5, GPIO_PTZ4, GPIO_PTZ3, GPIO_PTZ2, GPIO_PTZ1,
+
+	/* SCIF0 */
+	GPIO_FN_SCIF0_TXD, GPIO_FN_SCIF0_RXD,
+	GPIO_FN_SCIF0_RTS, GPIO_FN_SCIF0_CTS, GPIO_FN_SCIF0_SCK,
+
+	/* SCIF1 */
+	GPIO_FN_SCIF1_TXD, GPIO_FN_SCIF1_RXD,
+	GPIO_FN_SCIF1_RTS, GPIO_FN_SCIF1_CTS, GPIO_FN_SCIF1_SCK,
+
+	/* SCIF2 */
+	GPIO_FN_SCIF2_TXD, GPIO_FN_SCIF2_RXD,
+	GPIO_FN_SCIF2_RTS, GPIO_FN_SCIF2_CTS, GPIO_FN_SCIF2_SCK,
+
+	/* SIO */
+	GPIO_FN_SIOTXD, GPIO_FN_SIORXD,
+	GPIO_FN_SIOD, GPIO_FN_SIOSTRB0, GPIO_FN_SIOSTRB1,
+	GPIO_FN_SIOSCK, GPIO_FN_SIOMCK,
+
+	/* CEU */
+	GPIO_FN_VIO_D15, GPIO_FN_VIO_D14, GPIO_FN_VIO_D13, GPIO_FN_VIO_D12,
+	GPIO_FN_VIO_D11, GPIO_FN_VIO_D10, GPIO_FN_VIO_D9, GPIO_FN_VIO_D8,
+	GPIO_FN_VIO_D7, GPIO_FN_VIO_D6, GPIO_FN_VIO_D5, GPIO_FN_VIO_D4,
+	GPIO_FN_VIO_D3, GPIO_FN_VIO_D2, GPIO_FN_VIO_D1, GPIO_FN_VIO_D0,
+	GPIO_FN_VIO_FLD, GPIO_FN_VIO_CKO, GPIO_FN_VIO_STEX, GPIO_FN_VIO_STEM,
+	GPIO_FN_VIO_VD, GPIO_FN_VIO_HD, GPIO_FN_VIO_CLK,
+	GPIO_FN_VIO_VD2, GPIO_FN_VIO_HD2, GPIO_FN_VIO_CLK2,
+
+	/* LCDC */
+	GPIO_FN_LCDD23, GPIO_FN_LCDD22, GPIO_FN_LCDD21, GPIO_FN_LCDD20,
+	GPIO_FN_LCDD19, GPIO_FN_LCDD18, GPIO_FN_LCDD17, GPIO_FN_LCDD16,
+	GPIO_FN_LCDD15, GPIO_FN_LCDD14, GPIO_FN_LCDD13, GPIO_FN_LCDD12,
+	GPIO_FN_LCDD11, GPIO_FN_LCDD10, GPIO_FN_LCDD9, GPIO_FN_LCDD8,
+	GPIO_FN_LCDD7, GPIO_FN_LCDD6, GPIO_FN_LCDD5, GPIO_FN_LCDD4,
+	GPIO_FN_LCDD3, GPIO_FN_LCDD2, GPIO_FN_LCDD1, GPIO_FN_LCDD0,
+	GPIO_FN_LCDLCLK,
+	/* Main LCD */
+	GPIO_FN_LCDDON, GPIO_FN_LCDVCPWC, GPIO_FN_LCDVEPWC, GPIO_FN_LCDVSYN,
+	/* Main LCD - RGB Mode */
+	GPIO_FN_LCDDCK, GPIO_FN_LCDHSYN, GPIO_FN_LCDDISP,
+	/* Main LCD - SYS Mode */
+	GPIO_FN_LCDRS, GPIO_FN_LCDCS, GPIO_FN_LCDWR, GPIO_FN_LCDRD,
+	/* Sub LCD - SYS Mode */
+	GPIO_FN_LCDDON2, GPIO_FN_LCDVCPWC2, GPIO_FN_LCDVEPWC2,
+	GPIO_FN_LCDVSYN2, GPIO_FN_LCDCS2,
+
+	/* BSC */
+	GPIO_FN_IOIS16, GPIO_FN_A25, GPIO_FN_A24, GPIO_FN_A23, GPIO_FN_A22,
+	GPIO_FN_BS, GPIO_FN_CS6B_CE1B, GPIO_FN_WAIT, GPIO_FN_CS6A_CE2B,
+
+	/* SBSC */
+	GPIO_FN_HPD63, GPIO_FN_HPD62, GPIO_FN_HPD61, GPIO_FN_HPD60,
+	GPIO_FN_HPD59, GPIO_FN_HPD58, GPIO_FN_HPD57, GPIO_FN_HPD56,
+	GPIO_FN_HPD55, GPIO_FN_HPD54, GPIO_FN_HPD53, GPIO_FN_HPD52,
+	GPIO_FN_HPD51, GPIO_FN_HPD50, GPIO_FN_HPD49, GPIO_FN_HPD48,
+	GPIO_FN_HPDQM7, GPIO_FN_HPDQM6, GPIO_FN_HPDQM5, GPIO_FN_HPDQM4,
+
+	/* IRQ */
+	GPIO_FN_IRQ0, GPIO_FN_IRQ1, GPIO_FN_IRQ2, GPIO_FN_IRQ3,
+	GPIO_FN_IRQ4, GPIO_FN_IRQ5, GPIO_FN_IRQ6, GPIO_FN_IRQ7,
+
+	/* SDHI */
+	GPIO_FN_SDHICD, GPIO_FN_SDHIWP, GPIO_FN_SDHID3, GPIO_FN_SDHID2,
+	GPIO_FN_SDHID1, GPIO_FN_SDHID0, GPIO_FN_SDHICMD, GPIO_FN_SDHICLK,
+
+	/* SIU - Port A */
+	GPIO_FN_SIUAOLR, GPIO_FN_SIUAOBT, GPIO_FN_SIUAISLD, GPIO_FN_SIUAILR,
+	GPIO_FN_SIUAIBT, GPIO_FN_SIUAOSLD, GPIO_FN_SIUMCKA, GPIO_FN_SIUFCKA,
+
+	/* SIU - Port B */
+	GPIO_FN_SIUBOLR, GPIO_FN_SIUBOBT, GPIO_FN_SIUBISLD, GPIO_FN_SIUBILR,
+	GPIO_FN_SIUBIBT, GPIO_FN_SIUBOSLD, GPIO_FN_SIUMCKB, GPIO_FN_SIUFCKB,
+
+	/* AUD */
+	GPIO_FN_AUDSYNC, GPIO_FN_AUDATA3, GPIO_FN_AUDATA2, GPIO_FN_AUDATA1,
+	GPIO_FN_AUDATA0,
+
+	/* DMAC */
+	GPIO_FN_DACK, GPIO_FN_DREQ0,
+
+	/* VOU */
+	GPIO_FN_DV_CLKI, GPIO_FN_DV_CLK, GPIO_FN_DV_HSYNC, GPIO_FN_DV_VSYNC,
+	GPIO_FN_DV_D15, GPIO_FN_DV_D14, GPIO_FN_DV_D13, GPIO_FN_DV_D12,
+	GPIO_FN_DV_D11, GPIO_FN_DV_D10, GPIO_FN_DV_D9, GPIO_FN_DV_D8,
+	GPIO_FN_DV_D7, GPIO_FN_DV_D6, GPIO_FN_DV_D5, GPIO_FN_DV_D4,
+	GPIO_FN_DV_D3, GPIO_FN_DV_D2, GPIO_FN_DV_D1, GPIO_FN_DV_D0,
+
+	/* CPG */
+	GPIO_FN_STATUS0, GPIO_FN_PDSTATUS,
+
+	/* SIOF0 */
+	GPIO_FN_SIOF0_MCK, GPIO_FN_SIOF0_SCK,
+	GPIO_FN_SIOF0_SYNC, GPIO_FN_SIOF0_SS1, GPIO_FN_SIOF0_SS2,
+	GPIO_FN_SIOF0_TXD, GPIO_FN_SIOF0_RXD,
+
+	/* SIOF1 */
+	GPIO_FN_SIOF1_MCK, GPIO_FN_SIOF1_SCK,
+	GPIO_FN_SIOF1_SYNC, GPIO_FN_SIOF1_SS1, GPIO_FN_SIOF1_SS2,
+	GPIO_FN_SIOF1_TXD, GPIO_FN_SIOF1_RXD,
+
+	/* SIM */
+	GPIO_FN_SIM_D, GPIO_FN_SIM_CLK, GPIO_FN_SIM_RST,
+
+	/* TSIF */
+	GPIO_FN_TS_SDAT, GPIO_FN_TS_SCK, GPIO_FN_TS_SDEN, GPIO_FN_TS_SPSYNC,
+
+	/* IRDA */
+	GPIO_FN_IRDA_IN, GPIO_FN_IRDA_OUT,
+
+	/* TPU */
+	GPIO_FN_TPUTO,
+
+	/* FLCTL */
+	GPIO_FN_FCE, GPIO_FN_NAF7, GPIO_FN_NAF6, GPIO_FN_NAF5, GPIO_FN_NAF4,
+	GPIO_FN_NAF3, GPIO_FN_NAF2, GPIO_FN_NAF1, GPIO_FN_NAF0, GPIO_FN_FCDE,
+	GPIO_FN_FOE, GPIO_FN_FSC, GPIO_FN_FWE, GPIO_FN_FRB,
+
+	/* KEYSC */
+	GPIO_FN_KEYIN0, GPIO_FN_KEYIN1, GPIO_FN_KEYIN2, GPIO_FN_KEYIN3,
+	GPIO_FN_KEYIN4, GPIO_FN_KEYOUT0, GPIO_FN_KEYOUT1, GPIO_FN_KEYOUT2,
+	GPIO_FN_KEYOUT3, GPIO_FN_KEYOUT4_IN6, GPIO_FN_KEYOUT5_IN5,
+};
+
+#endif /* __ASM_SH7722_H__ */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7723.h b/arch/sh/include/cpu-sh4/cpu/sh7723.h
new file mode 100644
index 0000000..9d2f6d7
--- /dev/null
+++ b/arch/sh/include/cpu-sh4/cpu/sh7723.h
@@ -0,0 +1,254 @@
+#ifndef __ASM_SH7723_H__
+#define __ASM_SH7723_H__
+
+enum {
+	/* PTA */
+	GPIO_PTA7, GPIO_PTA6, GPIO_PTA5, GPIO_PTA4,
+	GPIO_PTA3, GPIO_PTA2, GPIO_PTA1, GPIO_PTA0,
+
+	/* PTB */
+	GPIO_PTB7, GPIO_PTB6, GPIO_PTB5, GPIO_PTB4,
+	GPIO_PTB3, GPIO_PTB2, GPIO_PTB1, GPIO_PTB0,
+
+	/* PTC */
+	GPIO_PTC7, GPIO_PTC6, GPIO_PTC5, GPIO_PTC4,
+	GPIO_PTC3, GPIO_PTC2, GPIO_PTC1, GPIO_PTC0,
+
+	/* PTD */
+	GPIO_PTD7, GPIO_PTD6, GPIO_PTD5, GPIO_PTD4,
+	GPIO_PTD3, GPIO_PTD2, GPIO_PTD1, GPIO_PTD0,
+
+	/* PTE */
+	GPIO_PTE5, GPIO_PTE4, GPIO_PTE3, GPIO_PTE2,
+	GPIO_PTE1, GPIO_PTE0,
+
+	/* PTF */
+	GPIO_PTF7, GPIO_PTF6, GPIO_PTF5, GPIO_PTF4,
+	GPIO_PTF3, GPIO_PTF2, GPIO_PTF1, GPIO_PTF0,
+
+	/* PTG */
+	GPIO_PTG5, GPIO_PTG4, GPIO_PTG3, GPIO_PTG2,
+	GPIO_PTG1, GPIO_PTG0,
+
+	/* PTH */
+	GPIO_PTH7, GPIO_PTH6, GPIO_PTH5, GPIO_PTH4,
+	GPIO_PTH3, GPIO_PTH2, GPIO_PTH1, GPIO_PTH0,
+
+	/* PTJ */
+	GPIO_PTJ7, GPIO_PTJ5, GPIO_PTJ3, GPIO_PTJ2,
+	GPIO_PTJ1, GPIO_PTJ0,
+
+	/* PTK */
+	GPIO_PTK7, GPIO_PTK6, GPIO_PTK5, GPIO_PTK4,
+	GPIO_PTK3, GPIO_PTK2, GPIO_PTK1, GPIO_PTK0,
+
+	/* PTL */
+	GPIO_PTL7, GPIO_PTL6, GPIO_PTL5, GPIO_PTL4,
+	GPIO_PTL3, GPIO_PTL2, GPIO_PTL1, GPIO_PTL0,
+
+	/* PTM */
+	GPIO_PTM7, GPIO_PTM6, GPIO_PTM5, GPIO_PTM4,
+	GPIO_PTM3, GPIO_PTM2, GPIO_PTM1, GPIO_PTM0,
+
+	/* PTN */
+	GPIO_PTN7, GPIO_PTN6, GPIO_PTN5, GPIO_PTN4,
+	GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0,
+
+	/* PTQ */
+	GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0,
+
+	/* PTR */
+	GPIO_PTR7, GPIO_PTR6, GPIO_PTR5, GPIO_PTR4,
+	GPIO_PTR3, GPIO_PTR2, GPIO_PTR1, GPIO_PTR0,
+
+	/* PTS */
+	GPIO_PTS7, GPIO_PTS6, GPIO_PTS5, GPIO_PTS4,
+	GPIO_PTS3, GPIO_PTS2, GPIO_PTS1, GPIO_PTS0,
+
+	/* PTT */
+	GPIO_PTT5, GPIO_PTT4, GPIO_PTT3, GPIO_PTT2,
+	GPIO_PTT1, GPIO_PTT0,
+
+	/* PTU */
+	GPIO_PTU5, GPIO_PTU4, GPIO_PTU3, GPIO_PTU2,
+	GPIO_PTU1, GPIO_PTU0,
+
+	/* PTV */
+	GPIO_PTV7, GPIO_PTV6, GPIO_PTV5, GPIO_PTV4,
+	GPIO_PTV3, GPIO_PTV2, GPIO_PTV1, GPIO_PTV0,
+
+	/* PTW */
+	GPIO_PTW7, GPIO_PTW6, GPIO_PTW5, GPIO_PTW4,
+	GPIO_PTW3, GPIO_PTW2, GPIO_PTW1, GPIO_PTW0,
+
+	/* PTX */
+	GPIO_PTX7, GPIO_PTX6, GPIO_PTX5, GPIO_PTX4,
+	GPIO_PTX3, GPIO_PTX2, GPIO_PTX1, GPIO_PTX0,
+
+	/* PTY */
+	GPIO_PTY7, GPIO_PTY6, GPIO_PTY5, GPIO_PTY4,
+	GPIO_PTY3, GPIO_PTY2, GPIO_PTY1, GPIO_PTY0,
+
+	/* PTZ */
+	GPIO_PTZ7, GPIO_PTZ6, GPIO_PTZ5, GPIO_PTZ4,
+	GPIO_PTZ3, GPIO_PTZ2, GPIO_PTZ1, GPIO_PTZ0,
+
+	/* SCIF0 (SCIF: 3 pin PTT/PTU) */
+	GPIO_FN_SCIF0_PTT_TXD, GPIO_FN_SCIF0_PTT_RXD, GPIO_FN_SCIF0_PTT_SCK,
+	GPIO_FN_SCIF0_PTU_TXD, GPIO_FN_SCIF0_PTU_RXD, GPIO_FN_SCIF0_PTU_SCK,
+
+	/* SCIF1 (SCIF: 3 pin PTS/PTV) */
+	GPIO_FN_SCIF1_PTS_TXD, GPIO_FN_SCIF1_PTS_RXD, GPIO_FN_SCIF1_PTS_SCK,
+	GPIO_FN_SCIF1_PTV_TXD, GPIO_FN_SCIF1_PTV_RXD, GPIO_FN_SCIF1_PTV_SCK,
+
+	/* SCIF2 (SCIF: 3 pin PTT/PTU) */
+	GPIO_FN_SCIF2_PTT_TXD, GPIO_FN_SCIF2_PTT_RXD, GPIO_FN_SCIF2_PTT_SCK,
+	GPIO_FN_SCIF2_PTU_TXD, GPIO_FN_SCIF2_PTU_RXD, GPIO_FN_SCIF2_PTU_SCK,
+
+	/* SCIF3 (SCIFA: 5 pin PTS/PTV) */
+	GPIO_FN_SCIF3_PTS_TXD, GPIO_FN_SCIF3_PTS_RXD, GPIO_FN_SCIF3_PTS_SCK,
+	GPIO_FN_SCIF3_PTS_RTS, GPIO_FN_SCIF3_PTS_CTS,
+	GPIO_FN_SCIF3_PTV_TXD, GPIO_FN_SCIF3_PTV_RXD, GPIO_FN_SCIF3_PTV_SCK,
+	GPIO_FN_SCIF3_PTV_RTS, GPIO_FN_SCIF3_PTV_CTS,
+
+	/* SCIF4 (SCIFA: 3 pin PTE/PTN) */
+	GPIO_FN_SCIF4_PTE_TXD, GPIO_FN_SCIF4_PTE_RXD, GPIO_FN_SCIF4_PTE_SCK,
+	GPIO_FN_SCIF4_PTN_TXD, GPIO_FN_SCIF4_PTN_RXD, GPIO_FN_SCIF4_PTN_SCK,
+
+	/* SCIF5 (SCIFA: 3 pin PTE/PTN) */
+	GPIO_FN_SCIF5_PTE_TXD, GPIO_FN_SCIF5_PTE_RXD, GPIO_FN_SCIF5_PTE_SCK,
+	GPIO_FN_SCIF5_PTN_TXD, GPIO_FN_SCIF5_PTN_RXD, GPIO_FN_SCIF5_PTN_SCK,
+
+	/* CEU */
+	GPIO_FN_VIO_D15, GPIO_FN_VIO_D14, GPIO_FN_VIO_D13, GPIO_FN_VIO_D12,
+	GPIO_FN_VIO_D11, GPIO_FN_VIO_D10, GPIO_FN_VIO_D9, GPIO_FN_VIO_D8,
+	GPIO_FN_VIO_D7, GPIO_FN_VIO_D6, GPIO_FN_VIO_D5, GPIO_FN_VIO_D4,
+	GPIO_FN_VIO_D3, GPIO_FN_VIO_D2, GPIO_FN_VIO_D1, GPIO_FN_VIO_D0,
+	GPIO_FN_VIO_FLD, GPIO_FN_VIO_CKO,
+	GPIO_FN_VIO_VD1, GPIO_FN_VIO_HD1, GPIO_FN_VIO_CLK1,
+	GPIO_FN_VIO_VD2, GPIO_FN_VIO_HD2, GPIO_FN_VIO_CLK2,
+
+	/* LCDC */
+	GPIO_FN_LCDD23, GPIO_FN_LCDD22, GPIO_FN_LCDD21, GPIO_FN_LCDD20,
+	GPIO_FN_LCDD19, GPIO_FN_LCDD18, GPIO_FN_LCDD17, GPIO_FN_LCDD16,
+	GPIO_FN_LCDD15, GPIO_FN_LCDD14, GPIO_FN_LCDD13, GPIO_FN_LCDD12,
+	GPIO_FN_LCDD11, GPIO_FN_LCDD10, GPIO_FN_LCDD9, GPIO_FN_LCDD8,
+	GPIO_FN_LCDD7, GPIO_FN_LCDD6, GPIO_FN_LCDD5, GPIO_FN_LCDD4,
+	GPIO_FN_LCDD3, GPIO_FN_LCDD2, GPIO_FN_LCDD1, GPIO_FN_LCDD0,
+	GPIO_FN_LCDLCLK_PTR, GPIO_FN_LCDLCLK_PTW,
+	/* Main LCD */
+	GPIO_FN_LCDDON, GPIO_FN_LCDVCPWC, GPIO_FN_LCDVEPWC, GPIO_FN_LCDVSYN,
+	/* Main LCD - RGB Mode */
+	GPIO_FN_LCDDCK, GPIO_FN_LCDHSYN, GPIO_FN_LCDDISP,
+	/* Main LCD - SYS Mode */
+	GPIO_FN_LCDRS, GPIO_FN_LCDCS, GPIO_FN_LCDWR, GPIO_FN_LCDRD,
+
+	/* IRQ */
+	GPIO_FN_IRQ0, GPIO_FN_IRQ1, GPIO_FN_IRQ2, GPIO_FN_IRQ3,
+	GPIO_FN_IRQ4, GPIO_FN_IRQ5, GPIO_FN_IRQ6, GPIO_FN_IRQ7,
+
+	/* AUD */
+	GPIO_FN_AUDATA3, GPIO_FN_AUDATA2, GPIO_FN_AUDATA1, GPIO_FN_AUDATA0,
+	GPIO_FN_AUDCK, GPIO_FN_AUDSYNC,
+
+	/* SDHI0 (PTD) */
+	GPIO_FN_SDHI0CD_PTD, GPIO_FN_SDHI0WP_PTD,
+	GPIO_FN_SDHI0D3_PTD, GPIO_FN_SDHI0D2_PTD,
+	GPIO_FN_SDHI0D1_PTD, GPIO_FN_SDHI0D0_PTD,
+	GPIO_FN_SDHI0CMD_PTD, GPIO_FN_SDHI0CLK_PTD,
+
+	/* SDHI0 (PTS) */
+	GPIO_FN_SDHI0CD_PTS, GPIO_FN_SDHI0WP_PTS,
+	GPIO_FN_SDHI0D3_PTS, GPIO_FN_SDHI0D2_PTS,
+	GPIO_FN_SDHI0D1_PTS, GPIO_FN_SDHI0D0_PTS,
+	GPIO_FN_SDHI0CMD_PTS, GPIO_FN_SDHI0CLK_PTS,
+
+	/* SDHI1 */
+	GPIO_FN_SDHI1CD, GPIO_FN_SDHI1WP, GPIO_FN_SDHI1D3, GPIO_FN_SDHI1D2,
+	GPIO_FN_SDHI1D1, GPIO_FN_SDHI1D0, GPIO_FN_SDHI1CMD, GPIO_FN_SDHI1CLK,
+
+	/* SIUA */
+	GPIO_FN_SIUAFCK, GPIO_FN_SIUAILR, GPIO_FN_SIUAIBT, GPIO_FN_SIUAISLD,
+	GPIO_FN_SIUAOLR, GPIO_FN_SIUAOBT, GPIO_FN_SIUAOSLD, GPIO_FN_SIUAMCK,
+	GPIO_FN_SIUAISPD, GPIO_FN_SIUOSPD,
+
+	/* SIUB */
+	GPIO_FN_SIUBFCK, GPIO_FN_SIUBILR, GPIO_FN_SIUBIBT, GPIO_FN_SIUBISLD,
+	GPIO_FN_SIUBOLR, GPIO_FN_SIUBOBT, GPIO_FN_SIUBOSLD, GPIO_FN_SIUBMCK,
+
+	/* IRDA */
+	GPIO_FN_IRDA_IN, GPIO_FN_IRDA_OUT,
+
+	/* VOU */
+	GPIO_FN_DV_CLKI, GPIO_FN_DV_CLK, GPIO_FN_DV_HSYNC, GPIO_FN_DV_VSYNC,
+	GPIO_FN_DV_D15, GPIO_FN_DV_D14, GPIO_FN_DV_D13, GPIO_FN_DV_D12,
+	GPIO_FN_DV_D11, GPIO_FN_DV_D10, GPIO_FN_DV_D9, GPIO_FN_DV_D8,
+	GPIO_FN_DV_D7, GPIO_FN_DV_D6, GPIO_FN_DV_D5, GPIO_FN_DV_D4,
+	GPIO_FN_DV_D3, GPIO_FN_DV_D2, GPIO_FN_DV_D1, GPIO_FN_DV_D0,
+
+	/* KEYSC */
+	GPIO_FN_KEYIN0, GPIO_FN_KEYIN1, GPIO_FN_KEYIN2, GPIO_FN_KEYIN3,
+	GPIO_FN_KEYIN4, GPIO_FN_KEYOUT0, GPIO_FN_KEYOUT1, GPIO_FN_KEYOUT2,
+	GPIO_FN_KEYOUT3, GPIO_FN_KEYOUT4_IN6, GPIO_FN_KEYOUT5_IN5,
+
+	/* MSIOF0 (PTF) */
+	GPIO_FN_MSIOF0_PTF_TXD, GPIO_FN_MSIOF0_PTF_RXD, GPIO_FN_MSIOF0_PTF_MCK,
+	GPIO_FN_MSIOF0_PTF_TSYNC, GPIO_FN_MSIOF0_PTF_TSCK,
+	GPIO_FN_MSIOF0_PTF_RSYNC, GPIO_FN_MSIOF0_PTF_RSCK,
+	GPIO_FN_MSIOF0_PTF_SS1, GPIO_FN_MSIOF0_PTF_SS2,
+
+	/* MSIOF0 (PTT+PTX) */
+	GPIO_FN_MSIOF0_PTT_TXD, GPIO_FN_MSIOF0_PTT_RXD, GPIO_FN_MSIOF0_PTX_MCK,
+	GPIO_FN_MSIOF0_PTT_TSYNC, GPIO_FN_MSIOF0_PTT_TSCK,
+	GPIO_FN_MSIOF0_PTT_RSYNC, GPIO_FN_MSIOF0_PTT_RSCK,
+	GPIO_FN_MSIOF0_PTT_SS1, GPIO_FN_MSIOF0_PTT_SS2,
+
+	/* MSIOF1 */
+	GPIO_FN_MSIOF1_TXD, GPIO_FN_MSIOF1_RXD, GPIO_FN_MSIOF1_MCK,
+	GPIO_FN_MSIOF1_TSYNC, GPIO_FN_MSIOF1_TSCK,
+	GPIO_FN_MSIOF1_RSYNC, GPIO_FN_MSIOF1_RSCK,
+	GPIO_FN_MSIOF1_SS1, GPIO_FN_MSIOF1_SS2,
+
+	/* TSIF */
+	GPIO_FN_TS0_SDAT, GPIO_FN_TS0_SCK, GPIO_FN_TS0_SDEN, GPIO_FN_TS0_SPSYNC,
+
+	/* FLCTL */
+	GPIO_FN_FCE, GPIO_FN_NAF7, GPIO_FN_NAF6, GPIO_FN_NAF5, GPIO_FN_NAF4,
+	GPIO_FN_NAF3, GPIO_FN_NAF2, GPIO_FN_NAF1, GPIO_FN_NAF0, GPIO_FN_FCDE,
+	GPIO_FN_FOE, GPIO_FN_FSC, GPIO_FN_FWE, GPIO_FN_FRB,
+
+	/* DMAC */
+	GPIO_FN_DACK1, GPIO_FN_DREQ1, GPIO_FN_DACK0, GPIO_FN_DREQ0,
+
+	/* ADC */
+	GPIO_FN_AN3, GPIO_FN_AN2, GPIO_FN_AN1, GPIO_FN_AN0, GPIO_FN_ADTRG,
+
+	/* CPG */
+	GPIO_FN_STATUS0, GPIO_FN_PDSTATUS,
+
+	/* TPU */
+	GPIO_FN_TPUTO3, GPIO_FN_TPUTO2, GPIO_FN_TPUTO1, GPIO_FN_TPUTO0,
+
+	/* BSC */
+	GPIO_FN_D31, GPIO_FN_D30, GPIO_FN_D29, GPIO_FN_D28,
+	GPIO_FN_D27, GPIO_FN_D26, GPIO_FN_D25, GPIO_FN_D24,
+	GPIO_FN_D23, GPIO_FN_D22, GPIO_FN_D21, GPIO_FN_D20,
+	GPIO_FN_D19, GPIO_FN_D18, GPIO_FN_D17, GPIO_FN_D16,
+	GPIO_FN_IOIS16, GPIO_FN_WAIT, GPIO_FN_BS,
+	GPIO_FN_A25, GPIO_FN_A24, GPIO_FN_A23, GPIO_FN_A22,
+	GPIO_FN_CS6B_CE1B, GPIO_FN_CS6A_CE2B,
+	GPIO_FN_CS5B_CE1A, GPIO_FN_CS5A_CE2A,
+	GPIO_FN_WE3_ICIOWR, GPIO_FN_WE2_ICIORD,
+
+	/* ATAPI */
+	GPIO_FN_IDED15, GPIO_FN_IDED14, GPIO_FN_IDED13, GPIO_FN_IDED12,
+	GPIO_FN_IDED11, GPIO_FN_IDED10, GPIO_FN_IDED9, GPIO_FN_IDED8,
+	GPIO_FN_IDED7, GPIO_FN_IDED6, GPIO_FN_IDED5, GPIO_FN_IDED4,
+	GPIO_FN_IDED3, GPIO_FN_IDED2, GPIO_FN_IDED1, GPIO_FN_IDED0,
+	GPIO_FN_DIRECTION, GPIO_FN_EXBUF_ENB, GPIO_FN_IDERST, GPIO_FN_IODACK,
+	GPIO_FN_IODREQ, GPIO_FN_IDEIORDY, GPIO_FN_IDEINT, GPIO_FN_IDEIOWR,
+	GPIO_FN_IDEIORD, GPIO_FN_IDECS1, GPIO_FN_IDECS0, GPIO_FN_IDEA2,
+	GPIO_FN_IDEA1, GPIO_FN_IDEA0,
+};
+
+#endif /* __ASM_SH7723_H__ */
diff --git a/arch/sh/include/asm/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
similarity index 100%
rename from arch/sh/include/asm/edosk7705.h
rename to arch/sh/include/mach-common/mach/edosk7705.h
diff --git a/arch/sh/include/asm/r7780rp.h b/arch/sh/include/mach-common/mach/highlander.h
similarity index 100%
rename from arch/sh/include/asm/r7780rp.h
rename to arch/sh/include/mach-common/mach/highlander.h
diff --git a/arch/sh/include/asm/hp6xx.h b/arch/sh/include/mach-common/mach/hp6xx.h
similarity index 100%
rename from arch/sh/include/asm/hp6xx.h
rename to arch/sh/include/mach-common/mach/hp6xx.h
diff --git a/arch/sh/include/asm/lboxre2.h b/arch/sh/include/mach-common/mach/lboxre2.h
similarity index 100%
rename from arch/sh/include/asm/lboxre2.h
rename to arch/sh/include/mach-common/mach/lboxre2.h
diff --git a/arch/sh/include/asm/magicpanelr2.h b/arch/sh/include/mach-common/mach/magicpanelr2.h
similarity index 100%
rename from arch/sh/include/asm/magicpanelr2.h
rename to arch/sh/include/mach-common/mach/magicpanelr2.h
diff --git a/arch/sh/include/asm/microdev.h b/arch/sh/include/mach-common/mach/microdev.h
similarity index 100%
rename from arch/sh/include/asm/microdev.h
rename to arch/sh/include/mach-common/mach/microdev.h
diff --git a/arch/sh/include/asm/migor.h b/arch/sh/include/mach-common/mach/migor.h
similarity index 93%
rename from arch/sh/include/asm/migor.h
rename to arch/sh/include/mach-common/mach/migor.h
index c12b632..e451f02 100644
--- a/arch/sh/include/asm/migor.h
+++ b/arch/sh/include/mach-common/mach/migor.h
@@ -52,9 +52,11 @@
 #define PORT_HIZCRB 0xa405015a
 #define PORT_HIZCRC 0xa405015c
 
+#define BSC_CS4BCR 0xfec10010
 #define BSC_CS6ABCR 0xfec1001c
+#define BSC_CS4WCR 0xfec10030
 
-#include <asm/sh_mobile_lcdc.h>
+#include <video/sh_mobile_lcdc.h>
 
 int migor_lcd_qvga_setup(void *board_data, void *sys_ops_handle,
 			 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
diff --git a/arch/sh/include/asm/rts7751r2d.h b/arch/sh/include/mach-common/mach/r2d.h
similarity index 100%
rename from arch/sh/include/asm/rts7751r2d.h
rename to arch/sh/include/mach-common/mach/r2d.h
diff --git a/arch/sh/include/asm/sdk7780.h b/arch/sh/include/mach-common/mach/sdk7780.h
similarity index 100%
rename from arch/sh/include/asm/sdk7780.h
rename to arch/sh/include/mach-common/mach/sdk7780.h
diff --git a/arch/sh/include/asm/sh7763rdp.h b/arch/sh/include/mach-common/mach/sh7763rdp.h
similarity index 100%
rename from arch/sh/include/asm/sh7763rdp.h
rename to arch/sh/include/mach-common/mach/sh7763rdp.h
diff --git a/arch/sh/include/asm/sh7785lcr.h b/arch/sh/include/mach-common/mach/sh7785lcr.h
similarity index 100%
rename from arch/sh/include/asm/sh7785lcr.h
rename to arch/sh/include/mach-common/mach/sh7785lcr.h
diff --git a/arch/sh/include/asm/shmin.h b/arch/sh/include/mach-common/mach/shmin.h
similarity index 100%
rename from arch/sh/include/asm/shmin.h
rename to arch/sh/include/mach-common/mach/shmin.h
diff --git a/arch/sh/include/asm/snapgear.h b/arch/sh/include/mach-common/mach/snapgear.h
similarity index 100%
rename from arch/sh/include/asm/snapgear.h
rename to arch/sh/include/mach-common/mach/snapgear.h
diff --git a/arch/sh/include/asm/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
similarity index 100%
rename from arch/sh/include/asm/systemh7751.h
rename to arch/sh/include/mach-common/mach/systemh7751.h
diff --git a/arch/sh/include/asm/titan.h b/arch/sh/include/mach-common/mach/titan.h
similarity index 100%
rename from arch/sh/include/asm/titan.h
rename to arch/sh/include/mach-common/mach/titan.h
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 0e6905f..48edfb1 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -21,7 +21,8 @@
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-obj-$(CONFIG_ELF_CORE)		+= dump_task.o
 obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
+obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 6edf53b..c97660b 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -17,7 +17,7 @@
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
 obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
+obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index f5eb56e..b7e46d5 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -294,9 +294,10 @@
 {
 }
 
-void __init __attribute__ ((weak))
+int __init __attribute__ ((weak))
 arch_clk_init(void)
 {
+	return 0;
 }
 
 static int show_clocks(char *buf, char **start, off_t off,
@@ -331,7 +332,7 @@
 		ret |= clk_register(clk);
 	}
 
-	arch_clk_init();
+	ret |= arch_clk_init();
 
 	/* Kick the child clocks.. */
 	propagate_rate(&master_clk);
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 462a8f6..f0c7025 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,8 +1,6 @@
 #
 # Makefile for the Linux/SuperH CPU-specific IRQ handlers.
 #
-obj-y	+= intc.o
-
 obj-$(CONFIG_SUPERH32)			+= imask.o
 obj-$(CONFIG_CPU_SH5)			+= intc-sh5.o
 obj-$(CONFIG_CPU_HAS_IPR_IRQ)		+= ipr.o
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 56ea7b2..3eb17ee 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -33,7 +33,7 @@
 	struct ipr_data *p = get_irq_chip_data(irq);
 	unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
 	/* Set the priority in IPR to 0 */
-	ctrl_outw(ctrl_inw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+	__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
 }
 
 static void enable_ipr_irq(unsigned int irq)
@@ -41,7 +41,7 @@
 	struct ipr_data *p = get_irq_chip_data(irq);
 	unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
 	/* Set priority in IPR back to original value */
-	ctrl_outw(ctrl_inw(addr) | (p->priority << p->shift), addr);
+	__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
 }
 
 /*
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 1ab1ecf..428450c 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -12,3 +12,8 @@
 obj-$(CONFIG_CPU_SUBTYPE_SH7203)	+= setup-sh7203.o clock-sh7203.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7263)	+= setup-sh7203.o clock-sh7203.o
 obj-$(CONFIG_CPU_SUBTYPE_MXG)		+= setup-mxg.o clock-sh7206.o
+
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7203)	:= pinmux-sh7203.o
+
+obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
new file mode 100644
index 0000000..39a5b88
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
@@ -0,0 +1,1599 @@
+/*
+ * SH7203 Pinmux
+ *
+ *  Copyright (C) 2008  Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <asm/sh7203.h>
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+	PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+	PB12_DATA,
+	PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+	PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+	PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+	PC14_DATA, PC13_DATA, PC12_DATA,
+	PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+	PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+	PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+	PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+	PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+	PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+	PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+	PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+	PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+	PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+	PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+	PF30_DATA, PF29_DATA, PF28_DATA,
+	PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+	PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+	PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA,
+	PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+	PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+	PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+	PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+	PINMUX_DATA_END,
+
+	PINMUX_INPUT_BEGIN,
+	PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+	PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+	PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+	PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+	PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+	PC14_IN, PC13_IN, PC12_IN,
+	PC11_IN, PC10_IN, PC9_IN, PC8_IN,
+	PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+	PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+	PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+	PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+	PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+	PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+	PE15_IN, PE14_IN, PE13_IN, PE12_IN,
+	PE11_IN, PE10_IN, PE9_IN, PE8_IN,
+	PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+	PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+	PF30_IN, PF29_IN, PF28_IN,
+	PF27_IN, PF26_IN, PF25_IN, PF24_IN,
+	PF23_IN, PF22_IN, PF21_IN, PF20_IN,
+	PF19_IN, PF18_IN, PF17_IN, PF16_IN,
+	PF15_IN, PF14_IN, PF13_IN, PF12_IN,
+	PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+	PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+	PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+	PINMUX_INPUT_END,
+
+	PINMUX_OUTPUT_BEGIN,
+	PB12_OUT,
+	PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+	PC14_OUT, PC13_OUT, PC12_OUT,
+	PC11_OUT, PC10_OUT, PC9_OUT, PC8_OUT,
+	PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+	PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+	PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+	PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+	PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+	PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+	PE15_OUT, PE14_OUT, PE13_OUT, PE12_OUT,
+	PE11_OUT, PE10_OUT, PE9_OUT, PE8_OUT,
+	PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+	PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+	PF30_OUT, PF29_OUT, PF28_OUT,
+	PF27_OUT, PF26_OUT, PF25_OUT, PF24_OUT,
+	PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT,
+	PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT,
+	PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT,
+	PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+	PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+	PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+	PINMUX_OUTPUT_END,
+
+	PINMUX_FUNCTION_BEGIN,
+	PB11_IOR_IN, PB11_IOR_OUT,
+	PB10_IOR_IN, PB10_IOR_OUT,
+	PB9_IOR_IN, PB9_IOR_OUT,
+	PB8_IOR_IN, PB8_IOR_OUT,
+	PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+	PB11MD_0, PB11MD_1,
+	PB10MD_0, PB10MD_1,
+	PB9MD_00, PB9MD_01, PB9MD_10,
+	PB8MD_00, PB8MD_01, PB8MD_10,
+	PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+	PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+	PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+	PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+	PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+	PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+	PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+	PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+
+	PB12IRQ_00, PB12IRQ_01, PB12IRQ_10,
+
+	PC14MD_0, PC14MD_1,
+	PC13MD_0, PC13MD_1,
+	PC12MD_0, PC12MD_1,
+	PC11MD_00, PC11MD_01, PC11MD_10,
+	PC10MD_00, PC10MD_01, PC10MD_10,
+	PC9MD_0, PC9MD_1,
+	PC8MD_0, PC8MD_1,
+	PC7MD_0, PC7MD_1,
+	PC6MD_0, PC6MD_1,
+	PC5MD_0, PC5MD_1,
+	PC4MD_0, PC4MD_1,
+	PC3MD_0, PC3MD_1,
+	PC2MD_0, PC2MD_1,
+	PC1MD_0, PC1MD_1,
+	PC0MD_00, PC0MD_01, PC0MD_10,
+
+	PD15MD_000, PD15MD_001, PD15MD_010, PD15MD_100, PD15MD_101,
+	PD14MD_000, PD14MD_001, PD14MD_010, PD14MD_101,
+	PD13MD_000, PD13MD_001, PD13MD_010, PD13MD_100, PD13MD_101,
+	PD12MD_000, PD12MD_001, PD12MD_010, PD12MD_100, PD12MD_101,
+	PD11MD_000, PD11MD_001, PD11MD_010, PD11MD_100, PD11MD_101,
+	PD10MD_000, PD10MD_001, PD10MD_010, PD10MD_100, PD10MD_101,
+	PD9MD_000, PD9MD_001, PD9MD_010, PD9MD_100, PD9MD_101,
+	PD8MD_000, PD8MD_001, PD8MD_010, PD8MD_100, PD8MD_101,
+	PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101,
+	PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101,
+	PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101,
+	PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101,
+	PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101,
+	PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101,
+	PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101,
+	PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101,
+
+	PE15MD_00, PE15MD_01, PE15MD_11,
+	PE14MD_00, PE14MD_01, PE14MD_11,
+	PE13MD_00, PE13MD_11,
+	PE12MD_00, PE12MD_11,
+	PE11MD_000, PE11MD_001, PE11MD_010, PE11MD_100,
+	PE10MD_000, PE10MD_001, PE10MD_010, PE10MD_100,
+	PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+	PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+	PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100,
+	PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100,
+	PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100,
+	PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100,
+	PE3MD_00, PE3MD_01, PE3MD_11,
+	PE2MD_00, PE2MD_01, PE2MD_11,
+	PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+	PE0MD_000, PE0MD_001, PE0MD_011, PE0MD_100,
+
+	PF30MD_0, PF30MD_1,
+	PF29MD_0, PF29MD_1,
+	PF28MD_0, PF28MD_1,
+	PF27MD_0, PF27MD_1,
+	PF26MD_0, PF26MD_1,
+	PF25MD_0, PF25MD_1,
+	PF24MD_0, PF24MD_1,
+	PF23MD_00, PF23MD_01, PF23MD_10,
+	PF22MD_00, PF22MD_01, PF22MD_10,
+	PF21MD_00, PF21MD_01, PF21MD_10,
+	PF20MD_00, PF20MD_01, PF20MD_10,
+	PF19MD_00, PF19MD_01, PF19MD_10,
+	PF18MD_00, PF18MD_01, PF18MD_10,
+	PF17MD_00, PF17MD_01, PF17MD_10,
+	PF16MD_00, PF16MD_01, PF16MD_10,
+	PF15MD_00, PF15MD_01, PF15MD_10,
+	PF14MD_00, PF14MD_01, PF14MD_10,
+	PF13MD_00, PF13MD_01, PF13MD_10,
+	PF12MD_00, PF12MD_01, PF12MD_10,
+	PF11MD_00, PF11MD_01, PF11MD_10,
+	PF10MD_00, PF10MD_01, PF10MD_10,
+	PF9MD_00, PF9MD_01, PF9MD_10,
+	PF8MD_00, PF8MD_01, PF8MD_10,
+	PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+	PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+	PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+	PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+	PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+	PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+	PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+	PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+	PINMUX_FUNCTION_END,
+
+	PINMUX_MARK_BEGIN,
+	PINT7_PB_MARK, PINT6_PB_MARK, PINT5_PB_MARK, PINT4_PB_MARK,
+	PINT3_PB_MARK, PINT2_PB_MARK, PINT1_PB_MARK, PINT0_PB_MARK,
+	PINT7_PD_MARK, PINT6_PD_MARK, PINT5_PD_MARK, PINT4_PD_MARK,
+	PINT3_PD_MARK, PINT2_PD_MARK, PINT1_PD_MARK, PINT0_PD_MARK,
+	IRQ7_PB_MARK, IRQ6_PB_MARK, IRQ5_PB_MARK, IRQ4_PB_MARK,
+	IRQ3_PB_MARK, IRQ2_PB_MARK, IRQ1_PB_MARK, IRQ0_PB_MARK,
+	IRQ7_PD_MARK, IRQ6_PD_MARK, IRQ5_PD_MARK, IRQ4_PD_MARK,
+	IRQ3_PD_MARK, IRQ2_PD_MARK, IRQ1_PD_MARK, IRQ0_PD_MARK,
+	IRQ7_PE_MARK, IRQ6_PE_MARK, IRQ5_PE_MARK, IRQ4_PE_MARK,
+	IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK,
+	WDTOVF_MARK, IRQOUT_MARK, REFOUT_MARK, IRQOUT_REFOUT_MARK,
+	UBCTRG_MARK,
+	CTX1_MARK, CRX1_MARK, CTX0_MARK, CTX0_CTX1_MARK,
+	CRX0_MARK, CRX0_CRX1_MARK,
+	SDA3_MARK, SCL3_MARK,
+	SDA2_MARK, SCL2_MARK,
+	SDA1_MARK, SCL1_MARK,
+	SDA0_MARK, SCL0_MARK,
+	TEND0_PD_MARK, TEND0_PE_MARK, DACK0_PD_MARK, DACK0_PE_MARK,
+	DREQ0_PD_MARK, DREQ0_PE_MARK, TEND1_PD_MARK, TEND1_PE_MARK,
+	DACK1_PD_MARK, DACK1_PE_MARK, DREQ1_PD_MARK, DREQ1_PE_MARK,
+	DACK2_MARK, DREQ2_MARK, DACK3_MARK, DREQ3_MARK,
+	ADTRG_PD_MARK, ADTRG_PE_MARK,
+	D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+	D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+	D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+	D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+	A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+	A21_MARK, CS4_MARK, MRES_MARK, BS_MARK,
+	IOIS16_MARK, CS1_MARK, CS6_CE1B_MARK, CE2B_MARK,
+	CS5_CE1A_MARK, CE2A_MARK, FRAME_MARK, WAIT_MARK,
+	RDWR_MARK, CKE_MARK, CASU_MARK,	BREQ_MARK,
+	RASU_MARK, BACK_MARK, CASL_MARK, RASL_MARK,
+	WE3_DQMUU_AH_ICIO_WR_MARK, WE2_DQMUL_ICIORD_MARK,
+	WE1_DQMLU_WE_MARK, WE0_DQMLL_MARK,
+	CS3_MARK, CS2_MARK, A1_MARK, A0_MARK, CS7_MARK,
+	TIOC4D_MARK, TIOC4C_MARK, TIOC4B_MARK, TIOC4A_MARK,
+	TIOC3D_MARK, TIOC3C_MARK, TIOC3B_MARK, TIOC3A_MARK,
+	TIOC2B_MARK, TIOC1B_MARK, TIOC2A_MARK, TIOC1A_MARK,
+	TIOC0D_MARK, TIOC0C_MARK, TIOC0B_MARK, TIOC0A_MARK,
+	TCLKD_PD_MARK, TCLKC_PD_MARK, TCLKB_PD_MARK, TCLKA_PD_MARK,
+	TCLKD_PF_MARK, TCLKC_PF_MARK, TCLKB_PF_MARK, TCLKA_PF_MARK,
+	SCS0_PD_MARK, SSO0_PD_MARK, SSI0_PD_MARK, SSCK0_PD_MARK,
+	SCS0_PF_MARK, SSO0_PF_MARK, SSI0_PF_MARK, SSCK0_PF_MARK,
+	SCS1_PD_MARK, SSO1_PD_MARK, SSI1_PD_MARK, SSCK1_PD_MARK,
+	SCS1_PF_MARK, SSO1_PF_MARK, SSI1_PF_MARK, SSCK1_PF_MARK,
+	TXD0_MARK, RXD0_MARK, SCK0_MARK,
+	TXD1_MARK, RXD1_MARK, SCK1_MARK,
+	TXD2_MARK, RXD2_MARK, SCK2_MARK,
+	RTS3_MARK, CTS3_MARK, TXD3_MARK,
+	RXD3_MARK, SCK3_MARK,
+	AUDIO_CLK_MARK,
+	SSIDATA3_MARK, SSIWS3_MARK, SSISCK3_MARK,
+	SSIDATA2_MARK, SSIWS2_MARK, SSISCK2_MARK,
+	SSIDATA1_MARK, SSIWS1_MARK, SSISCK1_MARK,
+	SSIDATA0_MARK, SSIWS0_MARK, SSISCK0_MARK,
+	FCE_MARK, FRB_MARK,
+	NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+	NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK,
+	FSC_MARK, FOE_MARK, FCDE_MARK, FWE_MARK,
+	LCD_VEPWC_MARK, LCD_VCPWC_MARK,	LCD_CLK_MARK, LCD_FLM_MARK,
+	LCD_M_DISP_MARK, LCD_CL2_MARK, LCD_CL1_MARK, LCD_DON_MARK,
+	LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
+	LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
+	LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
+	LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+	PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+	/* PA */
+	PINMUX_DATA(PA7_DATA, PA7_IN),
+	PINMUX_DATA(PA6_DATA, PA6_IN),
+	PINMUX_DATA(PA5_DATA, PA5_IN),
+	PINMUX_DATA(PA4_DATA, PA4_IN),
+	PINMUX_DATA(PA3_DATA, PA3_IN),
+	PINMUX_DATA(PA2_DATA, PA2_IN),
+	PINMUX_DATA(PA1_DATA, PA1_IN),
+	PINMUX_DATA(PA0_DATA, PA0_IN),
+
+	/* PB */
+	PINMUX_DATA(PB12_DATA, PB12MD_00, PB12_OUT),
+	PINMUX_DATA(WDTOVF_MARK, PB12MD_01),
+	PINMUX_DATA(IRQOUT_MARK, PB12MD_10, PB12IRQ_00),
+	PINMUX_DATA(REFOUT_MARK, PB12MD_10, PB12IRQ_01),
+	PINMUX_DATA(IRQOUT_REFOUT_MARK, PB12MD_10, PB12IRQ_10),
+	PINMUX_DATA(UBCTRG_MARK, PB12MD_11),
+
+	PINMUX_DATA(PB11_DATA, PB11MD_0, PB11_IN, PB11_OUT),
+	PINMUX_DATA(CTX1_MARK, PB11MD_1),
+
+	PINMUX_DATA(PB10_DATA, PB10MD_0, PB10_IN, PB10_OUT),
+	PINMUX_DATA(CRX1_MARK, PB10MD_1),
+
+	PINMUX_DATA(PB9_DATA, PB9MD_00, PB9_IN, PB9_OUT),
+	PINMUX_DATA(CTX0_MARK, PB9MD_01),
+	PINMUX_DATA(CTX0_CTX1_MARK, PB9MD_10),
+
+	PINMUX_DATA(PB8_DATA, PB8MD_00, PB8_IN, PB8_OUT),
+	PINMUX_DATA(CRX0_MARK, PB8MD_01),
+	PINMUX_DATA(CRX0_CRX1_MARK, PB8MD_10),
+
+	PINMUX_DATA(PB7_DATA, PB7MD_00, PB7_IN),
+	PINMUX_DATA(SDA3_MARK, PB7MD_01),
+	PINMUX_DATA(PINT7_PB_MARK, PB7MD_10),
+	PINMUX_DATA(IRQ7_PB_MARK, PB7MD_11),
+
+	PINMUX_DATA(PB6_DATA, PB6MD_00, PB6_IN),
+	PINMUX_DATA(SCL3_MARK, PB6MD_01),
+	PINMUX_DATA(PINT6_PB_MARK, PB6MD_10),
+	PINMUX_DATA(IRQ6_PB_MARK, PB6MD_11),
+
+	PINMUX_DATA(PB5_DATA, PB5MD_00, PB5_IN),
+	PINMUX_DATA(SDA2_MARK, PB5MD_01),
+	PINMUX_DATA(PINT5_PB_MARK, PB5MD_10),
+	PINMUX_DATA(IRQ5_PB_MARK, PB5MD_11),
+
+	PINMUX_DATA(PB4_DATA, PB4MD_00, PB4_IN),
+	PINMUX_DATA(SCL2_MARK, PB4MD_01),
+	PINMUX_DATA(PINT4_PB_MARK, PB4MD_10),
+	PINMUX_DATA(IRQ4_PB_MARK, PB4MD_11),
+
+	PINMUX_DATA(PB3_DATA, PB3MD_00, PB3_IN),
+	PINMUX_DATA(SDA1_MARK, PB3MD_01),
+	PINMUX_DATA(PINT3_PB_MARK, PB3MD_10),
+	PINMUX_DATA(IRQ3_PB_MARK, PB3MD_11),
+
+	PINMUX_DATA(PB2_DATA, PB2MD_00, PB2_IN),
+	PINMUX_DATA(SCL1_MARK, PB2MD_01),
+	PINMUX_DATA(PINT2_PB_MARK, PB2MD_10),
+	PINMUX_DATA(IRQ2_PB_MARK, PB2MD_11),
+
+	PINMUX_DATA(PB1_DATA, PB1MD_00, PB1_IN),
+	PINMUX_DATA(SDA0_MARK, PB1MD_01),
+	PINMUX_DATA(PINT1_PB_MARK, PB1MD_10),
+	PINMUX_DATA(IRQ1_PB_MARK, PB1MD_11),
+
+	PINMUX_DATA(PB0_DATA, PB0MD_00, PB0_IN),
+	PINMUX_DATA(SCL0_MARK, PB0MD_01),
+	PINMUX_DATA(PINT0_PB_MARK, PB0MD_10),
+	PINMUX_DATA(IRQ0_PB_MARK, PB0MD_11),
+
+	/* PC */
+	PINMUX_DATA(PC14_DATA, PC14MD_0, PC14_IN, PC14_OUT),
+	PINMUX_DATA(WAIT_MARK, PC14MD_1),
+
+	PINMUX_DATA(PC13_DATA, PC13MD_0, PC13_IN, PC13_OUT),
+	PINMUX_DATA(RDWR_MARK, PC13MD_1),
+
+	PINMUX_DATA(PC12_DATA, PC12MD_0, PC12_IN, PC12_OUT),
+	PINMUX_DATA(CKE_MARK, PC12MD_1),
+
+	PINMUX_DATA(PC11_DATA, PC11MD_00, PC11_IN, PC11_OUT),
+	PINMUX_DATA(CASU_MARK, PC11MD_01),
+	PINMUX_DATA(BREQ_MARK, PC11MD_10),
+
+	PINMUX_DATA(PC10_DATA, PC10MD_00, PC10_IN, PC10_OUT),
+	PINMUX_DATA(RASU_MARK, PC10MD_01),
+	PINMUX_DATA(BACK_MARK, PC10MD_10),
+
+	PINMUX_DATA(PC9_DATA, PC9MD_0, PC9_IN, PC9_OUT),
+	PINMUX_DATA(CASL_MARK, PC9MD_1),
+
+	PINMUX_DATA(PC8_DATA, PC8MD_0, PC8_IN, PC8_OUT),
+	PINMUX_DATA(RASL_MARK, PC8MD_1),
+
+	PINMUX_DATA(PC7_DATA, PC7MD_0, PC7_IN, PC7_OUT),
+	PINMUX_DATA(WE3_DQMUU_AH_ICIO_WR_MARK, PC7MD_1),
+
+	PINMUX_DATA(PC6_DATA, PC6MD_0, PC6_IN, PC6_OUT),
+	PINMUX_DATA(WE2_DQMUL_ICIORD_MARK, PC6MD_1),
+
+	PINMUX_DATA(PC5_DATA, PC5MD_0, PC5_IN, PC5_OUT),
+	PINMUX_DATA(WE1_DQMLU_WE_MARK, PC5MD_1),
+
+	PINMUX_DATA(PC4_DATA, PC4MD_0, PC4_IN, PC4_OUT),
+	PINMUX_DATA(WE0_DQMLL_MARK, PC4MD_1),
+
+	PINMUX_DATA(PC3_DATA, PC3MD_0, PC3_IN, PC3_OUT),
+	PINMUX_DATA(CS3_MARK, PC3MD_1),
+
+	PINMUX_DATA(PC2_DATA, PC2MD_0, PC2_IN, PC2_OUT),
+	PINMUX_DATA(CS2_MARK, PC2MD_1),
+
+	PINMUX_DATA(PC1_DATA, PC1MD_0, PC1_IN, PC1_OUT),
+	PINMUX_DATA(A1_MARK, PC1MD_1),
+
+	PINMUX_DATA(PC0_DATA, PC0MD_00, PC0_IN, PC0_OUT),
+	PINMUX_DATA(A0_MARK, PC0MD_01),
+	PINMUX_DATA(CS7_MARK, PC0MD_10),
+
+	/* PD */
+	PINMUX_DATA(PD15_DATA, PD15MD_000, PD15_IN, PD15_OUT),
+	PINMUX_DATA(D31_MARK, PD15MD_001),
+	PINMUX_DATA(PINT7_PD_MARK, PD15MD_010),
+	PINMUX_DATA(ADTRG_PD_MARK, PD15MD_100),
+	PINMUX_DATA(TIOC4D_MARK, PD15MD_101),
+
+	PINMUX_DATA(PD14_DATA, PD14MD_000, PD14_IN, PD14_OUT),
+	PINMUX_DATA(D30_MARK, PD14MD_001),
+	PINMUX_DATA(PINT6_PD_MARK, PD14MD_010),
+	PINMUX_DATA(TIOC4C_MARK, PD14MD_101),
+
+	PINMUX_DATA(PD13_DATA, PD13MD_000, PD13_IN, PD13_OUT),
+	PINMUX_DATA(D29_MARK, PD13MD_001),
+	PINMUX_DATA(PINT5_PD_MARK, PD13MD_010),
+	PINMUX_DATA(TEND1_PD_MARK, PD13MD_100),
+	PINMUX_DATA(TIOC4B_MARK, PD13MD_101),
+
+	PINMUX_DATA(PD12_DATA, PD12MD_000, PD12_IN, PD12_OUT),
+	PINMUX_DATA(D28_MARK, PD12MD_001),
+	PINMUX_DATA(PINT4_PD_MARK, PD12MD_010),
+	PINMUX_DATA(DACK1_PD_MARK, PD12MD_100),
+	PINMUX_DATA(TIOC4A_MARK, PD12MD_101),
+
+	PINMUX_DATA(PD11_DATA, PD11MD_000, PD11_IN, PD11_OUT),
+	PINMUX_DATA(D27_MARK, PD11MD_001),
+	PINMUX_DATA(PINT3_PD_MARK, PD11MD_010),
+	PINMUX_DATA(DREQ1_PD_MARK, PD11MD_100),
+	PINMUX_DATA(TIOC3D_MARK, PD11MD_101),
+
+	PINMUX_DATA(PD10_DATA, PD10MD_000, PD10_IN, PD10_OUT),
+	PINMUX_DATA(D26_MARK, PD10MD_001),
+	PINMUX_DATA(PINT2_PD_MARK, PD10MD_010),
+	PINMUX_DATA(TEND0_PD_MARK, PD10MD_100),
+	PINMUX_DATA(TIOC3C_MARK, PD10MD_101),
+
+	PINMUX_DATA(PD9_DATA, PD9MD_000, PD9_IN, PD9_OUT),
+	PINMUX_DATA(D25_MARK, PD9MD_001),
+	PINMUX_DATA(PINT1_PD_MARK, PD9MD_010),
+	PINMUX_DATA(DACK0_PD_MARK, PD9MD_100),
+	PINMUX_DATA(TIOC3B_MARK, PD9MD_101),
+
+	PINMUX_DATA(PD8_DATA, PD8MD_000, PD8_IN, PD8_OUT),
+	PINMUX_DATA(D24_MARK, PD8MD_001),
+	PINMUX_DATA(PINT0_PD_MARK, PD8MD_010),
+	PINMUX_DATA(DREQ0_PD_MARK, PD8MD_100),
+	PINMUX_DATA(TIOC3A_MARK, PD8MD_101),
+
+	PINMUX_DATA(PD7_DATA, PD7MD_000, PD7_IN, PD7_OUT),
+	PINMUX_DATA(D23_MARK, PD7MD_001),
+	PINMUX_DATA(IRQ7_PD_MARK, PD7MD_010),
+	PINMUX_DATA(SCS1_PD_MARK, PD7MD_011),
+	PINMUX_DATA(TCLKD_PD_MARK, PD7MD_100),
+	PINMUX_DATA(TIOC2B_MARK, PD7MD_101),
+
+	PINMUX_DATA(PD6_DATA, PD6MD_000, PD6_IN, PD6_OUT),
+	PINMUX_DATA(D22_MARK, PD6MD_001),
+	PINMUX_DATA(IRQ6_PD_MARK, PD6MD_010),
+	PINMUX_DATA(SSO1_PD_MARK, PD6MD_011),
+	PINMUX_DATA(TCLKC_PD_MARK, PD6MD_100),
+	PINMUX_DATA(TIOC2A_MARK, PD6MD_101),
+
+	PINMUX_DATA(PD5_DATA, PD5MD_000, PD5_IN, PD5_OUT),
+	PINMUX_DATA(D21_MARK, PD5MD_001),
+	PINMUX_DATA(IRQ5_PD_MARK, PD5MD_010),
+	PINMUX_DATA(SSI1_PD_MARK, PD5MD_011),
+	PINMUX_DATA(TCLKB_PD_MARK, PD5MD_100),
+	PINMUX_DATA(TIOC1B_MARK, PD5MD_101),
+
+	PINMUX_DATA(PD4_DATA, PD4MD_000, PD4_IN, PD4_OUT),
+	PINMUX_DATA(D20_MARK, PD4MD_001),
+	PINMUX_DATA(IRQ4_PD_MARK, PD4MD_010),
+	PINMUX_DATA(SSCK1_PD_MARK, PD4MD_011),
+	PINMUX_DATA(TCLKA_PD_MARK, PD4MD_100),
+	PINMUX_DATA(TIOC1A_MARK, PD4MD_101),
+
+	PINMUX_DATA(PD3_DATA, PD3MD_000, PD3_IN, PD3_OUT),
+	PINMUX_DATA(D19_MARK, PD3MD_001),
+	PINMUX_DATA(IRQ3_PD_MARK, PD3MD_010),
+	PINMUX_DATA(SCS0_PD_MARK, PD3MD_011),
+	PINMUX_DATA(DACK3_MARK, PD3MD_100),
+	PINMUX_DATA(TIOC0D_MARK, PD3MD_101),
+
+	PINMUX_DATA(PD2_DATA, PD2MD_000, PD2_IN, PD2_OUT),
+	PINMUX_DATA(D18_MARK, PD2MD_001),
+	PINMUX_DATA(IRQ2_PD_MARK, PD2MD_010),
+	PINMUX_DATA(SSO0_PD_MARK, PD2MD_011),
+	PINMUX_DATA(DREQ3_MARK, PD2MD_100),
+	PINMUX_DATA(TIOC0C_MARK, PD2MD_101),
+
+	PINMUX_DATA(PD1_DATA, PD1MD_000, PD1_IN, PD1_OUT),
+	PINMUX_DATA(D17_MARK, PD1MD_001),
+	PINMUX_DATA(IRQ1_PD_MARK, PD1MD_010),
+	PINMUX_DATA(SSI0_PD_MARK, PD1MD_011),
+	PINMUX_DATA(DACK2_MARK, PD1MD_100),
+	PINMUX_DATA(TIOC0B_MARK, PD1MD_101),
+
+	PINMUX_DATA(PD0_DATA, PD0MD_000, PD0_IN, PD0_OUT),
+	PINMUX_DATA(D16_MARK, PD0MD_001),
+	PINMUX_DATA(IRQ0_PD_MARK, PD0MD_010),
+	PINMUX_DATA(SSCK0_PD_MARK, PD0MD_011),
+	PINMUX_DATA(DREQ2_MARK, PD0MD_100),
+	PINMUX_DATA(TIOC0A_MARK, PD0MD_101),
+
+	/* PE */
+	PINMUX_DATA(PE15_DATA, PE15MD_00, PE15_IN, PE15_OUT),
+	PINMUX_DATA(IOIS16_MARK, PE15MD_01),
+	PINMUX_DATA(RTS3_MARK, PE15MD_11),
+
+	PINMUX_DATA(PE14_DATA, PE14MD_00, PE14_IN, PE14_OUT),
+	PINMUX_DATA(CS1_MARK, PE14MD_01),
+	PINMUX_DATA(CTS3_MARK, PE14MD_11),
+
+	PINMUX_DATA(PE13_DATA, PE13MD_00, PE13_IN, PE13_OUT),
+	PINMUX_DATA(TXD3_MARK, PE13MD_11),
+
+	PINMUX_DATA(PE12_DATA, PE12MD_00, PE12_IN, PE12_OUT),
+	PINMUX_DATA(RXD3_MARK, PE12MD_11),
+
+	PINMUX_DATA(PE11_DATA, PE11MD_000, PE11_IN, PE11_OUT),
+	PINMUX_DATA(CS6_CE1B_MARK, PE11MD_001),
+	PINMUX_DATA(IRQ7_PE_MARK, PE11MD_010),
+	PINMUX_DATA(TEND1_PE_MARK, PE11MD_100),
+
+	PINMUX_DATA(PE10_DATA, PE10MD_000, PE10_IN, PE10_OUT),
+	PINMUX_DATA(CE2B_MARK, PE10MD_001),
+	PINMUX_DATA(IRQ6_PE_MARK, PE10MD_010),
+	PINMUX_DATA(TEND0_PE_MARK, PE10MD_100),
+
+	PINMUX_DATA(PE9_DATA, PE9MD_00, PE9_IN, PE9_OUT),
+	PINMUX_DATA(CS5_CE1A_MARK, PE9MD_01),
+	PINMUX_DATA(IRQ5_PE_MARK, PE9MD_10),
+	PINMUX_DATA(SCK3_MARK, PE9MD_11),
+
+	PINMUX_DATA(PE8_DATA, PE8MD_00, PE8_IN, PE8_OUT),
+	PINMUX_DATA(CE2A_MARK, PE8MD_01),
+	PINMUX_DATA(IRQ4_PE_MARK, PE8MD_10),
+	PINMUX_DATA(SCK2_MARK, PE8MD_11),
+
+	PINMUX_DATA(PE7_DATA, PE7MD_000, PE7_IN, PE7_OUT),
+	PINMUX_DATA(FRAME_MARK, PE7MD_001),
+	PINMUX_DATA(IRQ3_PE_MARK, PE7MD_010),
+	PINMUX_DATA(TXD2_MARK, PE7MD_011),
+	PINMUX_DATA(DACK1_PE_MARK, PE7MD_100),
+
+	PINMUX_DATA(PE6_DATA, PE6MD_000, PE6_IN, PE6_OUT),
+	PINMUX_DATA(A25_MARK, PE6MD_001),
+	PINMUX_DATA(IRQ2_PE_MARK, PE6MD_010),
+	PINMUX_DATA(RXD2_MARK, PE6MD_011),
+	PINMUX_DATA(DREQ1_PE_MARK, PE6MD_100),
+
+	PINMUX_DATA(PE5_DATA, PE5MD_000, PE5_IN, PE5_OUT),
+	PINMUX_DATA(A24_MARK, PE5MD_001),
+	PINMUX_DATA(IRQ1_PE_MARK, PE5MD_010),
+	PINMUX_DATA(TXD1_MARK, PE5MD_011),
+	PINMUX_DATA(DACK0_PE_MARK, PE5MD_100),
+
+	PINMUX_DATA(PE4_DATA, PE4MD_000, PE4_IN, PE4_OUT),
+	PINMUX_DATA(A23_MARK, PE4MD_001),
+	PINMUX_DATA(IRQ0_PE_MARK, PE4MD_010),
+	PINMUX_DATA(RXD1_MARK, PE4MD_011),
+	PINMUX_DATA(DREQ0_PE_MARK, PE4MD_100),
+
+	PINMUX_DATA(PE3_DATA, PE3MD_00, PE3_IN, PE3_OUT),
+	PINMUX_DATA(A22_MARK, PE3MD_01),
+	PINMUX_DATA(SCK1_MARK, PE3MD_11),
+
+	PINMUX_DATA(PE2_DATA, PE2MD_00, PE2_IN, PE2_OUT),
+	PINMUX_DATA(A21_MARK, PE2MD_01),
+	PINMUX_DATA(SCK0_MARK, PE2MD_11),
+
+	PINMUX_DATA(PE1_DATA, PE1MD_00, PE1_IN, PE1_OUT),
+	PINMUX_DATA(CS4_MARK, PE1MD_01),
+	PINMUX_DATA(MRES_MARK, PE1MD_10),
+	PINMUX_DATA(TXD0_MARK, PE1MD_11),
+
+	PINMUX_DATA(PE0_DATA, PE0MD_000, PE0_IN, PE0_OUT),
+	PINMUX_DATA(BS_MARK, PE0MD_001),
+	PINMUX_DATA(RXD0_MARK, PE0MD_011),
+	PINMUX_DATA(ADTRG_PE_MARK, PE0MD_100),
+
+	/* PF */
+	PINMUX_DATA(PF30_DATA, PF30MD_0, PF30_IN, PF30_OUT),
+	PINMUX_DATA(AUDIO_CLK_MARK, PF30MD_1),
+
+	PINMUX_DATA(PF29_DATA, PF29MD_0, PF29_IN, PF29_OUT),
+	PINMUX_DATA(SSIDATA3_MARK, PF29MD_1),
+
+	PINMUX_DATA(PF28_DATA, PF28MD_0, PF28_IN, PF28_OUT),
+	PINMUX_DATA(SSIWS3_MARK, PF28MD_1),
+
+	PINMUX_DATA(PF27_DATA, PF27MD_0, PF27_IN, PF27_OUT),
+	PINMUX_DATA(SSISCK3_MARK, PF27MD_1),
+
+	PINMUX_DATA(PF26_DATA, PF26MD_0, PF26_IN, PF26_OUT),
+	PINMUX_DATA(SSIDATA2_MARK, PF26MD_1),
+
+	PINMUX_DATA(PF25_DATA, PF25MD_0, PF25_IN, PF25_OUT),
+	PINMUX_DATA(SSIWS2_MARK, PF25MD_1),
+
+	PINMUX_DATA(PF24_DATA, PF24MD_0, PF24_IN, PF24_OUT),
+	PINMUX_DATA(SSISCK2_MARK, PF24MD_1),
+
+	PINMUX_DATA(PF23_DATA, PF23MD_00, PF23_IN, PF23_OUT),
+	PINMUX_DATA(SSIDATA1_MARK, PF23MD_01),
+	PINMUX_DATA(LCD_VEPWC_MARK, PF23MD_10),
+
+	PINMUX_DATA(PF22_DATA, PF22MD_00, PF22_IN, PF22_OUT),
+	PINMUX_DATA(SSIWS1_MARK, PF22MD_01),
+	PINMUX_DATA(LCD_VCPWC_MARK, PF22MD_10),
+
+	PINMUX_DATA(PF21_DATA, PF21MD_00, PF21_IN, PF21_OUT),
+	PINMUX_DATA(SSISCK1_MARK, PF21MD_01),
+	PINMUX_DATA(LCD_CLK_MARK, PF21MD_10),
+
+	PINMUX_DATA(PF20_DATA, PF20MD_00, PF20_IN, PF20_OUT),
+	PINMUX_DATA(SSIDATA0_MARK, PF20MD_01),
+	PINMUX_DATA(LCD_FLM_MARK, PF20MD_10),
+
+	PINMUX_DATA(PF19_DATA, PF19MD_00, PF19_IN, PF19_OUT),
+	PINMUX_DATA(SSIWS0_MARK, PF19MD_01),
+	PINMUX_DATA(LCD_M_DISP_MARK, PF19MD_10),
+
+	PINMUX_DATA(PF18_DATA, PF18MD_00, PF18_IN, PF18_OUT),
+	PINMUX_DATA(SSISCK0_MARK, PF18MD_01),
+	PINMUX_DATA(LCD_CL2_MARK, PF18MD_10),
+
+	PINMUX_DATA(PF17_DATA, PF17MD_00, PF17_IN, PF17_OUT),
+	PINMUX_DATA(FCE_MARK, PF17MD_01),
+	PINMUX_DATA(LCD_CL1_MARK, PF17MD_10),
+
+	PINMUX_DATA(PF16_DATA, PF16MD_00, PF16_IN, PF16_OUT),
+	PINMUX_DATA(FRB_MARK, PF16MD_01),
+	PINMUX_DATA(LCD_DON_MARK, PF16MD_10),
+
+	PINMUX_DATA(PF15_DATA, PF15MD_00, PF15_IN, PF15_OUT),
+	PINMUX_DATA(NAF7_MARK, PF15MD_01),
+	PINMUX_DATA(LCD_DATA15_MARK, PF15MD_10),
+
+	PINMUX_DATA(PF14_DATA, PF14MD_00, PF14_IN, PF14_OUT),
+	PINMUX_DATA(NAF6_MARK, PF14MD_01),
+	PINMUX_DATA(LCD_DATA14_MARK, PF14MD_10),
+
+	PINMUX_DATA(PF13_DATA, PF13MD_00, PF13_IN, PF13_OUT),
+	PINMUX_DATA(NAF5_MARK, PF13MD_01),
+	PINMUX_DATA(LCD_DATA13_MARK, PF13MD_10),
+
+	PINMUX_DATA(PF12_DATA, PF12MD_00, PF12_IN, PF12_OUT),
+	PINMUX_DATA(NAF4_MARK, PF12MD_01),
+	PINMUX_DATA(LCD_DATA12_MARK, PF12MD_10),
+
+	PINMUX_DATA(PF11_DATA, PF11MD_00, PF11_IN, PF11_OUT),
+	PINMUX_DATA(NAF3_MARK, PF11MD_01),
+	PINMUX_DATA(LCD_DATA11_MARK, PF11MD_10),
+
+	PINMUX_DATA(PF10_DATA, PF10MD_00, PF10_IN, PF10_OUT),
+	PINMUX_DATA(NAF2_MARK, PF10MD_01),
+	PINMUX_DATA(LCD_DATA10_MARK, PF10MD_10),
+
+	PINMUX_DATA(PF9_DATA, PF9MD_00, PF9_IN, PF9_OUT),
+	PINMUX_DATA(NAF1_MARK, PF9MD_01),
+	PINMUX_DATA(LCD_DATA9_MARK, PF9MD_10),
+
+	PINMUX_DATA(PF8_DATA, PF8MD_00, PF8_IN, PF8_OUT),
+	PINMUX_DATA(NAF0_MARK, PF8MD_01),
+	PINMUX_DATA(LCD_DATA8_MARK, PF8MD_10),
+
+	PINMUX_DATA(PF7_DATA, PF7MD_00, PF7_IN, PF7_OUT),
+	PINMUX_DATA(FSC_MARK, PF7MD_01),
+	PINMUX_DATA(LCD_DATA7_MARK, PF7MD_10),
+	PINMUX_DATA(SCS1_PF_MARK, PF7MD_11),
+
+	PINMUX_DATA(PF6_DATA, PF6MD_00, PF6_IN, PF6_OUT),
+	PINMUX_DATA(FOE_MARK, PF6MD_01),
+	PINMUX_DATA(LCD_DATA6_MARK, PF6MD_10),
+	PINMUX_DATA(SSO1_PF_MARK, PF6MD_11),
+
+	PINMUX_DATA(PF5_DATA, PF5MD_00, PF5_IN, PF5_OUT),
+	PINMUX_DATA(FCDE_MARK, PF5MD_01),
+	PINMUX_DATA(LCD_DATA5_MARK, PF5MD_10),
+	PINMUX_DATA(SSI1_PF_MARK, PF5MD_11),
+
+	PINMUX_DATA(PF4_DATA, PF4MD_00, PF4_IN, PF4_OUT),
+	PINMUX_DATA(FWE_MARK, PF4MD_01),
+	PINMUX_DATA(LCD_DATA4_MARK, PF4MD_10),
+	PINMUX_DATA(SSCK1_PF_MARK, PF4MD_11),
+
+	PINMUX_DATA(PF3_DATA, PF3MD_00, PF3_IN, PF3_OUT),
+	PINMUX_DATA(TCLKD_PF_MARK, PF3MD_01),
+	PINMUX_DATA(LCD_DATA3_MARK, PF3MD_10),
+	PINMUX_DATA(SCS0_PF_MARK, PF3MD_11),
+
+	PINMUX_DATA(PF2_DATA, PF2MD_00, PF2_IN, PF2_OUT),
+	PINMUX_DATA(TCLKC_PF_MARK, PF2MD_01),
+	PINMUX_DATA(LCD_DATA2_MARK, PF2MD_10),
+	PINMUX_DATA(SSO0_PF_MARK, PF2MD_11),
+
+	PINMUX_DATA(PF1_DATA, PF1MD_00, PF1_IN, PF1_OUT),
+	PINMUX_DATA(TCLKB_PF_MARK, PF1MD_01),
+	PINMUX_DATA(LCD_DATA1_MARK, PF1MD_10),
+	PINMUX_DATA(SSI0_PF_MARK, PF1MD_11),
+
+	PINMUX_DATA(PF0_DATA, PF0MD_00, PF0_IN, PF0_OUT),
+	PINMUX_DATA(TCLKA_PF_MARK, PF0MD_01),
+	PINMUX_DATA(LCD_DATA0_MARK, PF0MD_10),
+	PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+	/* PA */
+	PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+	PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+	PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+	PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+	PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+	PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+	PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+	PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+	/* PB */
+	PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+	PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+	PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+	PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+	PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+	PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+	PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+	PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+	PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+	PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+	PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+	PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+	PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+	/* PC */
+	PINMUX_GPIO(GPIO_PC14, PC14_DATA),
+	PINMUX_GPIO(GPIO_PC13, PC13_DATA),
+	PINMUX_GPIO(GPIO_PC12, PC12_DATA),
+	PINMUX_GPIO(GPIO_PC11, PC11_DATA),
+	PINMUX_GPIO(GPIO_PC10, PC10_DATA),
+	PINMUX_GPIO(GPIO_PC9, PC9_DATA),
+	PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+	PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+	PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+	PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+	PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+	PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+	PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+	PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+	PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+	/* PD */
+	PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+	PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+	PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+	PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+	PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+	PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+	PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+	PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+	PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+	PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+	PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+	PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+	PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+	PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+	PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+	PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+	/* PE */
+	PINMUX_GPIO(GPIO_PE15, PE15_DATA),
+	PINMUX_GPIO(GPIO_PE14, PE14_DATA),
+	PINMUX_GPIO(GPIO_PE13, PE13_DATA),
+	PINMUX_GPIO(GPIO_PE12, PE12_DATA),
+	PINMUX_GPIO(GPIO_PE11, PE11_DATA),
+	PINMUX_GPIO(GPIO_PE10, PE10_DATA),
+	PINMUX_GPIO(GPIO_PE9, PE9_DATA),
+	PINMUX_GPIO(GPIO_PE8, PE8_DATA),
+	PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+	PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+	PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+	PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+	PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+	PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+	PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+	PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+	/* PF */
+	PINMUX_GPIO(GPIO_PF30, PF30_DATA),
+	PINMUX_GPIO(GPIO_PF29, PF29_DATA),
+	PINMUX_GPIO(GPIO_PF28, PF28_DATA),
+	PINMUX_GPIO(GPIO_PF27, PF27_DATA),
+	PINMUX_GPIO(GPIO_PF26, PF26_DATA),
+	PINMUX_GPIO(GPIO_PF25, PF25_DATA),
+	PINMUX_GPIO(GPIO_PF24, PF24_DATA),
+	PINMUX_GPIO(GPIO_PF23, PF23_DATA),
+	PINMUX_GPIO(GPIO_PF22, PF22_DATA),
+	PINMUX_GPIO(GPIO_PF21, PF21_DATA),
+	PINMUX_GPIO(GPIO_PF20, PF20_DATA),
+	PINMUX_GPIO(GPIO_PF19, PF19_DATA),
+	PINMUX_GPIO(GPIO_PF18, PF18_DATA),
+	PINMUX_GPIO(GPIO_PF17, PF17_DATA),
+	PINMUX_GPIO(GPIO_PF16, PF16_DATA),
+	PINMUX_GPIO(GPIO_PF15, PF15_DATA),
+	PINMUX_GPIO(GPIO_PF14, PF14_DATA),
+	PINMUX_GPIO(GPIO_PF13, PF13_DATA),
+	PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+	PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+	PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+	PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+	PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+	PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+	PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+	PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+	PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+	PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+	PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+	PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+	PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+	/* INTC */
+	PINMUX_GPIO(GPIO_FN_PINT7_PB, PINT7_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT6_PB, PINT6_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT5_PB, PINT5_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT4_PB, PINT4_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT3_PB, PINT3_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT2_PB, PINT2_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT1_PB, PINT1_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT0_PB, PINT0_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT7_PD, PINT7_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT6_PD, PINT6_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT5_PD, PINT5_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT4_PD, PINT4_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT3_PD, PINT3_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT2_PD, PINT2_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT1_PD, PINT1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_PINT0_PD, PINT0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ7_PB, IRQ7_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ6_PB, IRQ6_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ5_PB, IRQ5_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ4_PB, IRQ4_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ3_PB, IRQ3_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ2_PB, IRQ2_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ1_PB, IRQ1_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ0_PB, IRQ0_PB_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ7_PD, IRQ7_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ6_PD, IRQ6_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ5_PD, IRQ5_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ4_PD, IRQ4_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ3_PD, IRQ3_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ2_PD, IRQ2_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ1_PD, IRQ1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ0_PD, IRQ0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ7_PE, IRQ7_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ6_PE, IRQ6_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ5_PE, IRQ5_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ4_PE, IRQ4_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+
+	PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+	PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQOUT_REFOUT, IRQOUT_REFOUT_MARK),
+	PINMUX_GPIO(GPIO_FN_UBCTRG, UBCTRG_MARK),
+
+	/* CAN */
+	PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+	PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+	PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+	PINMUX_GPIO(GPIO_FN_CTX0_CTX1, CTX0_CTX1_MARK),
+	PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+	PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+
+	/* IIC3 */
+	PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+	PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+	PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+	PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+	PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+	PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+	PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+	PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+	/* DMAC */
+	PINMUX_GPIO(GPIO_FN_TEND0_PD, TEND0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_TEND0_PE, TEND0_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK0_PD, DACK0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK0_PE, DACK0_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ0_PD, DREQ0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ0_PE, DREQ0_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_TEND1_PD, TEND1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_TEND1_PE, TEND1_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK1_PD, DACK1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK1_PE, DACK1_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ1_PD, DREQ1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ1_PE, DREQ1_PE_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+
+	/* ADC */
+	PINMUX_GPIO(GPIO_FN_ADTRG_PD, ADTRG_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_ADTRG_PE, ADTRG_PE_MARK),
+
+	/* BSC */
+	PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+	PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+	PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+	PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+	PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+	PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+	PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+	PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+	PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+	PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+	PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+	PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+	PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+	PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+	PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+	PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+	PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+	PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+	PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+	PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+	PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+	PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+	PINMUX_GPIO(GPIO_FN_MRES, MRES_MARK),
+	PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+	PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+	PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+	PINMUX_GPIO(GPIO_FN_CS6_CE1B, CS6_CE1B_MARK),
+	PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+	PINMUX_GPIO(GPIO_FN_CS5_CE1A, CS5_CE1A_MARK),
+	PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+	PINMUX_GPIO(GPIO_FN_FRAME, FRAME_MARK),
+	PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+	PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+	PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+	PINMUX_GPIO(GPIO_FN_CASU, CASU_MARK),
+	PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+	PINMUX_GPIO(GPIO_FN_RASU, RASU_MARK),
+	PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+	PINMUX_GPIO(GPIO_FN_CASL, CASL_MARK),
+	PINMUX_GPIO(GPIO_FN_RASL, RASL_MARK),
+	PINMUX_GPIO(GPIO_FN_WE3_DQMUU_AH_ICIO_WR, WE3_DQMUU_AH_ICIO_WR_MARK),
+	PINMUX_GPIO(GPIO_FN_WE2_DQMUL_ICIORD, WE2_DQMUL_ICIORD_MARK),
+	PINMUX_GPIO(GPIO_FN_WE1_DQMLU_WE, WE1_DQMLU_WE_MARK),
+	PINMUX_GPIO(GPIO_FN_WE0_DQMLL, WE0_DQMLL_MARK),
+	PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+	PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+	PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+	PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+	PINMUX_GPIO(GPIO_FN_CS7, CS7_MARK),
+
+	/* TMU */
+	PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+	PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKD_PD, TCLKD_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKC_PD, TCLKC_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKB_PD, TCLKB_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKA_PD, TCLKA_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKD_PF, TCLKD_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKC_PF, TCLKC_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKB_PF, TCLKB_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_TCLKA_PF, TCLKA_PF_MARK),
+
+	/* SSU */
+	PINMUX_GPIO(GPIO_FN_SCS0_PD, SCS0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SSO0_PD, SSO0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SSI0_PD, SSI0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SSCK0_PD, SSCK0_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCS0_PF, SCS0_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SSO0_PF, SSO0_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SSI0_PF, SSI0_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SSCK0_PF, SSCK0_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SCS1_PD, SCS1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SSO1_PD, SSO1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SSI1_PD, SSI1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SSCK1_PD, SSCK1_PD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCS1_PF, SCS1_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SSO1_PF, SSO1_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SSI1_PF, SSI1_PF_MARK),
+	PINMUX_GPIO(GPIO_FN_SSCK1_PF, SSCK1_PF_MARK),
+
+	/* SCIF */
+	PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+	PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+	PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+	PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+	PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+	PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+	PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+	PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+	PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+	PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+	PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+	PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+	PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+	PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+	/* SSI */
+	PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+	PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+	PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+	PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIDATA0, SSIDATA0_MARK),
+	PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+	PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+
+	/* FLCTL */
+	PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+	PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+	PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+	PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+	PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+	PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+
+	/* LCDC */
+	PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+	{ PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) {
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		PB11_IN, PB11_OUT,
+		PB10_IN, PB10_OUT,
+		PB9_IN, PB9_OUT,
+		PB8_IN, PB8_OUT,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0 }
+	},
+	{ PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) {
+		PB11MD_0, PB11MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB10MD_0, PB10MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB9MD_00, PB9MD_01, PB9MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB8MD_00, PB8MD_01, PB8MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) {
+		PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) {
+		PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) {
+		0, 0,
+		PC14_IN, PC14_OUT,
+		PC13_IN, PC13_OUT,
+		PC12_IN, PC12_OUT,
+		PC11_IN, PC11_OUT,
+		PC10_IN, PC10_OUT,
+		PC9_IN, PC9_OUT,
+		PC8_IN, PC8_OUT,
+		PC7_IN, PC7_OUT,
+		PC6_IN, PC6_OUT,
+		PC5_IN, PC5_OUT,
+		PC4_IN, PC4_OUT,
+		PC3_IN, PC3_OUT,
+		PC2_IN, PC2_OUT,
+		PC1_IN, PC1_OUT,
+		PC0_IN, PC0_OUT }
+	},
+	{ PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC14MD_0, PC14MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC13MD_0, PC13MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC12MD_0, PC12MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) {
+		PC11MD_00, PC11MD_01, PC11MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC10MD_00, PC10MD_01, PC10MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC9MD_0, PC9MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC8MD_0, PC8MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) {
+		PC7MD_0, PC7MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC6MD_0, PC6MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC5MD_0, PC5MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC4MD_0, PC4MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) {
+		PC3MD_0, PC3MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC2MD_0, PC2MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC1MD_0, PC1MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PC0MD_00, PC0MD_01, PC0MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) {
+		PD15_IN, PD15_OUT,
+		PD14_IN, PD14_OUT,
+		PD13_IN, PD13_OUT,
+		PD12_IN, PD12_OUT,
+		PD11_IN, PD11_OUT,
+		PD10_IN, PD10_OUT,
+		PD9_IN, PD9_OUT,
+		PD8_IN, PD8_OUT,
+		PD7_IN, PD7_OUT,
+		PD6_IN, PD6_OUT,
+		PD5_IN, PD5_OUT,
+		PD4_IN, PD4_OUT,
+		PD3_IN, PD3_OUT,
+		PD2_IN, PD2_OUT,
+		PD1_IN, PD1_OUT,
+		PD0_IN, PD0_OUT }
+	},
+	{ PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) {
+		PD15MD_000, PD15MD_001, PD15MD_010, 0,
+		PD15MD_100, PD15MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD14MD_000, PD14MD_001, PD14MD_010, 0,
+		0, PD14MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD13MD_000, PD13MD_001, PD13MD_010, 0,
+		PD13MD_100, PD13MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD12MD_000, PD12MD_001, PD12MD_010, 0,
+		PD12MD_100, PD12MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) {
+		PD11MD_000, PD11MD_001, PD11MD_010, 0,
+		PD11MD_100, PD11MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD10MD_000, PD10MD_001, PD10MD_010, 0,
+		PD10MD_100, PD10MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD9MD_000, PD9MD_001, PD9MD_010, 0,
+		PD9MD_100, PD9MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD8MD_000, PD8MD_001, PD8MD_010, 0,
+		PD8MD_100, PD8MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) {
+		PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011,
+		PD7MD_100, PD7MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011,
+		PD6MD_100, PD6MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011,
+		PD5MD_100, PD5MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011,
+		PD4MD_100, PD4MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) {
+		PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011,
+		PD3MD_100, PD3MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011,
+		PD2MD_100, PD2MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011,
+		PD1MD_100, PD1MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011,
+		PD0MD_100, PD0MD_101, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) {
+		PE15_IN, PE15_OUT,
+		PE14_IN, PE14_OUT,
+		PE13_IN, PE13_OUT,
+		PE12_IN, PE12_OUT,
+		PE11_IN, PE11_OUT,
+		PE10_IN, PE10_OUT,
+		PE9_IN, PE9_OUT,
+		PE8_IN, PE8_OUT,
+		PE7_IN, PE7_OUT,
+		PE6_IN, PE6_OUT,
+		PE5_IN, PE5_OUT,
+		PE4_IN, PE4_OUT,
+		PE3_IN, PE3_OUT,
+		PE2_IN, PE2_OUT,
+		PE1_IN, PE1_OUT,
+		PE0_IN, PE0_OUT }
+	},
+	{ PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) {
+		PE15MD_00, PE15MD_01, 0, PE15MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE14MD_00, PE14MD_01, 0, PE14MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE13MD_00, 0, 0, PE13MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE12MD_00, 0, 0, PE12MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) {
+		PE11MD_000, PE11MD_001, PE11MD_010, 0,
+		PE11MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE10MD_000, PE10MD_001, PE10MD_010, 0,
+		PE10MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) {
+		PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011,
+		PE7MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011,
+		PE6MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011,
+		PE5MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011,
+		PE4MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) {
+		PE3MD_00, PE3MD_01, 0, PE3MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE2MD_00, PE2MD_01, 0, PE2MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PE0MD_000, PE0MD_001, 0, PE0MD_011,
+		PE0MD_100, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) {
+		0, 0,
+		PF30_IN, PF30_OUT,
+		PF29_IN, PF29_OUT,
+		PF28_IN, PF28_OUT,
+		PF27_IN, PF27_OUT,
+		PF26_IN, PF26_OUT,
+		PF25_IN, PF25_OUT,
+		PF24_IN, PF24_OUT,
+		PF23_IN, PF23_OUT,
+		PF22_IN, PF22_OUT,
+		PF21_IN, PF21_OUT,
+		PF20_IN, PF20_OUT,
+		PF19_IN, PF19_OUT,
+		PF18_IN, PF18_OUT,
+		PF17_IN, PF17_OUT,
+		PF16_IN, PF16_OUT }
+	},
+	{ PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) {
+		PF15_IN, PF15_OUT,
+		PF14_IN, PF14_OUT,
+		PF13_IN, PF13_OUT,
+		PF12_IN, PF12_OUT,
+		PF11_IN, PF11_OUT,
+		PF10_IN, PF10_OUT,
+		PF9_IN, PF9_OUT,
+		PF8_IN, PF8_OUT,
+		PF7_IN, PF7_OUT,
+		PF6_IN, PF6_OUT,
+		PF5_IN, PF5_OUT,
+		PF4_IN, PF4_OUT,
+		PF3_IN, PF3_OUT,
+		PF2_IN, PF2_OUT,
+		PF1_IN, PF1_OUT,
+		PF0_IN, PF0_OUT }
+	},
+	{ PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF30MD_0, PF30MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF29MD_0, PF29MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF28MD_0, PF28MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) {
+		PF27MD_0, PF27MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF26MD_0, PF26MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF25MD_0, PF25MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF24MD_0, PF24MD_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) {
+		PF23MD_00, PF23MD_01, PF23MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF22MD_00, PF22MD_01, PF22MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF21MD_00, PF21MD_01, PF21MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF20MD_00, PF20MD_01, PF20MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) {
+		PF19MD_00, PF19MD_01, PF19MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF18MD_00, PF18MD_01, PF18MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF17MD_00, PF17MD_01, PF17MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF16MD_00, PF16MD_01, PF16MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) {
+		PF15MD_00, PF15MD_01, PF15MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF14MD_00, PF14MD_01, PF14MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF13MD_00, PF13MD_01, PF13MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF12MD_00, PF12MD_01, PF12MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) {
+		PF11MD_00, PF11MD_01, PF11MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF10MD_00, PF10MD_01, PF10MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF9MD_00, PF9MD_01, PF9MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF8MD_00, PF8MD_01, PF8MD_10, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) {
+		PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) {
+		PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+		PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+	},
+	{}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+	{ PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+		PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+	},
+	{ PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) {
+		0, 0, 0, PB12_DATA,
+		PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+		PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+		PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+	},
+	{ PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) {
+		0, PC14_DATA, PC13_DATA, PC12_DATA,
+		PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+		PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+		PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+	},
+	{ PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) {
+		PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+		PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+		PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+		PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+	},
+	{ PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) {
+		PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+		PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+		PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+		PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+	},
+	{ PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) {
+		0, PF30_DATA, PF29_DATA, PF28_DATA,
+		PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+		PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+		PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+	},
+	{ PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) {
+		PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+		PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+		PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+		PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+	},
+	{ },
+};
+
+static struct pinmux_info sh7203_pinmux_info = {
+	.name = "sh7203_pfc",
+	.reserved_id = PINMUX_RESERVED,
+	.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+	.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+	.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+	.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.first_gpio = GPIO_PA7,
+	.last_gpio = GPIO_FN_LCD_DATA0,
+
+	.gpios = pinmux_gpios,
+	.cfg_regs = pinmux_config_regs,
+	.data_regs = pinmux_data_regs,
+
+	.gpio_data = pinmux_data,
+	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+	return register_pinmux(&sh7203_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 511de55..e07c69e 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -24,4 +24,8 @@
 clock-$(CONFIG_CPU_SUBTYPE_SH7720)	:= clock-sh7710.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7712)	:= clock-sh7712.o
 
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7720)	:= pinmux-sh7720.o
+
 obj-y	+= $(clock-y)
+obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
new file mode 100644
index 0000000..9ca1546
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
@@ -0,0 +1,1242 @@
+/*
+ * SH7720 Pinmux
+ *
+ *  Copyright (C) 2008  Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7720.h>
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+	PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+	PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+	PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+	PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+	PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+	PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+	PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+	PTE6_DATA, PTE5_DATA, PTE4_DATA,
+	PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+	PTF6_DATA, PTF5_DATA, PTF4_DATA,
+	PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+	PTG6_DATA, PTG5_DATA, PTG4_DATA,
+	PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+	PTH6_DATA, PTH5_DATA, PTH4_DATA,
+	PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+	PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+	PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+	PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+	PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA,
+	PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+	PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+	PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+	PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+	PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+	PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+	PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+	PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+	PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+	PINMUX_DATA_END,
+
+	PINMUX_INPUT_BEGIN,
+	PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+	PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+	PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+	PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+	PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+	PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+	PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+	PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+	PTE6_IN, PTE5_IN, PTE4_IN,
+	PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+	PTF6_IN, PTF5_IN, PTF4_IN,
+	PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+	PTG6_IN, PTG5_IN, PTG4_IN,
+	PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+	PTH6_IN, PTH5_IN, PTH4_IN,
+	PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+	PTJ6_IN, PTJ5_IN, PTJ4_IN,
+	PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+	PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+	PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN,
+	PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+	PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+	PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+	PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+	PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+	PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+	PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+	PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+	PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+	PINMUX_INPUT_END,
+
+	PINMUX_INPUT_PULLUP_BEGIN,
+	PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+	PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+	PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+	PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+	PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+	PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+	PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+	PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+	PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+	PTF0_IN_PU,
+	PTG6_IN_PU, PTG5_IN_PU, PTG4_IN_PU,
+	PTG3_IN_PU, PTG2_IN_PU, PTG1_IN_PU, PTG0_IN_PU,
+	PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+	PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+	PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+	PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+	PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+	PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU,
+	PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+	PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+	PTP4_IN_PU, PTP3_IN_PU, PTP2_IN_PU, PTP1_IN_PU, PTP0_IN_PU,
+	PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+	PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+	PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+	PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+	PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+	PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+	PINMUX_INPUT_PULLUP_END,
+
+	PINMUX_OUTPUT_BEGIN,
+	PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+	PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+	PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+	PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+	PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+	PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+	PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+	PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+	PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+	PTF0_OUT,
+	PTG6_OUT, PTG5_OUT, PTG4_OUT,
+	PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+	PTH6_OUT, PTH5_OUT, PTH4_OUT,
+	PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+	PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+	PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+	PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+	PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT,
+	PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+	PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+	PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+	PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+	PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+	PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+	PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+	PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+	PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+	PINMUX_OUTPUT_END,
+
+	PINMUX_FUNCTION_BEGIN,
+	PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+	PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+	PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+	PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+	PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+	PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+	PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+	PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+	PTE6_FN, PTE5_FN, PTE4_FN,
+	PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+	PTF6_FN, PTF5_FN, PTF4_FN,
+	PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+	PTG6_FN, PTG5_FN, PTG4_FN,
+	PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+	PTH6_FN, PTH5_FN, PTH4_FN,
+	PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+	PTJ6_FN, PTJ5_FN, PTJ4_FN,
+	PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+	PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+	PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN,
+	PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+	PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+	PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+	PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+	PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+	PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+	PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+	PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+	PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+
+	PSELA_1_0_00, PSELA_1_0_01, PSELA_1_0_10,
+	PSELA_3_2_00, PSELA_3_2_01, PSELA_3_2_10, PSELA_3_2_11,
+	PSELA_5_4_00, PSELA_5_4_01, PSELA_5_4_10, PSELA_5_4_11,
+	PSELA_7_6_00, PSELA_7_6_01, PSELA_7_6_10,
+	PSELA_9_8_00, PSELA_9_8_01, PSELA_9_8_10,
+	PSELA_11_10_00, PSELA_11_10_01, PSELA_11_10_10,
+	PSELA_13_12_00, PSELA_13_12_10,
+	PSELA_15_14_00, PSELA_15_14_10,
+	PSELB_9_8_00, PSELB_9_8_11,
+	PSELB_11_10_00, PSELB_11_10_01, PSELB_11_10_10, PSELB_11_10_11,
+	PSELB_13_12_00, PSELB_13_12_01, PSELB_13_12_10, PSELB_13_12_11,
+	PSELB_15_14_00, PSELB_15_14_11,
+	PSELC_9_8_00, PSELC_9_8_10,
+	PSELC_11_10_00, PSELC_11_10_10,
+	PSELC_13_12_00,	PSELC_13_12_01,	PSELC_13_12_10,
+	PSELC_15_14_00,	PSELC_15_14_01,	PSELC_15_14_10,
+	PSELD_1_0_00, PSELD_1_0_10,
+	PSELD_11_10_00,	PSELD_11_10_01,
+	PSELD_15_14_00,	PSELD_15_14_01,	PSELD_15_14_10,
+	PINMUX_FUNCTION_END,
+
+	PINMUX_MARK_BEGIN,
+	D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+	D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+	D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+	D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+	IOIS16_MARK, RAS_MARK, CAS_MARK, CKE_MARK,
+	CS5B_CE1A_MARK, CS6B_CE1B_MARK,
+	A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+	A21_MARK, A20_MARK, A19_MARK, A0_MARK,
+	REFOUT_MARK, IRQOUT_MARK,
+	LCD_DATA15_MARK, LCD_DATA14_MARK,
+	LCD_DATA13_MARK, LCD_DATA12_MARK,
+	LCD_DATA11_MARK, LCD_DATA10_MARK,
+	LCD_DATA9_MARK, LCD_DATA8_MARK,
+	LCD_DATA7_MARK, LCD_DATA6_MARK,
+	LCD_DATA5_MARK, LCD_DATA4_MARK,
+	LCD_DATA3_MARK, LCD_DATA2_MARK,
+	LCD_DATA1_MARK, LCD_DATA0_MARK,
+	LCD_M_DISP_MARK,
+	LCD_CL1_MARK, LCD_CL2_MARK,
+	LCD_DON_MARK, LCD_FLM_MARK,
+	LCD_VEPWC_MARK, LCD_VCPWC_MARK,
+	AFE_RXIN_MARK, AFE_RDET_MARK,
+	AFE_FS_MARK, AFE_TXOUT_MARK,
+	AFE_SCLK_MARK, AFE_RLYCNT_MARK,
+	AFE_HC1_MARK,
+	IIC_SCL_MARK, IIC_SDA_MARK,
+	DA1_MARK, DA0_MARK,
+	AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+	USB1D_RCV_MARK, USB1D_TXSE0_MARK,
+	USB1D_TXDPLS_MARK, USB1D_DMNS_MARK,
+	USB1D_DPLS_MARK, USB1D_SPEED_MARK,
+	USB1D_TXENL_MARK,
+	USB2_PWR_EN_MARK, USB1_PWR_EN_USBF_UPLUP_MARK, USB1D_SUSPEND_MARK,
+	IRQ5_MARK, IRQ4_MARK,
+	IRQ3_IRL3_MARK, IRQ2_IRL2_MARK,
+	IRQ1_IRL1_MARK, IRQ0_IRL0_MARK,
+	PCC_REG_MARK, PCC_DRV_MARK,
+	PCC_BVD2_MARK, PCC_BVD1_MARK,
+	PCC_CD2_MARK, PCC_CD1_MARK,
+	PCC_RESET_MARK, PCC_RDY_MARK,
+	PCC_VS2_MARK, PCC_VS1_MARK,
+	AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+	AUDCK_MARK, AUDSYNC_MARK, ASEBRKAK_MARK, TRST_MARK,
+	TMS_MARK, TDO_MARK, TDI_MARK, TCK_MARK,
+	DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+	TEND1_MARK, TEND0_MARK,
+	SIOF0_SYNC_MARK, SIOF0_MCLK_MARK,
+	SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+	SIOF0_SCK_MARK,
+	SIOF1_SYNC_MARK, SIOF1_MCLK_MARK,
+	SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+	SIOF1_SCK_MARK,
+	SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+	SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+	SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+	SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+	TPU_TO1_MARK, TPU_TO0_MARK,
+	TPU_TI3B_MARK, TPU_TI3A_MARK,
+	TPU_TI2B_MARK, TPU_TI2A_MARK,
+	TPU_TO3_MARK, TPU_TO2_MARK,
+	SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+	MMC_DAT_MARK, MMC_CMD_MARK,
+	MMC_CLK_MARK, MMC_VDDON_MARK,
+	MMC_ODMOD_MARK,
+	STATUS0_MARK, STATUS1_MARK,
+	PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+	/* PTA GPIO */
+	PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+	PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+	PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+	PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+	PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+	PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+	PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+	PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+	/* PTB GPIO */
+	PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+	PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+	PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+	PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+	PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+	PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+	PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+	PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+	/* PTC GPIO */
+	PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+	PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+	PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+	PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+	PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+	PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+	PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+	PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+	/* PTD GPIO */
+	PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+	PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+	PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+	PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+	PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+	PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+	PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+	PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+	/* PTE GPIO */
+	PINMUX_DATA(PTE6_DATA, PTE6_IN),
+	PINMUX_DATA(PTE5_DATA, PTE5_IN),
+	PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+	PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+	PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+	PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+	PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+	/* PTF GPIO */
+	PINMUX_DATA(PTF6_DATA, PTF6_IN),
+	PINMUX_DATA(PTF5_DATA, PTF5_IN),
+	PINMUX_DATA(PTF4_DATA, PTF4_IN),
+	PINMUX_DATA(PTF3_DATA, PTF3_IN),
+	PINMUX_DATA(PTF2_DATA, PTF2_IN),
+	PINMUX_DATA(PTF1_DATA, PTF1_IN),
+	PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+	/* PTG GPIO */
+	PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT, PTG6_IN_PU),
+	PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT, PTG5_IN_PU),
+	PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT, PTG4_IN_PU),
+	PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT, PTG3_IN_PU),
+	PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT, PTG2_IN_PU),
+	PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT, PTG1_IN_PU),
+	PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT, PTG0_IN_PU),
+
+	/* PTH GPIO */
+	PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+	PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+	PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+	PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+	PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+	PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+	PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+	/* PTJ GPIO */
+	PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT, PTJ6_IN_PU),
+	PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT, PTJ5_IN_PU),
+	PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT, PTJ4_IN_PU),
+	PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+	PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+	PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+	PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+	/* PTK GPIO */
+	PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+	PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+	PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+	PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+	/* PTL GPIO */
+	PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+	PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+	PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+	PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+	PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+
+	/* PTM GPIO */
+	PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+	PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+	PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+	PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+	PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+	PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+	PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+	PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+	/* PTP GPIO */
+	PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT, PTP4_IN_PU),
+	PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT, PTP3_IN_PU),
+	PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT, PTP2_IN_PU),
+	PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT, PTP1_IN_PU),
+	PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT, PTP0_IN_PU),
+
+	/* PTR GPIO */
+	PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+	PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+	PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+	PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+	PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT, PTR3_IN_PU),
+	PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT, PTR2_IN_PU),
+	PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+	PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+	/* PTS GPIO */
+	PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+	PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+	PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+	PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+	PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+	/* PTT GPIO */
+	PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+	PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+	PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+	PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+	PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+	/* PTU GPIO */
+	PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+	PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+	PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+	PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+	PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+	/* PTV GPIO */
+	PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+	PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+	PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+	PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+	PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+	/* PTA FN */
+	PINMUX_DATA(D23_MARK, PTA7_FN),
+	PINMUX_DATA(D22_MARK, PTA6_FN),
+	PINMUX_DATA(D21_MARK, PTA5_FN),
+	PINMUX_DATA(D20_MARK, PTA4_FN),
+	PINMUX_DATA(D19_MARK, PTA3_FN),
+	PINMUX_DATA(D18_MARK, PTA2_FN),
+	PINMUX_DATA(D17_MARK, PTA1_FN),
+	PINMUX_DATA(D16_MARK, PTA0_FN),
+
+	/* PTB FN */
+	PINMUX_DATA(D31_MARK, PTB7_FN),
+	PINMUX_DATA(D30_MARK, PTB6_FN),
+	PINMUX_DATA(D29_MARK, PTB5_FN),
+	PINMUX_DATA(D28_MARK, PTB4_FN),
+	PINMUX_DATA(D27_MARK, PTB3_FN),
+	PINMUX_DATA(D26_MARK, PTB2_FN),
+	PINMUX_DATA(D25_MARK, PTB1_FN),
+	PINMUX_DATA(D24_MARK, PTB0_FN),
+
+	/* PTC FN */
+	PINMUX_DATA(LCD_DATA7_MARK, PTC7_FN),
+	PINMUX_DATA(LCD_DATA6_MARK, PTC6_FN),
+	PINMUX_DATA(LCD_DATA5_MARK, PTC5_FN),
+	PINMUX_DATA(LCD_DATA4_MARK, PTC4_FN),
+	PINMUX_DATA(LCD_DATA3_MARK, PTC3_FN),
+	PINMUX_DATA(LCD_DATA2_MARK, PTC2_FN),
+	PINMUX_DATA(LCD_DATA1_MARK, PTC1_FN),
+	PINMUX_DATA(LCD_DATA0_MARK, PTC0_FN),
+
+	/* PTD FN */
+	PINMUX_DATA(LCD_DATA15_MARK, PTD7_FN),
+	PINMUX_DATA(LCD_DATA14_MARK, PTD6_FN),
+	PINMUX_DATA(LCD_DATA13_MARK, PTD5_FN),
+	PINMUX_DATA(LCD_DATA12_MARK, PTD4_FN),
+	PINMUX_DATA(LCD_DATA11_MARK, PTD3_FN),
+	PINMUX_DATA(LCD_DATA10_MARK, PTD2_FN),
+	PINMUX_DATA(LCD_DATA9_MARK, PTD1_FN),
+	PINMUX_DATA(LCD_DATA8_MARK, PTD0_FN),
+
+	/* PTE FN */
+	PINMUX_DATA(IIC_SCL_MARK, PSELB_9_8_00, PTE6_FN),
+	PINMUX_DATA(AFE_RXIN_MARK, PSELB_9_8_11, PTE6_FN),
+	PINMUX_DATA(IIC_SDA_MARK, PSELB_9_8_00, PTE5_FN),
+	PINMUX_DATA(AFE_RDET_MARK, PSELB_9_8_11, PTE5_FN),
+	PINMUX_DATA(LCD_M_DISP_MARK, PTE4_FN),
+	PINMUX_DATA(LCD_CL1_MARK, PTE3_FN),
+	PINMUX_DATA(LCD_CL2_MARK, PTE2_FN),
+	PINMUX_DATA(LCD_DON_MARK, PTE1_FN),
+	PINMUX_DATA(LCD_FLM_MARK, PTE0_FN),
+
+	/* PTF FN */
+	PINMUX_DATA(DA1_MARK, PTF6_FN),
+	PINMUX_DATA(DA0_MARK, PTF5_FN),
+	PINMUX_DATA(AN3_MARK, PTF4_FN),
+	PINMUX_DATA(AN2_MARK, PTF3_FN),
+	PINMUX_DATA(AN1_MARK, PTF2_FN),
+	PINMUX_DATA(AN0_MARK, PTF1_FN),
+	PINMUX_DATA(ADTRG_MARK, PTF0_FN),
+
+	/* PTG FN */
+	PINMUX_DATA(USB1D_RCV_MARK, PSELA_3_2_00, PTG6_FN),
+	PINMUX_DATA(AFE_FS_MARK, PSELA_3_2_01, PTG6_FN),
+	PINMUX_DATA(PCC_REG_MARK, PSELA_3_2_10, PTG6_FN),
+	PINMUX_DATA(IRQ5_MARK, PSELA_3_2_11, PTG6_FN),
+	PINMUX_DATA(USB1D_TXSE0_MARK, PSELA_5_4_00, PTG5_FN),
+	PINMUX_DATA(AFE_TXOUT_MARK, PSELA_5_4_01, PTG5_FN),
+	PINMUX_DATA(PCC_DRV_MARK, PSELA_5_4_10, PTG5_FN),
+	PINMUX_DATA(IRQ4_MARK, PSELA_5_4_11, PTG5_FN),
+	PINMUX_DATA(USB1D_TXDPLS_MARK, PSELA_7_6_00, PTG4_FN),
+	PINMUX_DATA(AFE_SCLK_MARK, PSELA_7_6_01, PTG4_FN),
+	PINMUX_DATA(IOIS16_MARK, PSELA_7_6_10, PTG4_FN),
+	PINMUX_DATA(USB1D_DMNS_MARK, PSELA_9_8_00, PTG3_FN),
+	PINMUX_DATA(AFE_RLYCNT_MARK, PSELA_9_8_01, PTG3_FN),
+	PINMUX_DATA(PCC_BVD2_MARK, PSELA_9_8_10, PTG3_FN),
+	PINMUX_DATA(USB1D_DPLS_MARK, PSELA_11_10_00, PTG2_FN),
+	PINMUX_DATA(AFE_HC1_MARK, PSELA_11_10_01, PTG2_FN),
+	PINMUX_DATA(PCC_BVD1_MARK, PSELA_11_10_10, PTG2_FN),
+	PINMUX_DATA(USB1D_SPEED_MARK, PSELA_13_12_00, PTG1_FN),
+	PINMUX_DATA(PCC_CD2_MARK, PSELA_13_12_10, PTG1_FN),
+	PINMUX_DATA(USB1D_TXENL_MARK, PSELA_15_14_00, PTG0_FN),
+	PINMUX_DATA(PCC_CD1_MARK, PSELA_15_14_10, PTG0_FN),
+
+	/* PTH FN */
+	PINMUX_DATA(RAS_MARK, PTH6_FN),
+	PINMUX_DATA(CAS_MARK, PTH5_FN),
+	PINMUX_DATA(CKE_MARK, PTH4_FN),
+	PINMUX_DATA(STATUS1_MARK, PTH3_FN),
+	PINMUX_DATA(STATUS0_MARK, PTH2_FN),
+	PINMUX_DATA(USB2_PWR_EN_MARK, PTH1_FN),
+	PINMUX_DATA(USB1_PWR_EN_USBF_UPLUP_MARK, PTH0_FN),
+
+	/* PTJ FN */
+	PINMUX_DATA(AUDCK_MARK, PTJ6_FN),
+	PINMUX_DATA(ASEBRKAK_MARK, PTJ5_FN),
+	PINMUX_DATA(AUDATA3_MARK, PTJ4_FN),
+	PINMUX_DATA(AUDATA2_MARK, PTJ3_FN),
+	PINMUX_DATA(AUDATA1_MARK, PTJ2_FN),
+	PINMUX_DATA(AUDATA0_MARK, PTJ1_FN),
+	PINMUX_DATA(AUDSYNC_MARK, PTJ0_FN),
+
+	/* PTK FN */
+	PINMUX_DATA(PCC_RESET_MARK, PTK3_FN),
+	PINMUX_DATA(PCC_RDY_MARK, PTK2_FN),
+	PINMUX_DATA(PCC_VS2_MARK, PTK1_FN),
+	PINMUX_DATA(PCC_VS1_MARK, PTK0_FN),
+
+	/* PTL FN */
+	PINMUX_DATA(TRST_MARK, PTL7_FN),
+	PINMUX_DATA(TMS_MARK, PTL6_FN),
+	PINMUX_DATA(TDO_MARK, PTL5_FN),
+	PINMUX_DATA(TDI_MARK, PTL4_FN),
+	PINMUX_DATA(TCK_MARK, PTL3_FN),
+
+	/* PTM FN */
+	PINMUX_DATA(DREQ1_MARK, PTM7_FN),
+	PINMUX_DATA(DREQ0_MARK, PTM6_FN),
+	PINMUX_DATA(DACK1_MARK, PTM5_FN),
+	PINMUX_DATA(DACK0_MARK, PTM4_FN),
+	PINMUX_DATA(TEND1_MARK, PTM3_FN),
+	PINMUX_DATA(TEND0_MARK, PTM2_FN),
+	PINMUX_DATA(CS5B_CE1A_MARK, PTM1_FN),
+	PINMUX_DATA(CS6B_CE1B_MARK, PTM0_FN),
+
+	/* PTP FN */
+	PINMUX_DATA(USB1D_SUSPEND_MARK, PSELA_1_0_00, PTP4_FN),
+	PINMUX_DATA(REFOUT_MARK, PSELA_1_0_01, PTP4_FN),
+	PINMUX_DATA(IRQOUT_MARK, PSELA_1_0_10, PTP4_FN),
+	PINMUX_DATA(IRQ3_IRL3_MARK, PTP3_FN),
+	PINMUX_DATA(IRQ2_IRL2_MARK, PTP2_FN),
+	PINMUX_DATA(IRQ1_IRL1_MARK, PTP1_FN),
+	PINMUX_DATA(IRQ0_IRL0_MARK, PTP0_FN),
+
+	/* PTR FN */
+	PINMUX_DATA(A25_MARK, PTR7_FN),
+	PINMUX_DATA(A24_MARK, PTR6_FN),
+	PINMUX_DATA(A23_MARK, PTR5_FN),
+	PINMUX_DATA(A22_MARK, PTR4_FN),
+	PINMUX_DATA(A21_MARK, PTR3_FN),
+	PINMUX_DATA(A20_MARK, PTR2_FN),
+	PINMUX_DATA(A19_MARK, PTR1_FN),
+	PINMUX_DATA(A0_MARK, PTR0_FN),
+
+	/* PTS FN */
+	PINMUX_DATA(SIOF0_SYNC_MARK, PTS4_FN),
+	PINMUX_DATA(SIOF0_MCLK_MARK, PTS3_FN),
+	PINMUX_DATA(SIOF0_TXD_MARK, PTS2_FN),
+	PINMUX_DATA(SIOF0_RXD_MARK, PTS1_FN),
+	PINMUX_DATA(SIOF0_SCK_MARK, PTS0_FN),
+
+	/* PTT FN */
+	PINMUX_DATA(SCIF0_CTS_MARK, PSELB_15_14_00, PTT4_FN),
+	PINMUX_DATA(TPU_TO1_MARK, PSELB_15_14_11, PTT4_FN),
+	PINMUX_DATA(SCIF0_RTS_MARK, PSELB_15_14_00, PTT3_FN),
+	PINMUX_DATA(TPU_TO0_MARK, PSELB_15_14_11, PTT3_FN),
+	PINMUX_DATA(SCIF0_TXD_MARK, PTT2_FN),
+	PINMUX_DATA(SCIF0_RXD_MARK, PTT1_FN),
+	PINMUX_DATA(SCIF0_SCK_MARK, PTT0_FN),
+
+	/* PTU FN */
+	PINMUX_DATA(SIOF1_SYNC_MARK, PTU4_FN),
+	PINMUX_DATA(SIOF1_MCLK_MARK, PSELD_11_10_00, PTU3_FN),
+	PINMUX_DATA(TPU_TI3B_MARK, PSELD_11_10_01, PTU3_FN),
+	PINMUX_DATA(SIOF1_TXD_MARK, PSELD_15_14_00, PTU2_FN),
+	PINMUX_DATA(TPU_TI3A_MARK, PSELD_15_14_01, PTU2_FN),
+	PINMUX_DATA(MMC_DAT_MARK, PSELD_15_14_10, PTU2_FN),
+	PINMUX_DATA(SIOF1_RXD_MARK, PSELC_13_12_00, PTU1_FN),
+	PINMUX_DATA(TPU_TI2B_MARK, PSELC_13_12_01, PTU1_FN),
+	PINMUX_DATA(MMC_CMD_MARK, PSELC_13_12_10, PTU1_FN),
+	PINMUX_DATA(SIOF1_SCK_MARK, PSELC_15_14_00, PTU0_FN),
+	PINMUX_DATA(TPU_TI2A_MARK, PSELC_15_14_01, PTU0_FN),
+	PINMUX_DATA(MMC_CLK_MARK, PSELC_15_14_10, PTU0_FN),
+
+	/* PTV FN */
+	PINMUX_DATA(SCIF1_CTS_MARK, PSELB_11_10_00, PTV4_FN),
+	PINMUX_DATA(TPU_TO3_MARK, PSELB_11_10_01, PTV4_FN),
+	PINMUX_DATA(MMC_VDDON_MARK, PSELB_11_10_10, PTV4_FN),
+	PINMUX_DATA(LCD_VEPWC_MARK, PSELB_11_10_11, PTV4_FN),
+	PINMUX_DATA(SCIF1_RTS_MARK, PSELB_13_12_00, PTV3_FN),
+	PINMUX_DATA(TPU_TO2_MARK, PSELB_13_12_01, PTV3_FN),
+	PINMUX_DATA(MMC_ODMOD_MARK, PSELB_13_12_10, PTV3_FN),
+	PINMUX_DATA(LCD_VCPWC_MARK, PSELB_13_12_11, PTV3_FN),
+	PINMUX_DATA(SCIF1_TXD_MARK, PSELC_9_8_00, PTV2_FN),
+	PINMUX_DATA(SIM_D_MARK, PSELC_9_8_10, PTV2_FN),
+	PINMUX_DATA(SCIF1_RXD_MARK, PSELC_11_10_00, PTV1_FN),
+	PINMUX_DATA(SIM_RST_MARK, PSELC_11_10_10, PTV1_FN),
+	PINMUX_DATA(SCIF1_SCK_MARK, PSELD_1_0_00, PTV0_FN),
+	PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+	/* PTA */
+	PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+	PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+	PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+	PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+	PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+	PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+	PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+	PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+	/* PTB */
+	PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+	PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+	PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+	PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+	PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+	PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+	PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+	PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+	/* PTC */
+	PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+	PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+	PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+	PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+	PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+	PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+	PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+	PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+	/* PTD */
+	PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+	PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+	PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+	PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+	PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+	PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+	PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+	PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+	/* PTE */
+	PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+	PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+	PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+	PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+	PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+	PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+	PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+	/* PTF */
+	PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+	PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+	PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+	PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+	PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+	PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+	PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+	/* PTG */
+	PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+	PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+	PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+	PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+	PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+	PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+	PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+	/* PTH */
+	PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+	PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+	PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+	PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+	PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+	PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+	PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+	/* PTJ */
+	PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+	PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+	PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+	PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+	PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+	PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+	PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+	/* PTK */
+	PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+	PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+	PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+	PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+	/* PTL */
+	PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+	PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+	PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+	PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+	PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+
+	/* PTM */
+	PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+	PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+	PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+	PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+	PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+	PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+	PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+	PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+	/* PTP */
+	PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+	PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+	PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+	PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+	PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+	/* PTR */
+	PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+	PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+	PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+	PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+	PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+	PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+	PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+	PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+	/* PTS */
+	PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+	PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+	PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+	PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+	PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+	/* PTT */
+	PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+	PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+	PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+	PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+	PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+	/* PTU */
+	PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+	PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+	PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+	PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+	PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+	/* PTV */
+	PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+	PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+	PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+	PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+	PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+	/* BSC */
+	PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+	PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+	PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+	PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+	PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+	PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+	PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+	PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+	PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+	PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+	PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+	PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+	PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+	PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+	PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+	PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+	PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+	PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+	PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+	PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+	PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+	PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+	PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+	PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+	PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+	PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+	PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+	PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+	PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+	PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+	PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+
+	/* LCDC */
+	PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+
+	/* AFEIF */
+	PINMUX_GPIO(GPIO_FN_AFE_RXIN, AFE_RXIN_MARK),
+	PINMUX_GPIO(GPIO_FN_AFE_RDET, AFE_RDET_MARK),
+	PINMUX_GPIO(GPIO_FN_AFE_FS, AFE_FS_MARK),
+	PINMUX_GPIO(GPIO_FN_AFE_TXOUT, AFE_TXOUT_MARK),
+	PINMUX_GPIO(GPIO_FN_AFE_SCLK, AFE_SCLK_MARK),
+	PINMUX_GPIO(GPIO_FN_AFE_RLYCNT, AFE_RLYCNT_MARK),
+	PINMUX_GPIO(GPIO_FN_AFE_HC1, AFE_HC1_MARK),
+
+	/* IIC */
+	PINMUX_GPIO(GPIO_FN_IIC_SCL, IIC_SCL_MARK),
+	PINMUX_GPIO(GPIO_FN_IIC_SDA, IIC_SDA_MARK),
+
+	/* DAC */
+	PINMUX_GPIO(GPIO_FN_DA1, DA1_MARK),
+	PINMUX_GPIO(GPIO_FN_DA0, DA0_MARK),
+
+	/* ADC */
+	PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+	PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+	PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+	PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+	PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+	/* USB */
+	PINMUX_GPIO(GPIO_FN_USB1D_RCV, USB1D_RCV_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_TXSE0, USB1D_TXSE0_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_TXDPLS, USB1D_TXDPLS_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_DMNS, USB1D_DMNS_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_DPLS, USB1D_DPLS_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_SPEED, USB1D_SPEED_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_TXENL, USB1D_TXENL_MARK),
+
+	PINMUX_GPIO(GPIO_FN_USB2_PWR_EN, USB2_PWR_EN_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1_PWR_EN_USBF_UPLUP,
+		    USB1_PWR_EN_USBF_UPLUP_MARK),
+	PINMUX_GPIO(GPIO_FN_USB1D_SUSPEND, USB1D_SUSPEND_MARK),
+
+	/* INTC */
+	PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ3_IRL3, IRQ3_IRL3_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ2_IRL2, IRQ2_IRL2_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ1_IRL1, IRQ1_IRL1_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ0_IRL0, IRQ0_IRL0_MARK),
+
+	/* PCC */
+	PINMUX_GPIO(GPIO_FN_PCC_REG, PCC_REG_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_DRV, PCC_DRV_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_BVD2, PCC_BVD2_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_BVD1, PCC_BVD1_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_CD2, PCC_CD2_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_CD1, PCC_CD1_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_RESET, PCC_RESET_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_RDY, PCC_RDY_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_VS2, PCC_VS2_MARK),
+	PINMUX_GPIO(GPIO_FN_PCC_VS1, PCC_VS1_MARK),
+
+	/* HUDI */
+	PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_ASEBRKAK, ASEBRKAK_MARK),
+	PINMUX_GPIO(GPIO_FN_TRST, TRST_MARK),
+	PINMUX_GPIO(GPIO_FN_TMS, TMS_MARK),
+	PINMUX_GPIO(GPIO_FN_TDO, TDO_MARK),
+	PINMUX_GPIO(GPIO_FN_TDI, TDI_MARK),
+	PINMUX_GPIO(GPIO_FN_TCK, TCK_MARK),
+
+	/* DMAC */
+	PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+	PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+	PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+	/* SIOF0 */
+	PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_MCLK, SIOF0_MCLK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+
+	/* SIOF1 */
+	PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_MCLK, SIOF1_MCLK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+
+	/* SCIF0 */
+	PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+	/* SCIF1 */
+	PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+	/* TPU */
+	PINMUX_GPIO(GPIO_FN_TPU_TO1, TPU_TO1_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TO0, TPU_TO0_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TI3B, TPU_TI3B_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TI3A, TPU_TI3A_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TI2B, TPU_TI2B_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TI2A, TPU_TI2A_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TO3, TPU_TO3_MARK),
+	PINMUX_GPIO(GPIO_FN_TPU_TO2, TPU_TO2_MARK),
+
+	/* SIM */
+	PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+	PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+	/* MMC */
+	PINMUX_GPIO(GPIO_FN_MMC_DAT, MMC_DAT_MARK),
+	PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+	PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_MMC_VDDON, MMC_VDDON_MARK),
+	PINMUX_GPIO(GPIO_FN_MMC_ODMOD, MMC_ODMOD_MARK),
+
+	/* SYSC */
+	PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+	PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+	{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+		PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+		PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+		PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+		PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+		PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+		PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+		PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+		PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+	},
+	{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+		PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+		PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+		PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+		PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+		PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+		PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+		PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+		PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+	},
+	{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+		PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+		PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+		PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+		PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+		PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+		PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+		PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+		PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+	},
+	{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+		PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+		PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+		PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+		PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+		PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+		PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+		PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+		PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+	},
+	{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+		0, 0, 0, 0,
+		PTE6_FN, 0, 0, PTE6_IN,
+		PTE5_FN, 0, 0, PTE5_IN,
+		PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+		PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+		PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+		PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+		PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+	},
+	{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+		0, 0, 0, 0,
+		PTF6_FN, 0, 0, PTF6_IN,
+		PTF5_FN, 0, 0, PTF5_IN,
+		PTF4_FN, 0, 0, PTF4_IN,
+		PTF3_FN, 0, 0, PTF3_IN,
+		PTF2_FN, 0, 0, PTF2_IN,
+		PTF1_FN, 0, 0, PTF1_IN,
+		PTF0_FN, 0, 0, PTF0_IN }
+	},
+	{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+		0, 0, 0, 0,
+		PTG6_FN, PTG6_OUT, PTG6_IN_PU, PTG6_IN,
+		PTG5_FN, PTG5_OUT, PTG5_IN_PU, PTG5_IN,
+		PTG4_FN, PTG4_OUT, PTG4_IN_PU, PTG4_IN,
+		PTG3_FN, PTG3_OUT, PTG3_IN_PU, PTG3_IN,
+		PTG2_FN, PTG2_OUT, PTG2_IN_PU, PTG2_IN,
+		PTG1_FN, PTG1_OUT, PTG1_IN_PU, PTG1_IN,
+		PTG0_FN, PTG0_OUT, PTG0_IN_PU, PTG0_IN }
+	},
+	{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+		0, 0, 0, 0,
+		PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+		PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+		PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+		PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+		PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+		PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+		PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+	},
+	{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+		0, 0, 0, 0,
+		PTJ6_FN, PTJ6_OUT, PTJ6_IN_PU, PTJ6_IN,
+		PTJ5_FN, PTJ5_OUT, PTJ5_IN_PU, PTJ5_IN,
+		PTJ4_FN, PTJ4_OUT, PTJ4_IN_PU, PTJ4_IN,
+		PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+		PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+		PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+		PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+	},
+	{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+		PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+		PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+		PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+	},
+	{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+		PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+		PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+		PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+		PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+		PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+		PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+		PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+		PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+		PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+		PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+		PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+		PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+		PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+	},
+	{ PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTP4_FN, PTP4_OUT, PTP4_IN_PU, PTP4_IN,
+		PTP3_FN, PTP3_OUT, PTP3_IN_PU, PTP3_IN,
+		PTP2_FN, PTP2_OUT, PTP2_IN_PU, PTP2_IN,
+		PTP1_FN, PTP1_OUT, PTP1_IN_PU, PTP1_IN,
+		PTP0_FN, PTP0_OUT, PTP0_IN_PU, PTP0_IN }
+	},
+	{ PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) {
+		PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+		PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+		PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+		PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+		PTR3_FN, PTR3_OUT, PTR3_IN_PU, PTR3_IN,
+		PTR2_FN, PTR2_OUT, PTR2_IN_PU, PTR2_IN,
+		PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+		PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+	},
+	{ PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+		PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+		PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+		PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+		PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+	},
+	{ PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+		PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+		PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+		PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+		PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+	},
+	{ PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+		PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+		PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+		PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+		PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+	},
+	{ PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+		PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+		PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+		PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+		PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+	},
+	{}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+	{ PINMUX_DATA_REG("PADR", 0xa4050140, 8) {
+		PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+		PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+	},
+	{ PINMUX_DATA_REG("PBDR", 0xa4050142, 8) {
+		PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+		PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+	},
+	{ PINMUX_DATA_REG("PCDR", 0xa4050144, 8) {
+		PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+		PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+	},
+	{ PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+		PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+		PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+	},
+	{ PINMUX_DATA_REG("PEDR", 0xa4050148, 8) {
+		0, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+		PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+	},
+	{ PINMUX_DATA_REG("PFDR", 0xa405014a, 8) {
+		0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+		PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+	},
+	{ PINMUX_DATA_REG("PGDR", 0xa405014c, 8) {
+		0, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+		PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+	},
+	{ PINMUX_DATA_REG("PHDR", 0xa405014e, 8) {
+		0, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+		PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+	},
+	{ PINMUX_DATA_REG("PJDR", 0xa4050150, 8) {
+		0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+		PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+	},
+	{ PINMUX_DATA_REG("PKDR", 0xa4050152, 8) {
+		0, 0, 0, 0,
+		PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+	},
+	{ PINMUX_DATA_REG("PLDR", 0xa4050154, 8) {
+		PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+		PTL3_DATA, 0, 0, 0 }
+	},
+	{ PINMUX_DATA_REG("PMDR", 0xa4050156, 8) {
+		PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+		PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+	},
+	{ PINMUX_DATA_REG("PPDR", 0xa4050158, 8) {
+		0, 0, 0, PTP4_DATA,
+		PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+	},
+	{ PINMUX_DATA_REG("PRDR", 0xa405015a, 8) {
+		PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+		PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+	},
+	{ PINMUX_DATA_REG("PSDR", 0xa405015c, 8) {
+		0, 0, 0, PTS4_DATA,
+		PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+	},
+	{ PINMUX_DATA_REG("PTDR", 0xa405015e, 8) {
+		0, 0, 0, PTT4_DATA,
+		PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+	},
+	{ PINMUX_DATA_REG("PUDR", 0xa4050160, 8) {
+		0, 0, 0, PTU4_DATA,
+		PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+	},
+	{ PINMUX_DATA_REG("PVDR", 0xa4050162, 8) {
+		0, 0, 0, PTV4_DATA,
+		PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+	},
+	{ },
+};
+
+static struct pinmux_info sh7720_pinmux_info = {
+	.name = "sh7720_pfc",
+	.reserved_id = PINMUX_RESERVED,
+	.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+	.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+	.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+	.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+	.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.first_gpio = GPIO_PTA7,
+	.last_gpio = GPIO_FN_STATUS1,
+
+	.gpios = pinmux_gpios,
+	.cfg_regs = pinmux_config_regs,
+	.data_regs = pinmux_data_regs,
+
+	.gpio_data = pinmux_data,
+	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+	return register_pinmux(&sh7720_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
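
Illustration (not part of the patch): with the SH7720 pinmux table above registered
through register_pinmux(), board code can hand a multiplexed pin to its peripheral
function via the generic GPIO API by requesting the matching GPIO_FN_* number. A
minimal sketch, assuming a hypothetical board file and that the GPIO_FN_* enums are
provided by <cpu/sh7720.h> (error handling omitted):

    #include <linux/gpio.h>
    #include <linux/init.h>
    #include <cpu/sh7720.h>  /* assumed header for the GPIO_FN_* pin numbers */

    /* Hypothetical board hook: route the SCIF0 pins to the serial port. */
    static int __init board_setup_scif0_pins(void)
    {
        gpio_request(GPIO_FN_SCIF0_TXD, NULL);
        gpio_request(GPIO_FN_SCIF0_RXD, NULL);
        gpio_request(GPIO_FN_SCIF0_SCK, NULL);
        return 0;
    }
    device_initcall(board_setup_scif0_pins);
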
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index 2d452f6..2780917 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -36,7 +36,7 @@
 extern unsigned long long float64_sub(unsigned long long a,
 				      unsigned long long b);
 extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
-
+extern unsigned long int float64_to_float32(unsigned long long a);
 static unsigned int fpu_exception_flags;
 
 /*
@@ -417,6 +417,29 @@
 
 		regs->pc = nextpc;
 		return 1;
+	} else if ((finsn & 0xf0bd) == 0xf0bd) {
+		/* fcnvds - double to single precision convert */
+		struct task_struct *tsk = current;
+		int m;
+		unsigned int hx;
+
+		m = (finsn >> 9) & 0x7;
+		hx = tsk->thread.fpu.hard.fp_regs[m];
+
+		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
+			&& ((hx & 0x7fffffff) < 0x00100000)) {
+			/* subnormal double to float conversion */
+			long long llx;
+
+			llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32)
+			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+
+			tsk->thread.fpu.hard.fpul = float64_to_float32(llx);
+		} else
+			return 0;
+
+		regs->pc = nextpc;
+		return 1;
 	}
 
 	return 0;
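
Illustration (not part of the patch): the fcnvds branch added above only takes over
when an FPU error has been flagged and the high 32 bits of the source register pair
have a zero exponent field, i.e. the operand is a subnormal (or zero) double that the
hardware conversion cannot complete. A small stand-alone sketch of the same high-word
test, assuming nothing beyond standard C:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the kernel check (hx & 0x7fffffff) < 0x00100000: the exponent
     * field of the double is zero, so the value is subnormal or +/-0.0. */
    static int high_word_says_tiny(double d)
    {
        uint64_t bits;
        uint32_t hx;

        memcpy(&bits, &d, sizeof(bits));
        hx = bits >> 32;
        return (hx & 0x7fffffff) < 0x00100000;
    }

    int main(void)
    {
        printf("%d\n", high_word_says_tiny(5e-324)); /* subnormal -> 1 */
        printf("%d\n", high_word_says_tiny(1.0));    /* normal    -> 0 */
        return 0;
    }
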
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 254c5c5..d9bdc93 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/serial_sci.h>
+#include <linux/io.h>
 
 enum {
 	UNUSED = 0,
@@ -178,10 +179,14 @@
 }
 __initcall(sh7760_devices_setup);
 
+#define INTC_ICR	0xffd00000UL
+#define INTC_ICR_IRLM	(1 << 7)
+
 void __init plat_irq_setup_pins(int mode)
 {
 	switch (mode) {
 	case IRQ_MODE_IRQ:
+		ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
 		register_intc_controller(&intc_desc_irq);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
index 828cb57..2b747f3 100644
--- a/arch/sh/kernel/cpu/sh4/softfloat.c
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -85,6 +85,7 @@
 float32 float32_div(float32 a, float32 b);
 float32 float32_mul(float32 a, float32 b);
 float64 float64_mul(float64 a, float64 b);
+float32 float64_to_float32(float64 a);
 inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
 		   bits64 * z1Ptr);
 inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
@@ -890,3 +891,31 @@
 	}
 	return roundAndPackFloat64(zSign, zExp, zSig0);
 }
+
+/*
+ * ----------------------------------------------------------------------------
+ * Returns the result of converting the double-precision floating-point value
+ * `a' to the single-precision floating-point format.  The conversion is
+ * performed according to the IEC/IEEE Standard for Binary Floating-point
+ * Arithmetic.
+ * ----------------------------------------------------------------------------
+ */
+float32 float64_to_float32(float64 a)
+{
+	flag aSign;
+	int16 aExp;
+	bits64 aSig;
+	bits32 zSig;
+
+	aSig = extractFloat64Frac(a);
+	aExp = extractFloat64Exp(a);
+	aSign = extractFloat64Sign(a);
+
+	shift64RightJamming(aSig, 22, &aSig);
+	zSig = aSig;
+	if (aExp || zSig) {
+		zSig |= 0x40000000;
+		aExp -= 0x381;
+	}
+	return roundAndPackFloat32(aSign, aExp, zSig);
+}
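
Illustration (not part of the patch): float64_to_float32() above performs in software
broadly the same IEC/IEEE double-to-single conversion that a plain C cast compiles to
when the FPU can handle the operand in hardware; shift64RightJamming() keeps the
discarded bits as a sticky bit and roundAndPackFloat32() does the final rounding. A
tiny user-space sketch of the hardware-side conversion being emulated:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        double d = 1.0 / 3.0;
        float f = (float)d;  /* IEC/IEEE double -> single conversion */
        uint64_t dbits;
        uint32_t fbits;

        memcpy(&dbits, &d, sizeof(dbits));
        memcpy(&fbits, &f, sizeof(fbits));
        printf("double bits: %016llx\nsingle bits: %08x\n",
               (unsigned long long)dbits, fbits);
        return 0;
    }
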
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 9381ad8..be9a0c1 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -27,5 +27,10 @@
 clock-$(CONFIG_CPU_SUBTYPE_SH7366)	:= clock-sh7722.o
 clock-$(CONFIG_CPU_SUBTYPE_SHX3)	:= clock-shx3.o
 
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7722)	:= pinmux-sh7722.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7723)	:= pinmux-sh7723.o
+
 obj-y			+= $(clock-y)
 obj-$(CONFIG_SMP)	+= $(smp-y)
+obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
new file mode 100644
index 0000000..cb9d07b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
@@ -0,0 +1,1783 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7722.h>
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+	PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+	PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+	PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+	PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA,
+	PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+	PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+	PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA,
+	PTF6_DATA, PTF5_DATA, PTF4_DATA,
+	PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+	PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+	PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+	PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+	PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA,
+	PTK6_DATA, PTK5_DATA, PTK4_DATA,
+	PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+	PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+	PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+	PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+	PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+	PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+	PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+	PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+	PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+	PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+	PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+	PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+	PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+	PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+	PTW6_DATA, PTW5_DATA, PTW4_DATA,
+	PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+	PTX6_DATA, PTX5_DATA, PTX4_DATA,
+	PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+	PTY6_DATA, PTY5_DATA, PTY4_DATA,
+	PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+	PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+	PINMUX_DATA_END,
+
+	PINMUX_INPUT_BEGIN,
+	PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+	PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+	PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+	PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+	PTC7_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN,
+	PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN,
+	PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN,
+	PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN,
+	PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN,
+	PTJ1_IN, PTJ0_IN,
+	PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN,
+	PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+	PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+	PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+	PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+	PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+	PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+	PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN,
+	PTR2_IN,
+	PTS4_IN, PTS2_IN, PTS1_IN,
+	PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN,
+	PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+	PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+	PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+	PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+	PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN,
+	PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN,
+	PINMUX_INPUT_END,
+
+	PINMUX_INPUT_PULLDOWN_BEGIN,
+	PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD,
+	PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD,
+	PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD,	PTE1_IN_PD, PTE0_IN_PD,
+	PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD,
+	PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD,
+	PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD,
+	PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD,
+	PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD,
+	PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD,
+	PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD,
+	PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD,
+	PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD,
+	PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD,
+	PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD,
+	PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD,
+	PTW6_IN_PD, PTW4_IN_PD,	PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD,
+	PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD,
+	PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD,
+	PINMUX_INPUT_PULLDOWN_END,
+
+	PINMUX_INPUT_PULLUP_BEGIN,
+	PTC7_IN_PU, PTC5_IN_PU,
+	PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+	PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU,
+	PTJ1_IN_PU, PTJ0_IN_PU,
+	PTQ0_IN_PU,
+	PTR2_IN_PU,
+	PTX6_IN_PU,
+	PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU,
+	PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU,
+	PINMUX_INPUT_PULLUP_END,
+
+	PINMUX_OUTPUT_BEGIN,
+	PTA7_OUT, PTA5_OUT,
+	PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+	PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+	PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT,
+	PTD6_OUT, PTD5_OUT, PTD4_OUT,
+	PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+	PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT,
+	PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT,
+	PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+	PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+	PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+	PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT,
+	PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT,
+	PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+	PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+	PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+	PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+	PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+	PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,	PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+	PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+	PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT,
+	PTS3_OUT, PTS2_OUT, PTS0_OUT,
+	PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT0_OUT,
+	PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT,
+	PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+	PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+	PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+	PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+	PINMUX_OUTPUT_END,
+
+	PINMUX_MARK_BEGIN,
+	SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+	SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+	SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+	SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+	SCIF2_TXD_MARK, SCIF2_RXD_MARK,
+	SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK,
+	SIOTXD_MARK, SIORXD_MARK,
+	SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK,
+	SIOSCK_MARK, SIOMCK_MARK,
+	VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+	VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+	VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+	VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+	VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK,
+	VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK,
+	VIO_HD2_MARK, VIO_CLK2_MARK,
+	LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+	LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+	LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+	LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+	LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+	LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+	LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+	LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+	LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+	LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK,
+	LCDCS2_MARK,
+	IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+	BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK,
+	HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK,
+	HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK,
+	HPD55_MARK, HPD54_MARK, HPD53_MARK, HPD52_MARK,
+	HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK,
+	HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK,
+	IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+	IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+	SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK,
+	SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK,
+	SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK,
+	SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK,
+	SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK,
+	SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK,
+	AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK,	AUDATA0_MARK,
+	DACK_MARK, DREQ0_MARK,
+	DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+	DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+	DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+	DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+	DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+	STATUS0_MARK, PDSTATUS_MARK,
+	SIOF0_MCK_MARK, SIOF0_SCK_MARK,
+	SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK,
+	SIOF0_TXD_MARK,	SIOF0_RXD_MARK,
+	SIOF1_MCK_MARK, SIOF1_SCK_MARK,
+	SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK,
+	SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+	SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+	TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK,
+	IRDA_IN_MARK, IRDA_OUT_MARK,
+	TPUTO_MARK,
+	FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+	NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+	FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+	KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+	KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+	KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+	PINMUX_MARK_END,
+
+	PINMUX_FUNCTION_BEGIN,
+	VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4,
+	VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK,
+	HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48,
+	IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4,
+	SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK,
+	A25, A24, A23, A22, IRQ5, IRQ4_BS,
+	PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR,
+	SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD,
+	AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0,
+	LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS,
+	LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC,
+	STATUS0, PDSTATUS, IRQ1, IRQ0,
+	SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC,
+	SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0,
+	LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12,
+	LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8,
+	LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4,
+	LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0,
+	HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56,
+	SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN,
+	SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0,
+	LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2,
+	SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD,
+	SCIF0_SCK_TPUTO, SCIF0_RXD, SCIF0_TXD,
+	FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE,
+	NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8,
+	FRB_VIO_CLK2, FCE_VIO_HD2,
+	NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11,
+	VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK,
+	VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD,
+	VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS,
+	CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20,
+	LCDD19_DV_CLKI, LCDD18_DV_CLK,
+	KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0,
+	KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6,
+
+	PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7,
+	PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2,
+	PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD,
+	PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT,
+	PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT,
+	PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3,
+	PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+	PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+	PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST,
+	PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD,
+	PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+	PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+	PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO,
+	PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1,
+	PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK,
+	PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO,
+	PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+	PSD5_CS6B_CE1B, PSD5_LCDCS2,
+	PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+	PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV,
+	PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+	PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+	PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK,
+	PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+	PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10,
+	PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8,
+
+	HIZA14_KEYSC, HIZA14_HIZ,
+	HIZA10_NAF, HIZA10_HIZ,
+	HIZA9_VIO, HIZA9_HIZ,
+	HIZA8_LCDC, HIZA8_HIZ,
+	HIZA7_LCDC, HIZA7_HIZ,
+	HIZA6_LCDC, HIZA6_HIZ,
+	HIZB1_VIO, HIZB1_HIZ,
+	HIZB0_VIO, HIZB0_HIZ,
+	HIZC15_IRQ7, HIZC15_HIZ,
+	HIZC14_IRQ6, HIZC14_HIZ,
+	HIZC13_IRQ5, HIZC13_HIZ,
+	HIZC12_IRQ4, HIZC12_HIZ,
+	HIZC11_IRQ3, HIZC11_HIZ,
+	HIZC10_IRQ2, HIZC10_HIZ,
+	HIZC9_IRQ1, HIZC9_HIZ,
+	HIZC8_IRQ0, HIZC8_HIZ,
+	MSELB9_VIO, MSELB9_VIO2,
+	MSELB8_RGB, MSELB8_SYS,
+	PINMUX_FUNCTION_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+	/* PTA */
+	PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT),
+	PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD),
+	PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT),
+	PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD),
+	PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD),
+	PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD),
+	PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD),
+	PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD),
+
+	/* PTB */
+	PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+	PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+	PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+	PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+	PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+	PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+	PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+	PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+	/* PTC */
+	PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU),
+	PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU),
+	PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+	PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+	PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+	PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+	/* PTD */
+	PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU),
+	PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU),
+	PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU),
+	PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU),
+	PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU),
+	PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU),
+	PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU),
+	PINMUX_DATA(PTD0_DATA, PTD0_OUT),
+
+	/* PTE */
+	PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD),
+	PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD),
+	PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD),
+	PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD),
+	PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD),
+	PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD),
+
+	/* PTF */
+	PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN, PTF6_IN_PD),
+	PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD),
+	PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD),
+	PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD),
+	PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD),
+	PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD),
+	PINMUX_DATA(PTF0_DATA, PTF0_OUT),
+
+	/* PTG */
+	PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+	PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+	PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+	PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+	PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+	/* PTH */
+	PINMUX_DATA(PTH7_DATA, PTH7_OUT),
+	PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD),
+	PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD),
+	PINMUX_DATA(PTH4_DATA, PTH4_OUT),
+	PINMUX_DATA(PTH3_DATA, PTH3_OUT),
+	PINMUX_DATA(PTH2_DATA, PTH2_OUT),
+	PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD),
+	PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD),
+
+	/* PTJ */
+	PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+	PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+	PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+	PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU),
+	PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU),
+
+	/* PTK */
+	PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD),
+	PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD),
+	PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD),
+	PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD),
+	PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD),
+	PINMUX_DATA(PTK1_DATA, PTK1_OUT),
+	PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD),
+
+	/* PTL */
+	PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD),
+	PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD),
+	PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD),
+	PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD),
+	PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, PTL3_IN_PD),
+	PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD),
+	PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD),
+	PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD),
+
+	/* PTM */
+	PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD),
+	PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD),
+	PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD),
+	PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD),
+	PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD),
+	PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD),
+	PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD),
+	PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD),
+
+	/* PTN */
+	PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN),
+	PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN),
+	PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN),
+	PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN),
+	PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN),
+	PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN),
+	PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN),
+	PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN),
+
+	/* PTQ */
+	PINMUX_DATA(PTQ6_DATA, PTQ6_OUT),
+	PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD),
+	PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD),
+	PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD),
+	PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD),
+	PINMUX_DATA(PTQ1_DATA, PTQ1_OUT),
+	PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU),
+
+	/* PTR */
+	PINMUX_DATA(PTR4_DATA, PTR4_OUT),
+	PINMUX_DATA(PTR3_DATA, PTR3_OUT),
+	PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+	PINMUX_DATA(PTR1_DATA, PTR1_OUT),
+	PINMUX_DATA(PTR0_DATA, PTR0_OUT),
+
+	/* PTS */
+	PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD),
+	PINMUX_DATA(PTS3_DATA, PTS3_OUT),
+	PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD),
+	PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD),
+	PINMUX_DATA(PTS0_DATA, PTS0_OUT),
+
+	/* PTT */
+	PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD),
+	PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD),
+	PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD),
+	PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD),
+	PINMUX_DATA(PTT0_DATA, PTT0_OUT),
+
+	/* PTU */
+	PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD),
+	PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD),
+	PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD),
+	PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD),
+	PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD),
+
+	/* PTV */
+	PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD),
+	PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD),
+	PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD),
+	PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD),
+	PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD),
+
+	/* PTW */
+	PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD),
+	PINMUX_DATA(PTW5_DATA, PTW5_OUT),
+	PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD),
+	PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD),
+	PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD),
+	PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD),
+	PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD),
+
+	/* PTX */
+	PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD),
+	PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD),
+	PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD),
+	PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD),
+	PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD),
+	PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD),
+	PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD),
+
+	/* PTY */
+	PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU),
+	PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU),
+	PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU),
+	PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU),
+	PINMUX_DATA(PTY1_DATA, PTY1_OUT),
+	PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU),
+
+	/* PTZ */
+	PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU),
+	PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU),
+	PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU),
+	PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU),
+	PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU),
+
+	/* SCIF0 */
+	PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD),
+	PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD),
+	PINMUX_DATA(SCIF0_RTS_MARK, PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD),
+	PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD),
+	PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO),
+
+	/* SCIF1 */
+	PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD),
+	PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD),
+	PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS),
+	PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS),
+	PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK),
+
+	/* SCIF2 */
+	PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD),
+	PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, VIO_HD_SCIF2_RXD),
+	PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS),
+	PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS),
+	PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK),
+
+	/* SIO */
+	PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD),
+	PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD),
+	PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR),
+	PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT),
+	PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR),
+	PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT),
+	PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6),
+
+	/* CEU */
+	PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15),
+	PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14),
+	PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13),
+	PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12),
+	PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11),
+	PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10),
+	PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9),
+	PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8),
+	PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK),
+	PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD),
+	PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD),
+	PINMUX_DATA(VIO_D4_MARK, VIO_D4),
+	PINMUX_DATA(VIO_D3_MARK, VIO_D3),
+	PINMUX_DATA(VIO_D2_MARK, VIO_D2),
+	PINMUX_DATA(VIO_D1_MARK, VIO_D1),
+	PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK),
+	PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS),
+	PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS),
+	PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD),
+	PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS),
+	PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS),
+	PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK),
+	PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD),
+	PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
+		    HIZB0_VIO, FOE_VIO_VD2),
+	PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
+		    HIZB1_VIO, FCE_VIO_HD2),
+	PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
+		    HIZB1_VIO, FRB_VIO_CLK2),
+
+	/* LCDC */
+	PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23),
+	PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22),
+	PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21),
+	PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20),
+	PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI),
+	PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK),
+	PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+		    LCDD17_DV_HSYNC),
+	PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+		    LCDD16_DV_VSYNC),
+	PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15),
+	PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14),
+	PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13),
+	PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12),
+	PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11),
+	PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10),
+	PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9),
+	PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8),
+	PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7),
+	PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6),
+	PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5),
+	PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4),
+	PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3),
+	PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2),
+	PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1),
+	PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0),
+	PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK),
+	/* Main LCD */
+	PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2),
+	PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+		    HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+	PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+		    HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+	PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN),
+	/* Main LCD - RGB Mode */
+	PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR),
+	PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, LCDHSYN_LCDCS),
+	PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS),
+	/* Main LCD - SYS Mode */
+	PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS),
+	PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS),
+	PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR),
+	PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD),
+	/* Sub LCD - SYS Mode */
+	PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2),
+	PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+		    HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+	PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+		    HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+	PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK),
+	PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2),
+
+	/* BSC */
+	PINMUX_DATA(IOIS16_MARK, IOIS16),
+	PINMUX_DATA(A25_MARK, A25),
+	PINMUX_DATA(A24_MARK, A24),
+	PINMUX_DATA(A23_MARK, A23),
+	PINMUX_DATA(A22_MARK, A22),
+	PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS),
+	PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2),
+	PINMUX_DATA(WAIT_MARK, WAIT),
+	PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B),
+
+	/* SBSC */
+	PINMUX_DATA(HPD63_MARK, HPD63),
+	PINMUX_DATA(HPD62_MARK, HPD62),
+	PINMUX_DATA(HPD61_MARK, HPD61),
+	PINMUX_DATA(HPD60_MARK, HPD60),
+	PINMUX_DATA(HPD59_MARK, HPD59),
+	PINMUX_DATA(HPD58_MARK, HPD58),
+	PINMUX_DATA(HPD57_MARK, HPD57),
+	PINMUX_DATA(HPD56_MARK, HPD56),
+	PINMUX_DATA(HPD55_MARK, HPD55),
+	PINMUX_DATA(HPD54_MARK, HPD54),
+	PINMUX_DATA(HPD53_MARK, HPD53),
+	PINMUX_DATA(HPD52_MARK, HPD52),
+	PINMUX_DATA(HPD51_MARK, HPD51),
+	PINMUX_DATA(HPD50_MARK, HPD50),
+	PINMUX_DATA(HPD49_MARK, HPD49),
+	PINMUX_DATA(HPD48_MARK, HPD48),
+	PINMUX_DATA(HPDQM7_MARK, HPDQM7),
+	PINMUX_DATA(HPDQM6_MARK, HPDQM6),
+	PINMUX_DATA(HPDQM5_MARK, HPDQM5),
+	PINMUX_DATA(HPDQM4_MARK, HPDQM4),
+
+	/* IRQ */
+	PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0),
+	PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1),
+	PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2),
+	PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3,
+		    HIZC11_IRQ3, PTQ0),
+	PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS),
+	PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5),
+	PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6),
+	PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7),
+
+	/* SDHI */
+	PINMUX_DATA(SDHICD_MARK, SDHICD),
+	PINMUX_DATA(SDHIWP_MARK, SDHIWP),
+	PINMUX_DATA(SDHID3_MARK, SDHID3),
+	PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2),
+	PINMUX_DATA(SDHID1_MARK, SDHID1),
+	PINMUX_DATA(SDHID0_MARK, SDHID0),
+	PINMUX_DATA(SDHICMD_MARK, SDHICMD),
+	PINMUX_DATA(SDHICLK_MARK, SDHICLK),
+
+	/* SIU - Port A */
+	PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, SIUAOLR_SIOF1_SYNC),
+	PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, SIUAOBT_SIOF1_SCK),
+	PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, SIUAISLD_SIOF1_RXD),
+	PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, SIUAILR_SIOF1_SS2),
+	PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, SIUAIBT_SIOF1_SS1),
+	PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, SIUAOSLD_SIOF1_TXD),
+	PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIUMCKA, PTK0),
+	PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, PTK0),
+
+	/* SIU - Port B */
+	PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
+	PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT),
+	PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD),
+	PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR),
+	PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT),
+	PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD),
+	PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6),
+	PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6),
+
+	/* AUD */
+	PINMUX_DATA(AUDSYNC_MARK, AUDSYNC),
+	PINMUX_DATA(AUDATA3_MARK, AUDATA3),
+	PINMUX_DATA(AUDATA2_MARK, AUDATA2),
+	PINMUX_DATA(AUDATA1_MARK, AUDATA1),
+	PINMUX_DATA(AUDATA0_MARK, AUDATA0),
+
+	/* DMAC */
+	PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK),
+	PINMUX_DATA(DREQ0_MARK, DREQ0),
+
+	/* VOU */
+	PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI),
+	PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK),
+	PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC),
+	PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC),
+	PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15),
+	PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14),
+	PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13),
+	PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12),
+	PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11),
+	PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10),
+	PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9),
+	PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8),
+	PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7),
+	PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6),
+	PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5),
+	PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4),
+	PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3),
+	PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2),
+	PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1),
+	PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0),
+
+	/* CPG */
+	PINMUX_DATA(STATUS0_MARK, STATUS0),
+	PINMUX_DATA(PDSTATUS_MARK, PDSTATUS),
+
+	/* SIOF0 */
+	PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0),
+	PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, SIOF0_SCK_TS_SCK),
+	PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN),
+	PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC),
+	PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST),
+	PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+		    PSB7_SIOF0_TXD, PTQ1),
+	PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN,
+		    PSB6_SIOF0_RXD, PTQ2),
+
+	/* SIOF1 */
+	PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK,
+		    PSB1_SIOF1_MCK, PTK0),
+	PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK),
+	PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC),
+	PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1),
+	PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2),
+	PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD),
+	PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD),
+
+	/* SIM */
+	PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0),
+	PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1),
+	PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST),
+
+	/* TSIF */
+	PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2),
+	PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK),
+	PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN),
+	PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC),
+
+	/* IRDA */
+	PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2),
+	PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+		    PSB7_IRDA_OUT, PTQ1),
+
+	/* TPU */
+	PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO),
+
+	/* FLCTL */
+	PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2),
+	PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15),
+	PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14),
+	PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13),
+	PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12),
+	PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11),
+	PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10),
+	PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9),
+	PINMUX_DATA(NAF0_MARK, PSE0_NAF0, HIZB0_VIO, NAF0_VIO_D8),
+	PINMUX_DATA(FCDE_MARK, FCDE),
+	PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2),
+	PINMUX_DATA(FSC_MARK, FSC),
+	PINMUX_DATA(FWE_MARK, FWE),
+	PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2),
+
+	/* KEYSC */
+	PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6),
+	PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1),
+	PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2),
+	PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3),
+	PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7),
+	PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0),
+	PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1),
+	PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2),
+	PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3),
+	PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6),
+	PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+	/* PTA */
+	PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+	PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+	PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+	PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+	PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+	PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+	PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+	PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+	/* PTB */
+	PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+	PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+	PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+	PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+	PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+	PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+	PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+	PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+	/* PTC */
+	PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+	PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+	PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+	PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+	PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+	PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+	/* PTD */
+	PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+	PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+	PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+	PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+	PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+	PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+	PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+	PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+	/* PTE */
+	PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+	PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+	PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+	PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+	PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+	PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+	/* PTF */
+	PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+	PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+	PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+	PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+	PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+	PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+	PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+	/* PTG */
+	PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+	PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+	PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+	PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+	PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+	/* PTH */
+	PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+	PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+	PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+	PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+	PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+	PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+	PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+	PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+	/* PTJ */
+	PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+	PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+	PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+	PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+	PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+	/* PTK */
+	PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+	PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+	PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+	PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+	PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+	PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+	PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+	/* PTL */
+	PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+	PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+	PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+	PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+	PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+	PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+	PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+	PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+	/* PTM */
+	PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+	PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+	PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+	PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+	PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+	PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+	PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+	PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+	/* PTN */
+	PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+	PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+	PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+	PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+	PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+	PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+	PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+	PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+	/* PTQ */
+	PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+	PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+	PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+	PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+	PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+	PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+	PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+	/* PTR */
+	PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+	PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+	PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+	PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+	PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+	/* PTS */
+	PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+	PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+	PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+	PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+	PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+	/* PTT */
+	PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+	PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+	PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+	PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+	PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+	/* PTU */
+	PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+	PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+	PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+	PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+	PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+	/* PTV */
+	PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+	PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+	PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+	PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+	PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+	/* PTW */
+	PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+	PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+	PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+	PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+	PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+	PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+	PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+	/* PTX */
+	PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+	PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+	PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+	PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+	PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+	PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+	PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+	/* PTY */
+	PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+	PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+	PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+	PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+	PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+	PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+	/* PTZ */
+	PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+	PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+	PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+	PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+	PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+
+	/* SCIF0 */
+	PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+	/* SCIF1 */
+	PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+	/* SCIF2 */
+	PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+
+	/* SIO */
+	PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK),
+
+	/* CEU */
+	PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+	/* LCDC */
+	PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+	/* Main LCD */
+	PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+	/* Main LCD - RGB Mode */
+	PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+	/* Main LCD - SYS Mode */
+	PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+	/* Sub LCD - SYS Mode */
+	PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK),
+
+	/* BSC */
+	PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+	PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+	PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+	PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+	PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+	PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+	PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+	PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+	PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+
+	/* SBSC */
+	PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK),
+	PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK),
+	PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK),
+	PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK),
+	PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK),
+	PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK),
+
+	/* IRQ */
+	PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+	/* SDHI */
+	PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHIWP, SDHIWP_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK),
+
+	/* SIU - Port A */
+	PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK),
+
+	/* SIU - Port B */
+	PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK),
+
+	/* AUD */
+	PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+	/* DMAC */
+	PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+	/* VOU */
+	PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+	/* CPG */
+	PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+	PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+	/* SIOF0 */
+	PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+
+	/* SIOF1 */
+	PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+
+	/* SIM */
+	PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+	PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+	/* TSIF */
+	PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK),
+	PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK),
+	PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK),
+
+	/* IRDA */
+	PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+	PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+	/* TPU */
+	PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK),
+
+	/* FLCTL */
+	PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+	PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+	PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+	PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+	PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+	PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+	/* KEYSC */
+	PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+	{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+		VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN,
+		VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN,
+		VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN,
+		VIO_D4, 0, PTA4_IN_PD, PTA4_IN,
+		VIO_D3, 0, PTA3_IN_PD, PTA3_IN,
+		VIO_D2, 0, PTA2_IN_PD, PTA2_IN,
+		VIO_D1, 0, PTA1_IN_PD, PTA1_IN,
+		VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN }
+	},
+	{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+		HPD55, PTB7_OUT, 0, PTB7_IN,
+		HPD54, PTB6_OUT, 0, PTB6_IN,
+		HPD53, PTB5_OUT, 0, PTB5_IN,
+		HPD52, PTB4_OUT, 0, PTB4_IN,
+		HPD51, PTB3_OUT, 0, PTB3_IN,
+		HPD50, PTB2_OUT, 0, PTB2_IN,
+		HPD49, PTB1_OUT, 0, PTB1_IN,
+		HPD48, PTB0_OUT, 0, PTB0_IN }
+	},
+	{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+		0, 0, PTC7_IN_PU, PTC7_IN,
+		0, 0, 0, 0,
+		IOIS16, 0, PTC5_IN_PU, PTC5_IN,
+		HPDQM7, PTC4_OUT, 0, PTC4_IN,
+		HPDQM6, PTC3_OUT, 0, PTC3_IN,
+		HPDQM5, PTC2_OUT, 0, PTC2_IN,
+		0, 0, 0, 0,
+		HPDQM4, PTC0_OUT, 0, PTC0_IN }
+	},
+	{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+		SDHICD, 0, PTD7_IN_PU, PTD7_IN,
+		SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+		SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+		IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+		SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+		SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+		SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+		SDHICLK, PTD0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+		A25, PTE7_OUT, PTE7_IN_PD, PTE7_IN,
+		A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN,
+		A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN,
+		A22, PTE4_OUT, PTE4_IN_PD, PTE4_IN,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN,
+		IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN }
+	},
+	{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+		0, 0, 0, 0,
+		PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN,
+		SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN,
+		SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN,
+		SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN,
+		SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN,
+		SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN,
+		SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		AUDSYNC, PTG4_OUT, 0, 0,
+		AUDATA3, PTG3_OUT, 0, 0,
+		AUDATA2, PTG2_OUT, 0, 0,
+		AUDATA1, PTG1_OUT, 0, 0,
+		AUDATA0, PTG0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+		LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0,
+		LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN,
+		LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN,
+		LCDDISP_LCDRS, PTH4_OUT, 0, 0,
+		LCDHSYN_LCDCS, PTH3_OUT, 0, 0,
+		LCDDON_LCDDON2, PTH2_OUT, 0, 0,
+		LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN,
+		LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN }
+	},
+	{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+		STATUS0, PTJ7_OUT, 0, 0,
+		0, PTJ6_OUT, 0, 0,
+		PDSTATUS, PTJ5_OUT, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+		IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+	},
+	{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+		0, 0, 0, 0,
+		SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN,
+		SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN,
+		SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN,
+		SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN,
+		SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN,
+		SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0,
+		PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN }
+	},
+	{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+		LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN,
+		LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN,
+		LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN,
+		LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN,
+		LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN,
+		LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, PTL2_IN,
+		LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN,
+		LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN }
+	},
+	{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+		LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN,
+		LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN,
+		LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN,
+		LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN,
+		LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN,
+		LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN,
+		LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN,
+		LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN }
+	},
+	{ PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+		HPD63, PTN7_OUT, 0, PTN7_IN,
+		HPD62, PTN6_OUT, 0, PTN6_IN,
+		HPD61, PTN5_OUT, 0, PTN5_IN,
+		HPD60, PTN4_OUT, 0, PTN4_IN,
+		HPD59, PTN3_OUT, 0, PTN3_IN,
+		HPD58, PTN2_OUT, 0, PTN2_IN,
+		HPD57, PTN1_OUT, 0, PTN1_IN,
+		HPD56, PTN0_OUT, 0, PTN0_IN }
+	},
+	{ PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+		0, 0, 0, 0,
+		SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0,
+		SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN,
+		SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN,
+		SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN,
+		PTQ2, 0, PTQ2_IN_PD, PTQ2_IN,
+		PTQ1, PTQ1_OUT, 0, 0,
+		PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+	},
+	{ PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		LCDRD, PTR4_OUT, 0, 0,
+		CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
+		WAIT, 0, PTR2_IN_PU, PTR2_IN,
+		LCDDCK_LCDWR, PTR1_OUT, 0, 0,
+		LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN,
+		SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
+		SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN,
+		SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN,
+		SCIF0_TXD, PTS0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN,
+		FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN,
+		FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN,
+		DREQ0, 0, PTT1_IN_PD, PTT1_IN,
+		FCDE, PTT0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		NAF2_VIO_D10, PTU4_OUT, PTU4_IN_PD, PTU4_IN,
+		NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN,
+		NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN,
+		FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN,
+		FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN }
+	},
+	{ PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN,
+		NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN,
+		NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN,
+		NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN,
+		NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN }
+	},
+	{ PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+		0, 0, 0, 0,
+		VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN,
+		VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0,
+		VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN,
+		VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN,
+		VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN,
+		VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN,
+		VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN }
+	},
+	{ PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+		0, 0, 0, 0,
+		CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+		LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN,
+		LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN,
+		LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN,
+		LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN,
+		LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN,
+		LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN }
+	},
+	{ PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+		KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+		KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+		KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+		KEYOUT1, PTY1_OUT, 0, 0,
+		KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+	},
+	{ PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN,
+		KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN,
+		KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN,
+		KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN,
+		KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN,
+		0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+		PSA15_KEYIN0, PSA15_IRQ6,
+		PSA14_KEYIN4, PSA14_IRQ7,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		PSA9_IRQ4, PSA9_BS,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		PSA4_IRQ2, PSA4_SDHID2,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0 }
+	},
+	{ PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+		PSB15_SIOTXD, PSB15_SIUBOSLD,
+		PSB14_SIORXD, PSB14_SIUBISLD,
+		PSB13_SIOD, PSB13_SIUBILR,
+		PSB12_SIOSTRB0, PSB12_SIUBIBT,
+		PSB11_SIOSTRB1, PSB11_SIUBOLR,
+		PSB10_SIOSCK, PSB10_SIUBOBT,
+		PSB9_SIOMCK, PSB9_SIUMCKB,
+		PSB8_SIOF0_MCK, PSB8_IRQ3,
+		PSB7_SIOF0_TXD, PSB7_IRDA_OUT,
+		PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+		PSB5_SIOF0_SCK, PSB5_TS_SCK,
+		PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+		PSB3_SIOF0_SS1, PSB3_TS_SPSYNC,
+		PSB2_SIOF0_SS2, PSB2_SIM_RST,
+		PSB1_SIUMCKA, PSB1_SIOF1_MCK,
+		PSB0_SIUAOSLD, PSB0_SIOF1_TXD }
+	},
+	{ PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+		PSC15_SIUAISLD, PSC15_SIOF1_RXD,
+		PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+		PSC13_SIUAOLR, PSC13_SIOF1_SYNC,
+		PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+		PSC11_SIUAILR, PSC11_SIOF1_SS2,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		PSC0_NAF, PSC0_VIO }
+	},
+	{ PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+		0, 0,
+		0, 0,
+		PSD13_VIO, PSD13_SCIF2,
+		PSD12_VIO, PSD12_SCIF1,
+		PSD11_VIO, PSD11_SCIF1,
+		PSD10_VIO_D0, PSD10_LCDLCLK,
+		PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB,
+		PSD8_SCIF0_SCK, PSD8_TPUTO,
+		PSD7_SCIF0_RTS, PSD7_SIUAOSPD,
+		PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+		PSD5_CS6B_CE1B, PSD5_LCDCS2,
+		0, 0,
+		PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+		PSD2_LCDDON, PSD2_LCDDON2,
+		0, 0,
+		PSD0_LCDD19_LCDD0, PSD0_DV }
+	},
+	{ PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+		PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+		PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+		PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT,
+		PSE12_LCDVSYN2, PSE12_DACK,
+		PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		PSE3_FLCTL, PSE3_VIO,
+		PSE2_NAF2, PSE2_VIO_D10,
+		PSE1_NAF1, PSE1_VIO_D9,
+		PSE0_NAF0, PSE0_VIO_D8 }
+	},
+	{ PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) {
+		0, 0,
+		HIZA14_KEYSC, HIZA14_HIZ,
+		0, 0,
+		0, 0,
+		0, 0,
+		HIZA10_NAF, HIZA10_HIZ,
+		HIZA9_VIO, HIZA9_HIZ,
+		HIZA8_LCDC, HIZA8_HIZ,
+		HIZA7_LCDC, HIZA7_HIZ,
+		HIZA6_LCDC, HIZA6_HIZ,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0 }
+	},
+	{ PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) {
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		HIZB1_VIO, HIZB1_HIZ,
+		HIZB0_VIO, HIZB0_HIZ }
+	},
+	{ PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) {
+		HIZC15_IRQ7, HIZC15_HIZ,
+		HIZC14_IRQ6, HIZC14_HIZ,
+		HIZC13_IRQ5, HIZC13_HIZ,
+		HIZC12_IRQ4, HIZC12_HIZ,
+		HIZC11_IRQ3, HIZC11_HIZ,
+		HIZC10_IRQ2, HIZC10_HIZ,
+		HIZC9_IRQ1, HIZC9_HIZ,
+		HIZC8_IRQ0, HIZC8_HIZ,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0 }
+	},
+	{ PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) {
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		MSELB9_VIO, MSELB9_VIO2,
+		MSELB8_RGB, MSELB8_SYS,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0 }
+	},
+	{}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+	{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+		PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+		PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+	},
+	{ PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+		PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+		PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+	},
+	{ PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+		PTC7_DATA, 0, PTC5_DATA, PTC4_DATA,
+		PTC3_DATA, PTC2_DATA, 0, PTC0_DATA }
+	},
+	{ PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+		PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+		PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+	},
+	{ PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+		PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+		0, 0, PTE1_DATA, PTE0_DATA }
+	},
+	{ PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+		0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+		PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+	},
+	{ PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+		0, 0, 0, PTG4_DATA,
+		PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+	},
+	{ PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+		PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+		PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+	},
+	{ PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+		PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+		0, 0, PTJ1_DATA, PTJ0_DATA }
+	},
+	{ PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+		0, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+		PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+	},
+	{ PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+		PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+		PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+	},
+	{ PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+		PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+		PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+	},
+	{ PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+		PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+		PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+	},
+	{ PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+		0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+		PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+	},
+	{ PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+		0, 0, 0, PTR4_DATA,
+		PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+	},
+	{ PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+		0, 0, 0, PTS4_DATA,
+		PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+	},
+	{ PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+		0, 0, 0, PTT4_DATA,
+		PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+	},
+	{ PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+		0, 0, 0, PTU4_DATA,
+		PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+	},
+	{ PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+		0, 0, 0, PTV4_DATA,
+		PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+	},
+	{ PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+		0, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+		PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+	},
+	{ PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+		0, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+		PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+	},
+	{ PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+		0, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+		PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+	},
+	{ PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+		0, 0, PTZ5_DATA, PTZ4_DATA,
+		PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+	},
+	{ },
+};
+
+static struct pinmux_info sh7722_pinmux_info = {
+	.name = "sh7722_pfc",
+	.reserved_id = PINMUX_RESERVED,
+	.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+	.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+	.input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+	.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+	.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+	.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.first_gpio = GPIO_PTA7,
+	.last_gpio = GPIO_FN_KEYOUT5_IN5,
+
+	.gpios = pinmux_gpios,
+	.cfg_regs = pinmux_config_regs,
+	.data_regs = pinmux_data_regs,
+
+	.gpio_data = pinmux_data,
+	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+	return register_pinmux(&sh7722_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c
new file mode 100644
index 0000000..88bf5ec
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c
@@ -0,0 +1,1909 @@
+/*
+ * SH7723 Pinmux
+ *
+ *  Copyright (C) 2008  Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7723.h>
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+	PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+	PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+	PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+	PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+	PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+	PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+	PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+	PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+	PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+	PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+	PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+	PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+	PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+	PTJ7_DATA, PTJ5_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+	PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+	PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+	PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+	PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+	PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+	PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+	PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+	PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+	PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+	PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+	PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+	PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+	PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+	PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+	PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+	PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+	PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+	PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+	PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+	PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+	PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+	PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+	PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+	PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+	PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+	PINMUX_DATA_END,
+
+	PINMUX_INPUT_BEGIN,
+	PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+	PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+	PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+	PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+	PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+	PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+	PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+	PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+	PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+	PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+	PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+	PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+	PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+	PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+	PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+	PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+	PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+	PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+	PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+	PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+	PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+	PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+	PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+	PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+	PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+	PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+	PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+	PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+	PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+	PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+	PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+	PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+	PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+	PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+	PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+	PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+	PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+	PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+	PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+	PINMUX_INPUT_END,
+
+	PINMUX_INPUT_PULLUP_BEGIN,
+	PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+	PTB2_IN_PU, PTB1_IN_PU,
+	PTR2_IN_PU,
+	PINMUX_INPUT_PULLUP_END,
+
+	PINMUX_OUTPUT_BEGIN,
+	PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+	PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+	PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+	PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+	PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+	PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+	PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+	PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+	PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+	PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+	PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+	PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+	PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+	PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+	PTJ7_OUT, PTJ5_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+	PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+	PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+	PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+	PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+	PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+	PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+	PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+	PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+	PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+	PTR1_OUT, PTR0_OUT,
+	PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+	PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+	PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+	PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+	PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+	PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+	PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+	PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+	PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+	PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+	PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+	PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+	PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+	PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+	PINMUX_OUTPUT_END,
+
+	PINMUX_FUNCTION_BEGIN,
+	PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+	PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+	PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+	PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+	PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+	PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+	PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+	PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+	PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+	PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+	PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+	PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+	PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+	PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+	PTJ7_FN, PTJ5_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+	PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+	PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+	PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+	PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+	PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+	PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+	PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+	PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+	PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+	PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+	PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+	PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+	PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+	PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+	PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+	PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+	PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+	PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+	PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+	PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+	PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+	PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+	PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+	PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+	PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+	PSA15_PSA14_FN1, PSA15_PSA14_FN2,
+	PSA13_PSA12_FN1, PSA13_PSA12_FN2,
+	PSA11_PSA10_FN1, PSA11_PSA10_FN2,
+	PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3,
+	PSA3_PSA2_FN1, PSA3_PSA2_FN2,
+	PSB15_PSB14_FN1, PSB15_PSB14_FN2,
+	PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS,
+	PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3,
+	PSB7_PSB6_FN1, PSB7_PSB6_FN2,
+	PSB5_PSB4_FN1, PSB5_PSB4_FN2,
+	PSB3_PSB2_FN1, PSB3_PSB2_FN2,
+	PSC15_PSC14_FN1, PSC15_PSC14_FN2,
+	PSC13_PSC12_FN1, PSC13_PSC12_FN2,
+	PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3,
+	PSC9_PSC8_FN1, PSC9_PSC8_FN2,
+	PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3,
+	PSD15_PSD14_FN1, PSD15_PSD14_FN2,
+	PSD13_PSD12_FN1, PSD13_PSD12_FN2,
+	PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3,
+	PSD9_PSD8_FN1, PSD9_PSD8_FN2,
+	PSD7_PSD6_FN1, PSD7_PSD6_FN2,
+	PSD5_PSD4_FN1, PSD5_PSD4_FN2,
+	PSD3_PSD2_FN1, PSD3_PSD2_FN2,
+	PSD1_PSD0_FN1, PSD1_PSD0_FN2,
+	PINMUX_FUNCTION_END,
+
+	PINMUX_MARK_BEGIN,
+	SCIF0_PTT_TXD_MARK, SCIF0_PTT_RXD_MARK,
+	SCIF0_PTT_SCK_MARK, SCIF0_PTU_TXD_MARK,
+	SCIF0_PTU_RXD_MARK, SCIF0_PTU_SCK_MARK,
+
+	SCIF1_PTS_TXD_MARK, SCIF1_PTS_RXD_MARK,
+	SCIF1_PTS_SCK_MARK, SCIF1_PTV_TXD_MARK,
+	SCIF1_PTV_RXD_MARK, SCIF1_PTV_SCK_MARK,
+
+	SCIF2_PTT_TXD_MARK, SCIF2_PTT_RXD_MARK,
+	SCIF2_PTT_SCK_MARK, SCIF2_PTU_TXD_MARK,
+	SCIF2_PTU_RXD_MARK, SCIF2_PTU_SCK_MARK,
+
+	SCIF3_PTS_TXD_MARK, SCIF3_PTS_RXD_MARK,
+	SCIF3_PTS_SCK_MARK, SCIF3_PTS_RTS_MARK,
+	SCIF3_PTS_CTS_MARK, SCIF3_PTV_TXD_MARK,
+	SCIF3_PTV_RXD_MARK, SCIF3_PTV_SCK_MARK,
+	SCIF3_PTV_RTS_MARK, SCIF3_PTV_CTS_MARK,
+
+	SCIF4_PTE_TXD_MARK, SCIF4_PTE_RXD_MARK,
+	SCIF4_PTE_SCK_MARK, SCIF4_PTN_TXD_MARK,
+	SCIF4_PTN_RXD_MARK, SCIF4_PTN_SCK_MARK,
+
+	SCIF5_PTE_TXD_MARK, SCIF5_PTE_RXD_MARK,
+	SCIF5_PTE_SCK_MARK, SCIF5_PTN_TXD_MARK,
+	SCIF5_PTN_RXD_MARK, SCIF5_PTN_SCK_MARK,
+
+	VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+	VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+	VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+	VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+	VIO_FLD_MARK, VIO_CKO_MARK,
+	VIO_VD1_MARK, VIO_HD1_MARK, VIO_CLK1_MARK,
+	VIO_HD2_MARK, VIO_VD2_MARK, VIO_CLK2_MARK,
+
+	LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+	LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+	LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+	LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+	LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+	LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+	LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+	LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+	LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+	LCDLCLK_PTR_MARK, LCDLCLK_PTW_MARK,
+
+	IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+	IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+
+	AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+	AUDCK_MARK, AUDSYNC_MARK,
+
+	SDHI0CD_PTD_MARK, SDHI0WP_PTD_MARK,
+	SDHI0D3_PTD_MARK, SDHI0D2_PTD_MARK,
+	SDHI0D1_PTD_MARK, SDHI0D0_PTD_MARK,
+	SDHI0CMD_PTD_MARK, SDHI0CLK_PTD_MARK,
+
+	SDHI0CD_PTS_MARK, SDHI0WP_PTS_MARK,
+	SDHI0D3_PTS_MARK, SDHI0D2_PTS_MARK,
+	SDHI0D1_PTS_MARK, SDHI0D0_PTS_MARK,
+	SDHI0CMD_PTS_MARK, SDHI0CLK_PTS_MARK,
+
+	SDHI1CD_MARK, SDHI1WP_MARK, SDHI1D3_MARK, SDHI1D2_MARK,
+	SDHI1D1_MARK, SDHI1D0_MARK, SDHI1CMD_MARK, SDHI1CLK_MARK,
+
+	SIUAFCK_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAISLD_MARK,
+	SIUAOLR_MARK, SIUAOBT_MARK, SIUAOSLD_MARK, SIUAMCK_MARK,
+	SIUAISPD_MARK, SIUAOSPD_MARK,
+
+	SIUBFCK_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBISLD_MARK,
+	SIUBOLR_MARK, SIUBOBT_MARK, SIUBOSLD_MARK, SIUBMCK_MARK,
+
+	IRDA_IN_MARK, IRDA_OUT_MARK,
+
+	DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+	DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+	DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+	DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+	DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+
+	KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+	KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+	KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+
+	MSIOF0_PTF_TXD_MARK, MSIOF0_PTF_RXD_MARK, MSIOF0_PTF_MCK_MARK,
+	MSIOF0_PTF_TSYNC_MARK, MSIOF0_PTF_TSCK_MARK, MSIOF0_PTF_RSYNC_MARK,
+	MSIOF0_PTF_RSCK_MARK, MSIOF0_PTF_SS1_MARK, MSIOF0_PTF_SS2_MARK,
+
+	MSIOF0_PTT_TXD_MARK, MSIOF0_PTT_RXD_MARK, MSIOF0_PTX_MCK_MARK,
+	MSIOF0_PTT_TSYNC_MARK, MSIOF0_PTT_TSCK_MARK, MSIOF0_PTT_RSYNC_MARK,
+	MSIOF0_PTT_RSCK_MARK, MSIOF0_PTT_SS1_MARK, MSIOF0_PTT_SS2_MARK,
+
+	MSIOF1_TXD_MARK, MSIOF1_RXD_MARK, MSIOF1_MCK_MARK,
+	MSIOF1_TSYNC_MARK, MSIOF1_TSCK_MARK, MSIOF1_RSYNC_MARK,
+	MSIOF1_RSCK_MARK, MSIOF1_SS1_MARK, MSIOF1_SS2_MARK,
+
+	TS0_SDAT_MARK, TS0_SCK_MARK, TS0_SDEN_MARK, TS0_SPSYNC_MARK,
+
+	FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+	NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+	FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+
+	DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+
+	AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+
+	STATUS0_MARK, PDSTATUS_MARK,
+
+	TPUTO3_MARK, TPUTO2_MARK, TPUTO1_MARK, TPUTO0_MARK,
+
+	D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+	D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+	D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+	D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+	IOIS16_MARK, WAIT_MARK, BS_MARK,
+	A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+	CS6B_CE1B_MARK, CS6A_CE2B_MARK,
+	CS5B_CE1A_MARK, CS5A_CE2A_MARK,
+	WE3_ICIOWR_MARK, WE2_ICIORD_MARK,
+
+	IDED15_MARK, IDED14_MARK, IDED13_MARK, IDED12_MARK,
+	IDED11_MARK, IDED10_MARK, IDED9_MARK, IDED8_MARK,
+	IDED7_MARK, IDED6_MARK, IDED5_MARK, IDED4_MARK,
+	IDED3_MARK, IDED2_MARK, IDED1_MARK, IDED0_MARK,
+	DIRECTION_MARK, EXBUF_ENB_MARK, IDERST_MARK, IODACK_MARK,
+	IODREQ_MARK, IDEIORDY_MARK, IDEINT_MARK, IDEIOWR_MARK,
+	IDEIORD_MARK, IDECS1_MARK, IDECS0_MARK, IDEA2_MARK,
+	IDEA1_MARK, IDEA0_MARK,
+	PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+	/* PTA GPIO */
+	PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+	PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+	PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+	PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+	PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+	PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+	PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+	PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+	/* PTB GPIO */
+	PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+	PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+	PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+	PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+	PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+	PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+	PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+	PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+	/* PTC GPIO */
+	PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+	PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+	PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+	PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+	PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+	PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+	PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+	PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+	/* PTD GPIO */
+	PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+	PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+	PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+	PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+	PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+	PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+	PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+	PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+	/* PTE GPIO */
+	PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+	PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+	PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+	PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+	PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+	PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+	/* PTF GPIO */
+	PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+	PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+	PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+	PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+	PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+	PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+	PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+	PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+	/* PTG GPIO */
+	PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+	PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+	PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+	PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+	PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+	PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+	/* PTH GPIO */
+	PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+	PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+	PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+	PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+	PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+	PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+	PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+	PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+	/* PTJ GPIO */
+	PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+	PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+	PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+	PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+	PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+	PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+	/* PTK GPIO */
+	PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+	PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+	PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+	PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+	PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+	PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+	PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+	PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+	/* PTL GPIO */
+	PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+	PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+	PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+	PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+	PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+	PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+	PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+	PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+	/* PTM GPIO */
+	PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+	PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+	PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+	PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+	PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+	PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+	PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+	PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+	/* PTN GPIO */
+	PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
+	PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+	PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+	PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+	PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+	PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+	PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+	PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+	/* PTQ GPIO */
+	PINMUX_DATA(PTQ3_DATA, PTQ3_IN),
+	PINMUX_DATA(PTQ2_DATA, PTQ2_IN),
+	PINMUX_DATA(PTQ1_DATA, PTQ1_IN),
+	PINMUX_DATA(PTQ0_DATA, PTQ0_IN),
+
+	/* PTR GPIO */
+	PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+	PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+	PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+	PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+	PINMUX_DATA(PTR3_DATA, PTR3_IN),
+	PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+	PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+	PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+	/* PTS GPIO */
+	PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+	PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+	PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+	PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+	PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+	PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+	PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+	PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+	/* PTT GPIO */
+	PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+	PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+	PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+	PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+	PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+	PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+	/* PTU GPIO */
+	PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+	PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+	PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+	PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+	PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+	PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+	/* PTV GPIO */
+	PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+	PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+	PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+	PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+	PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+	PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+	PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+	PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+	/* PTW GPIO */
+	PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+	PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+	PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+	PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+	PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+	PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+	PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+	PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+	/* PTX GPIO */
+	PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+	PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+	PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+	PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+	PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+	PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+	PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+	PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+	/* PTY GPIO */
+	PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+	PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+	PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+	PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+	PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+	PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+	PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+	PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+	/* PTZ GPIO */
+	PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+	PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+	PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+	PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+	PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+	PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+	PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+	PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
+	/* PTA FN */
+	PINMUX_DATA(D23_MARK, PSA15_PSA14_FN1, PTA7_FN),
+	PINMUX_DATA(KEYOUT2_MARK, PSA15_PSA14_FN2, PTA7_FN),
+	PINMUX_DATA(D22_MARK, PSA15_PSA14_FN1, PTA6_FN),
+	PINMUX_DATA(KEYOUT1_MARK, PSA15_PSA14_FN2, PTA6_FN),
+	PINMUX_DATA(D21_MARK, PSA15_PSA14_FN1, PTA5_FN),
+	PINMUX_DATA(KEYOUT0_MARK, PSA15_PSA14_FN2, PTA5_FN),
+	PINMUX_DATA(D20_MARK, PSA15_PSA14_FN1, PTA4_FN),
+	PINMUX_DATA(KEYIN4_MARK, PSA15_PSA14_FN2, PTA4_FN),
+	PINMUX_DATA(D19_MARK, PSA15_PSA14_FN1, PTA3_FN),
+	PINMUX_DATA(KEYIN3_MARK, PSA15_PSA14_FN2, PTA3_FN),
+	PINMUX_DATA(D18_MARK, PSA15_PSA14_FN1, PTA2_FN),
+	PINMUX_DATA(KEYIN2_MARK, PSA15_PSA14_FN2, PTA2_FN),
+	PINMUX_DATA(D17_MARK, PSA15_PSA14_FN1, PTA1_FN),
+	PINMUX_DATA(KEYIN1_MARK, PSA15_PSA14_FN2, PTA1_FN),
+	PINMUX_DATA(D16_MARK, PSA15_PSA14_FN1, PTA0_FN),
+	PINMUX_DATA(KEYIN0_MARK, PSA15_PSA14_FN2, PTA0_FN),
+
+	/* PTB FN */
+	PINMUX_DATA(D31_MARK, PTB7_FN),
+	PINMUX_DATA(D30_MARK, PTB6_FN),
+	PINMUX_DATA(D29_MARK, PTB5_FN),
+	PINMUX_DATA(D28_MARK, PTB4_FN),
+	PINMUX_DATA(D27_MARK, PTB3_FN),
+	PINMUX_DATA(D26_MARK, PSA15_PSA14_FN1, PTB2_FN),
+	PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_PSA14_FN2, PTB2_FN),
+	PINMUX_DATA(D25_MARK, PSA15_PSA14_FN1, PTB1_FN),
+	PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_PSA14_FN2, PTB1_FN),
+	PINMUX_DATA(D24_MARK, PSA15_PSA14_FN1, PTB0_FN),
+	PINMUX_DATA(KEYOUT3_MARK, PSA15_PSA14_FN2, PTB0_FN),
+
+	/* PTC FN */
+	PINMUX_DATA(IDED15_MARK, PSA11_PSA10_FN1, PTC7_FN),
+	PINMUX_DATA(SDHI1CD_MARK, PSA11_PSA10_FN2, PTC7_FN),
+	PINMUX_DATA(IDED14_MARK, PSA11_PSA10_FN1, PTC6_FN),
+	PINMUX_DATA(SDHI1WP_MARK, PSA11_PSA10_FN2, PTC6_FN),
+	PINMUX_DATA(IDED13_MARK, PSA11_PSA10_FN1, PTC5_FN),
+	PINMUX_DATA(SDHI1D3_MARK, PSA11_PSA10_FN2, PTC5_FN),
+	PINMUX_DATA(IDED12_MARK, PSA11_PSA10_FN1, PTC4_FN),
+	PINMUX_DATA(SDHI1D2_MARK, PSA11_PSA10_FN2, PTC4_FN),
+	PINMUX_DATA(IDED11_MARK, PSA11_PSA10_FN1, PTC3_FN),
+	PINMUX_DATA(SDHI1D1_MARK, PSA11_PSA10_FN2, PTC3_FN),
+	PINMUX_DATA(IDED10_MARK, PSA11_PSA10_FN1, PTC2_FN),
+	PINMUX_DATA(SDHI1D0_MARK, PSA11_PSA10_FN2, PTC2_FN),
+	PINMUX_DATA(IDED9_MARK, PSA11_PSA10_FN1, PTC1_FN),
+	PINMUX_DATA(SDHI1CMD_MARK, PSA11_PSA10_FN2, PTC1_FN),
+	PINMUX_DATA(IDED8_MARK, PSA11_PSA10_FN1, PTC0_FN),
+	PINMUX_DATA(SDHI1CLK_MARK, PSA11_PSA10_FN2, PTC0_FN),
+
+	/* PTD FN */
+	PINMUX_DATA(IDED7_MARK, PSA11_PSA10_FN1, PTD7_FN),
+	PINMUX_DATA(SDHI0CD_PTD_MARK, PSA11_PSA10_FN2, PTD7_FN),
+	PINMUX_DATA(IDED6_MARK, PSA11_PSA10_FN1, PTD6_FN),
+	PINMUX_DATA(SDHI0WP_PTD_MARK, PSA11_PSA10_FN2, PTD6_FN),
+	PINMUX_DATA(IDED5_MARK, PSA11_PSA10_FN1, PTD5_FN),
+	PINMUX_DATA(SDHI0D3_PTD_MARK, PSA11_PSA10_FN2, PTD5_FN),
+	PINMUX_DATA(IDED4_MARK, PSA11_PSA10_FN1, PTD4_FN),
+	PINMUX_DATA(SDHI0D2_PTD_MARK, PSA11_PSA10_FN2, PTD4_FN),
+	PINMUX_DATA(IDED3_MARK, PSA11_PSA10_FN1, PTD3_FN),
+	PINMUX_DATA(SDHI0D1_PTD_MARK, PSA11_PSA10_FN2, PTD3_FN),
+	PINMUX_DATA(IDED2_MARK, PSA11_PSA10_FN1, PTD2_FN),
+	PINMUX_DATA(SDHI0D0_PTD_MARK, PSA11_PSA10_FN2, PTD2_FN),
+	PINMUX_DATA(IDED1_MARK, PSA11_PSA10_FN1, PTD1_FN),
+	PINMUX_DATA(SDHI0CMD_PTD_MARK, PSA11_PSA10_FN2, PTD1_FN),
+	PINMUX_DATA(IDED0_MARK, PSA11_PSA10_FN1, PTD0_FN),
+	PINMUX_DATA(SDHI0CLK_PTD_MARK, PSA11_PSA10_FN2, PTD0_FN),
+
+	/* PTE FN */
+	PINMUX_DATA(DIRECTION_MARK, PSA11_PSA10_FN1, PTE5_FN),
+	PINMUX_DATA(SCIF5_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE5_FN),
+	PINMUX_DATA(EXBUF_ENB_MARK, PSA11_PSA10_FN1, PTE4_FN),
+	PINMUX_DATA(SCIF5_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE4_FN),
+	PINMUX_DATA(IDERST_MARK, PSA11_PSA10_FN1, PTE3_FN),
+	PINMUX_DATA(SCIF5_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE3_FN),
+	PINMUX_DATA(IODACK_MARK, PSA11_PSA10_FN1, PTE2_FN),
+	PINMUX_DATA(SCIF4_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE2_FN),
+	PINMUX_DATA(IODREQ_MARK, PSA11_PSA10_FN1, PTE1_FN),
+	PINMUX_DATA(SCIF4_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE1_FN),
+	PINMUX_DATA(IDEIORDY_MARK, PSA11_PSA10_FN1, PTE0_FN),
+	PINMUX_DATA(SCIF4_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE0_FN),
+
+	/* PTF FN */
+	PINMUX_DATA(IDEINT_MARK, PTF7_FN),
+	PINMUX_DATA(IDEIOWR_MARK, PSA5_PSA4_FN1, PTF6_FN),
+	PINMUX_DATA(MSIOF0_PTF_SS2_MARK, PSA5_PSA4_FN2, PTF6_FN),
+	PINMUX_DATA(MSIOF0_PTF_RSYNC_MARK, PSA5_PSA4_FN3, PTF6_FN),
+	PINMUX_DATA(IDEIORD_MARK, PSA5_PSA4_FN1, PTF5_FN),
+	PINMUX_DATA(MSIOF0_PTF_SS1_MARK, PSA5_PSA4_FN2, PTF5_FN),
+	PINMUX_DATA(MSIOF0_PTF_RSCK_MARK, PSA5_PSA4_FN3, PTF5_FN),
+	PINMUX_DATA(IDECS1_MARK, PSA11_PSA10_FN1, PTF4_FN),
+	PINMUX_DATA(MSIOF0_PTF_TSYNC_MARK, PSA11_PSA10_FN2, PTF4_FN),
+	PINMUX_DATA(IDECS0_MARK, PSA11_PSA10_FN1, PTF3_FN),
+	PINMUX_DATA(MSIOF0_PTF_TSCK_MARK, PSA11_PSA10_FN2, PTF3_FN),
+	PINMUX_DATA(IDEA2_MARK, PSA11_PSA10_FN1, PTF2_FN),
+	PINMUX_DATA(MSIOF0_PTF_RXD_MARK, PSA11_PSA10_FN2, PTF2_FN),
+	PINMUX_DATA(IDEA1_MARK, PSA11_PSA10_FN1, PTF1_FN),
+	PINMUX_DATA(MSIOF0_PTF_TXD_MARK, PSA11_PSA10_FN2, PTF1_FN),
+	PINMUX_DATA(IDEA0_MARK, PSA11_PSA10_FN1, PTF0_FN),
+	PINMUX_DATA(MSIOF0_PTF_MCK_MARK, PSA11_PSA10_FN2, PTF0_FN),
+
+	/* PTG FN */
+	PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+	PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+	PINMUX_DATA(AUDATA3_MARK, PSA3_PSA2_FN1, PTG3_FN),
+	PINMUX_DATA(TPUTO3_MARK, PSA3_PSA2_FN2, PTG3_FN),
+	PINMUX_DATA(AUDATA2_MARK, PSA3_PSA2_FN1, PTG2_FN),
+	PINMUX_DATA(TPUTO2_MARK, PSA3_PSA2_FN2, PTG2_FN),
+	PINMUX_DATA(AUDATA1_MARK, PSA3_PSA2_FN1, PTG1_FN),
+	PINMUX_DATA(TPUTO1_MARK, PSA3_PSA2_FN2, PTG1_FN),
+	PINMUX_DATA(AUDATA0_MARK, PSA3_PSA2_FN1, PTG0_FN),
+	PINMUX_DATA(TPUTO0_MARK, PSA3_PSA2_FN2, PTG0_FN),
+
+	/* PTH FN */
+	PINMUX_DATA(LCDVCPWC_MARK, PTH7_FN),
+	PINMUX_DATA(LCDRD_MARK, PSB15_PSB14_FN1, PTH6_FN),
+	PINMUX_DATA(DV_CLKI_MARK, PSB15_PSB14_FN2, PTH6_FN),
+	PINMUX_DATA(LCDVSYN_MARK, PSB15_PSB14_FN1, PTH5_FN),
+	PINMUX_DATA(DV_CLK_MARK, PSB15_PSB14_FN2, PTH5_FN),
+	PINMUX_DATA(LCDDISP_MARK, PSB13_PSB12_LCDC_RGB, PTH4_FN),
+	PINMUX_DATA(LCDRS_MARK, PSB13_PSB12_LCDC_SYS, PTH4_FN),
+	PINMUX_DATA(LCDHSYN_MARK, PSB13_PSB12_LCDC_RGB, PTH3_FN),
+	PINMUX_DATA(LCDCS_MARK, PSB13_PSB12_LCDC_SYS, PTH3_FN),
+	PINMUX_DATA(LCDDON_MARK, PTH2_FN),
+	PINMUX_DATA(LCDDCK_MARK, PSB13_PSB12_LCDC_RGB, PTH1_FN),
+	PINMUX_DATA(LCDWR_MARK, PSB13_PSB12_LCDC_SYS, PTH1_FN),
+	PINMUX_DATA(LCDVEPWC_MARK, PTH0_FN),
+
+	/* PTJ FN */
+	PINMUX_DATA(STATUS0_MARK, PTJ7_FN),
+	PINMUX_DATA(PDSTATUS_MARK, PTJ5_FN),
+	PINMUX_DATA(A25_MARK, PTJ3_FN),
+	PINMUX_DATA(A24_MARK, PTJ2_FN),
+	PINMUX_DATA(A23_MARK, PTJ1_FN),
+	PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+	/* PTK FN */
+	PINMUX_DATA(SIUAFCK_MARK, PTK7_FN),
+	PINMUX_DATA(SIUAILR_MARK, PSB9_PSB8_FN1, PTK6_FN),
+	PINMUX_DATA(MSIOF1_SS2_MARK, PSB9_PSB8_FN2, PTK6_FN),
+	PINMUX_DATA(MSIOF1_RSYNC_MARK, PSB9_PSB8_FN3, PTK6_FN),
+	PINMUX_DATA(SIUAIBT_MARK, PSB9_PSB8_FN1, PTK5_FN),
+	PINMUX_DATA(MSIOF1_SS1_MARK, PSB9_PSB8_FN2, PTK5_FN),
+	PINMUX_DATA(MSIOF1_RSCK_MARK, PSB9_PSB8_FN3, PTK5_FN),
+	PINMUX_DATA(SIUAISLD_MARK, PSB7_PSB6_FN1, PTK4_FN),
+	PINMUX_DATA(MSIOF1_RXD_MARK, PSB7_PSB6_FN2, PTK4_FN),
+	PINMUX_DATA(SIUAOLR_MARK, PSB7_PSB6_FN1, PTK3_FN),
+	PINMUX_DATA(MSIOF1_TSYNC_MARK, PSB7_PSB6_FN2, PTK3_FN),
+	PINMUX_DATA(SIUAOBT_MARK, PSB7_PSB6_FN1, PTK2_FN),
+	PINMUX_DATA(MSIOF1_TSCK_MARK, PSB7_PSB6_FN2, PTK2_FN),
+	PINMUX_DATA(SIUAOSLD_MARK, PSB7_PSB6_FN1, PTK1_FN),
+	PINMUX_DATA(MSIOF1_TXD_MARK, PSB7_PSB6_FN2, PTK1_FN),
+	PINMUX_DATA(SIUAMCK_MARK, PSB7_PSB6_FN1, PTK0_FN),
+	PINMUX_DATA(MSIOF1_MCK_MARK, PSB7_PSB6_FN2, PTK0_FN),
+
+	/* PTL FN */
+	PINMUX_DATA(LCDD15_MARK, PSB5_PSB4_FN1, PTL7_FN),
+	PINMUX_DATA(DV_D15_MARK, PSB5_PSB4_FN2, PTL7_FN),
+	PINMUX_DATA(LCDD14_MARK, PSB5_PSB4_FN1, PTL6_FN),
+	PINMUX_DATA(DV_D14_MARK, PSB5_PSB4_FN2, PTL6_FN),
+	PINMUX_DATA(LCDD13_MARK, PSB5_PSB4_FN1, PTL5_FN),
+	PINMUX_DATA(DV_D13_MARK, PSB5_PSB4_FN2, PTL5_FN),
+	PINMUX_DATA(LCDD12_MARK, PSB5_PSB4_FN1, PTL4_FN),
+	PINMUX_DATA(DV_D12_MARK, PSB5_PSB4_FN2, PTL4_FN),
+	PINMUX_DATA(LCDD11_MARK, PSB5_PSB4_FN1, PTL3_FN),
+	PINMUX_DATA(DV_D11_MARK, PSB5_PSB4_FN2, PTL3_FN),
+	PINMUX_DATA(LCDD10_MARK, PSB5_PSB4_FN1, PTL2_FN),
+	PINMUX_DATA(DV_D10_MARK, PSB5_PSB4_FN2, PTL2_FN),
+	PINMUX_DATA(LCDD9_MARK, PSB5_PSB4_FN1, PTL1_FN),
+	PINMUX_DATA(DV_D9_MARK, PSB5_PSB4_FN2, PTL1_FN),
+	PINMUX_DATA(LCDD8_MARK, PSB5_PSB4_FN1, PTL0_FN),
+	PINMUX_DATA(DV_D8_MARK, PSB5_PSB4_FN2, PTL0_FN),
+
+	/* PTM FN */
+	PINMUX_DATA(LCDD7_MARK, PSB5_PSB4_FN1, PTM7_FN),
+	PINMUX_DATA(DV_D7_MARK, PSB5_PSB4_FN2, PTM7_FN),
+	PINMUX_DATA(LCDD6_MARK, PSB5_PSB4_FN1, PTM6_FN),
+	PINMUX_DATA(DV_D6_MARK, PSB5_PSB4_FN2, PTM6_FN),
+	PINMUX_DATA(LCDD5_MARK, PSB5_PSB4_FN1, PTM5_FN),
+	PINMUX_DATA(DV_D5_MARK, PSB5_PSB4_FN2, PTM5_FN),
+	PINMUX_DATA(LCDD4_MARK, PSB5_PSB4_FN1, PTM4_FN),
+	PINMUX_DATA(DV_D4_MARK, PSB5_PSB4_FN2, PTM4_FN),
+	PINMUX_DATA(LCDD3_MARK, PSB5_PSB4_FN1, PTM3_FN),
+	PINMUX_DATA(DV_D3_MARK, PSB5_PSB4_FN2, PTM3_FN),
+	PINMUX_DATA(LCDD2_MARK, PSB5_PSB4_FN1, PTM2_FN),
+	PINMUX_DATA(DV_D2_MARK, PSB5_PSB4_FN2, PTM2_FN),
+	PINMUX_DATA(LCDD1_MARK, PSB5_PSB4_FN1, PTM1_FN),
+	PINMUX_DATA(DV_D1_MARK, PSB5_PSB4_FN2, PTM1_FN),
+	PINMUX_DATA(LCDD0_MARK, PSB5_PSB4_FN1, PTM0_FN),
+	PINMUX_DATA(DV_D0_MARK, PSB5_PSB4_FN2, PTM0_FN),
+
+	/* PTN FN */
+	PINMUX_DATA(LCDD23_MARK, PSB3_PSB2_FN1, PTN7_FN),
+	PINMUX_DATA(SCIF5_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN7_FN),
+	PINMUX_DATA(LCDD22_MARK, PSB3_PSB2_FN1, PTN6_FN),
+	PINMUX_DATA(SCIF5_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN6_FN),
+	PINMUX_DATA(LCDD21_MARK, PSB3_PSB2_FN1, PTN5_FN),
+	PINMUX_DATA(SCIF5_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN5_FN),
+	PINMUX_DATA(LCDD20_MARK, PSB3_PSB2_FN1, PTN4_FN),
+	PINMUX_DATA(SCIF4_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN4_FN),
+	PINMUX_DATA(LCDD19_MARK, PSB3_PSB2_FN1, PTN3_FN),
+	PINMUX_DATA(SCIF4_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN3_FN),
+	PINMUX_DATA(LCDD18_MARK, PSB3_PSB2_FN1, PTN2_FN),
+	PINMUX_DATA(SCIF4_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN2_FN),
+	PINMUX_DATA(LCDD17_MARK, PSB5_PSB4_FN1, PTN1_FN),
+	PINMUX_DATA(DV_VSYNC_MARK, PSB5_PSB4_FN2, PTN1_FN),
+	PINMUX_DATA(LCDD16_MARK, PSB5_PSB4_FN1, PTN0_FN),
+	PINMUX_DATA(DV_HSYNC_MARK, PSB5_PSB4_FN2, PTN0_FN),
+
+	/* PTQ FN */
+	PINMUX_DATA(AN3_MARK, PTQ3_FN),
+	PINMUX_DATA(AN2_MARK, PTQ2_FN),
+	PINMUX_DATA(AN1_MARK, PTQ1_FN),
+	PINMUX_DATA(AN0_MARK, PTQ0_FN),
+
+	/* PTR FN */
+	PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+	PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+	PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+	PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+	PINMUX_DATA(IOIS16_MARK, PSA13_PSA12_FN1, PTR3_FN),
+	PINMUX_DATA(LCDLCLK_PTR_MARK, PSA13_PSA12_FN2, PTR3_FN),
+	PINMUX_DATA(WAIT_MARK, PTR2_FN),
+	PINMUX_DATA(WE3_ICIOWR_MARK, PTR1_FN),
+	PINMUX_DATA(WE2_ICIORD_MARK, PTR0_FN),
+
+	/* PTS FN */
+	PINMUX_DATA(SCIF1_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS7_FN),
+	PINMUX_DATA(SDHI0CD_PTS_MARK, PSC15_PSC14_FN2, PTS7_FN),
+	PINMUX_DATA(SCIF1_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS6_FN),
+	PINMUX_DATA(SDHI0WP_PTS_MARK, PSC15_PSC14_FN2, PTS6_FN),
+	PINMUX_DATA(SCIF1_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS5_FN),
+	PINMUX_DATA(SDHI0D3_PTS_MARK, PSC15_PSC14_FN2, PTS5_FN),
+	PINMUX_DATA(SCIF3_PTS_CTS_MARK, PSC15_PSC14_FN1, PTS4_FN),
+	PINMUX_DATA(SDHI0D2_PTS_MARK, PSC15_PSC14_FN2, PTS4_FN),
+	PINMUX_DATA(SCIF3_PTS_RTS_MARK, PSC15_PSC14_FN1, PTS3_FN),
+	PINMUX_DATA(SDHI0D1_PTS_MARK, PSC15_PSC14_FN2, PTS3_FN),
+	PINMUX_DATA(SCIF3_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS2_FN),
+	PINMUX_DATA(SDHI0D0_PTS_MARK, PSC15_PSC14_FN2, PTS2_FN),
+	PINMUX_DATA(SCIF3_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS1_FN),
+	PINMUX_DATA(SDHI0CMD_PTS_MARK, PSC15_PSC14_FN2, PTS1_FN),
+	PINMUX_DATA(SCIF3_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS0_FN),
+	PINMUX_DATA(SDHI0CLK_PTS_MARK, PSC15_PSC14_FN2, PTS0_FN),
+
+	/* PTT FN */
+	PINMUX_DATA(SCIF0_PTT_SCK_MARK, PSC13_PSC12_FN1, PTT5_FN),
+	PINMUX_DATA(MSIOF0_PTT_TSCK_MARK, PSC13_PSC12_FN2, PTT5_FN),
+	PINMUX_DATA(SCIF0_PTT_RXD_MARK, PSC13_PSC12_FN1, PTT4_FN),
+	PINMUX_DATA(MSIOF0_PTT_RXD_MARK, PSC13_PSC12_FN2, PTT4_FN),
+	PINMUX_DATA(SCIF0_PTT_TXD_MARK, PSC13_PSC12_FN1, PTT3_FN),
+	PINMUX_DATA(MSIOF0_PTT_TXD_MARK, PSC13_PSC12_FN2, PTT3_FN),
+	PINMUX_DATA(SCIF2_PTT_SCK_MARK, PSC11_PSC10_FN1, PTT2_FN),
+	PINMUX_DATA(MSIOF0_PTT_TSYNC_MARK, PSC11_PSC10_FN2, PTT2_FN),
+	PINMUX_DATA(SCIF2_PTT_RXD_MARK, PSC11_PSC10_FN1, PTT1_FN),
+	PINMUX_DATA(MSIOF0_PTT_SS1_MARK, PSC11_PSC10_FN2, PTT1_FN),
+	PINMUX_DATA(MSIOF0_PTT_RSCK_MARK, PSC11_PSC10_FN3, PTT1_FN),
+	PINMUX_DATA(SCIF2_PTT_TXD_MARK, PSC11_PSC10_FN1, PTT0_FN),
+	PINMUX_DATA(MSIOF0_PTT_SS2_MARK, PSC11_PSC10_FN2, PTT0_FN),
+	PINMUX_DATA(MSIOF0_PTT_RSYNC_MARK, PSC11_PSC10_FN3, PTT0_FN),
+
+	/* PTU FN */
+	PINMUX_DATA(FCDE_MARK, PSC9_PSC8_FN1, PTU5_FN),
+	PINMUX_DATA(SCIF0_PTU_SCK_MARK, PSC9_PSC8_FN2, PTU5_FN),
+	PINMUX_DATA(FSC_MARK, PSC9_PSC8_FN1, PTU4_FN),
+	PINMUX_DATA(SCIF0_PTU_RXD_MARK, PSC9_PSC8_FN2, PTU4_FN),
+	PINMUX_DATA(FWE_MARK, PSC9_PSC8_FN1, PTU3_FN),
+	PINMUX_DATA(SCIF0_PTU_TXD_MARK, PSC9_PSC8_FN2, PTU3_FN),
+	PINMUX_DATA(FOE_MARK, PSC7_PSC6_FN1, PTU2_FN),
+	PINMUX_DATA(SCIF2_PTU_SCK_MARK, PSC7_PSC6_FN2, PTU2_FN),
+	PINMUX_DATA(VIO_VD2_MARK, PSC7_PSC6_FN3, PTU2_FN),
+	PINMUX_DATA(FRB_MARK, PSC7_PSC6_FN1, PTU1_FN),
+	PINMUX_DATA(SCIF2_PTU_RXD_MARK, PSC7_PSC6_FN2, PTU1_FN),
+	PINMUX_DATA(VIO_CLK2_MARK, PSC7_PSC6_FN3, PTU1_FN),
+	PINMUX_DATA(FCE_MARK, PSC7_PSC6_FN1, PTU0_FN),
+	PINMUX_DATA(SCIF2_PTU_TXD_MARK, PSC7_PSC6_FN2, PTU0_FN),
+	PINMUX_DATA(VIO_HD2_MARK, PSC7_PSC6_FN3, PTU0_FN),
+
+	/* PTV FN */
+	PINMUX_DATA(NAF7_MARK, PSC7_PSC6_FN1, PTV7_FN),
+	PINMUX_DATA(SCIF1_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV7_FN),
+	PINMUX_DATA(VIO_D15_MARK, PSC7_PSC6_FN3, PTV7_FN),
+	PINMUX_DATA(NAF6_MARK, PSC7_PSC6_FN1, PTV6_FN),
+	PINMUX_DATA(SCIF1_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV6_FN),
+	PINMUX_DATA(VIO_D14_MARK, PSC7_PSC6_FN3, PTV6_FN),
+	PINMUX_DATA(NAF5_MARK, PSC7_PSC6_FN1, PTV5_FN),
+	PINMUX_DATA(SCIF1_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV5_FN),
+	PINMUX_DATA(VIO_D13_MARK, PSC7_PSC6_FN3, PTV5_FN),
+	PINMUX_DATA(NAF4_MARK, PSC7_PSC6_FN1, PTV4_FN),
+	PINMUX_DATA(SCIF3_PTV_CTS_MARK, PSC7_PSC6_FN2, PTV4_FN),
+	PINMUX_DATA(VIO_D12_MARK, PSC7_PSC6_FN3, PTV4_FN),
+	PINMUX_DATA(NAF3_MARK, PSC7_PSC6_FN1, PTV3_FN),
+	PINMUX_DATA(SCIF3_PTV_RTS_MARK, PSC7_PSC6_FN2, PTV3_FN),
+	PINMUX_DATA(VIO_D11_MARK, PSC7_PSC6_FN3, PTV3_FN),
+	PINMUX_DATA(NAF2_MARK, PSC7_PSC6_FN1, PTV2_FN),
+	PINMUX_DATA(SCIF3_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV2_FN),
+	PINMUX_DATA(VIO_D10_MARK, PSC7_PSC6_FN3, PTV2_FN),
+	PINMUX_DATA(NAF1_MARK, PSC7_PSC6_FN1, PTV1_FN),
+	PINMUX_DATA(SCIF3_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV1_FN),
+	PINMUX_DATA(VIO_D9_MARK, PSC7_PSC6_FN3, PTV1_FN),
+	PINMUX_DATA(NAF0_MARK, PSC7_PSC6_FN1, PTV0_FN),
+	PINMUX_DATA(SCIF3_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV0_FN),
+	PINMUX_DATA(VIO_D8_MARK, PSC7_PSC6_FN3, PTV0_FN),
+
+	/* PTW FN */
+	PINMUX_DATA(IRQ7_MARK, PTW7_FN),
+	PINMUX_DATA(IRQ6_MARK, PTW6_FN),
+	PINMUX_DATA(IRQ5_MARK, PTW5_FN),
+	PINMUX_DATA(IRQ4_MARK, PSD15_PSD14_FN1, PTW4_FN),
+	PINMUX_DATA(LCDLCLK_PTW_MARK, PSD15_PSD14_FN2, PTW4_FN),
+	PINMUX_DATA(IRQ3_MARK, PSD13_PSD12_FN1, PTW3_FN),
+	PINMUX_DATA(ADTRG_MARK, PSD13_PSD12_FN2, PTW3_FN),
+	PINMUX_DATA(IRQ2_MARK, PSD11_PSD10_FN1, PTW2_FN),
+	PINMUX_DATA(BS_MARK, PSD11_PSD10_FN2, PTW2_FN),
+	PINMUX_DATA(VIO_CKO_MARK, PSD11_PSD10_FN3, PTW2_FN),
+	PINMUX_DATA(IRQ1_MARK, PSD9_PSD8_FN1, PTW1_FN),
+	PINMUX_DATA(SIUAISPD_MARK, PSD9_PSD8_FN2, PTW1_FN),
+	PINMUX_DATA(IRQ0_MARK, PSD7_PSD6_FN1, PTW0_FN),
+	PINMUX_DATA(SIUAOSPD_MARK, PSD7_PSD6_FN2, PTW0_FN),
+
+	/* PTX FN */
+	PINMUX_DATA(DACK1_MARK, PTX7_FN),
+	PINMUX_DATA(DREQ1_MARK, PSD3_PSD2_FN1, PTX6_FN),
+	PINMUX_DATA(MSIOF0_PTX_MCK_MARK, PSD3_PSD2_FN2, PTX6_FN),
+	PINMUX_DATA(DACK0_MARK, PSD5_PSD4_FN1, PTX5_FN),
+	PINMUX_DATA(IRDA_OUT_MARK, PSD5_PSD4_FN2, PTX5_FN),
+	PINMUX_DATA(DREQ0_MARK, PSD5_PSD4_FN1, PTX4_FN),
+	PINMUX_DATA(IRDA_IN_MARK, PSD5_PSD4_FN2, PTX4_FN),
+	PINMUX_DATA(TS0_SDAT_MARK, PTX3_FN),
+	PINMUX_DATA(TS0_SCK_MARK, PTX2_FN),
+	PINMUX_DATA(TS0_SDEN_MARK, PTX1_FN),
+	PINMUX_DATA(TS0_SPSYNC_MARK, PTX0_FN),
+
+	/* PTY FN */
+	PINMUX_DATA(VIO_D7_MARK, PTY7_FN),
+	PINMUX_DATA(VIO_D6_MARK, PTY6_FN),
+	PINMUX_DATA(VIO_D5_MARK, PTY5_FN),
+	PINMUX_DATA(VIO_D4_MARK, PTY4_FN),
+	PINMUX_DATA(VIO_D3_MARK, PTY3_FN),
+	PINMUX_DATA(VIO_D2_MARK, PTY2_FN),
+	PINMUX_DATA(VIO_D1_MARK, PTY1_FN),
+	PINMUX_DATA(VIO_D0_MARK, PTY0_FN),
+
+	/* PTZ FN */
+	PINMUX_DATA(SIUBOBT_MARK, PTZ7_FN),
+	PINMUX_DATA(SIUBOLR_MARK, PTZ6_FN),
+	PINMUX_DATA(SIUBOSLD_MARK, PTZ5_FN),
+	PINMUX_DATA(SIUBMCK_MARK, PTZ4_FN),
+	PINMUX_DATA(VIO_FLD_MARK, PSD1_PSD0_FN1, PTZ3_FN),
+	PINMUX_DATA(SIUBFCK_MARK, PSD1_PSD0_FN2, PTZ3_FN),
+	PINMUX_DATA(VIO_HD1_MARK, PSD1_PSD0_FN1, PTZ2_FN),
+	PINMUX_DATA(SIUBILR_MARK, PSD1_PSD0_FN2, PTZ2_FN),
+	PINMUX_DATA(VIO_VD1_MARK, PSD1_PSD0_FN1, PTZ1_FN),
+	PINMUX_DATA(SIUBIBT_MARK, PSD1_PSD0_FN2, PTZ1_FN),
+	PINMUX_DATA(VIO_CLK1_MARK, PSD1_PSD0_FN1, PTZ0_FN),
+	PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+	/* PTA */
+	PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+	PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+	PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+	PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+	PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+	PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+	PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+	PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+	/* PTB */
+	PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+	PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+	PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+	PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+	PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+	PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+	PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+	PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+	/* PTC */
+	PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+	PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+	PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+	PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+	PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+	PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+	PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+	PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+	/* PTD */
+	PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+	PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+	PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+	PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+	PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+	PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+	PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+	PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+	/* PTE */
+	PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+	PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+	PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+	PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+	PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+	PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+	/* PTF */
+	PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+	PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+	PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+	PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+	PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+	PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+	PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+	PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+	/* PTG */
+	PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+	PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+	PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+	PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+	PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+	PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+	/* PTH */
+	PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+	PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+	PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+	PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+	PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+	PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+	PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+	PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+	/* PTJ */
+	PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+	PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+	PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+	PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+	PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+	PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+	/* PTK */
+	PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+	PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+	PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+	PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+	PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+	PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+	PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+	PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+	/* PTL */
+	PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+	PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+	PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+	PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+	PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+	PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+	PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+	PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+	/* PTM */
+	PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+	PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+	PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+	PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+	PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+	PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+	PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+	PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+	/* PTN */
+	PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+	PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+	PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+	PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+	PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+	PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+	PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+	PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+	/* PTQ */
+	PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+	PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+	PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+	PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+	/* PTR */
+	PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+	PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+	PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+	PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+	PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+	PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+	PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+	PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+	/* PTS */
+	PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+	PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+	PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+	PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+	PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+	PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+	PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+	PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+	/* PTT */
+	PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+	PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+	PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+	PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+	PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+	PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+	/* PTU */
+	PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+	PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+	PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+	PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+	PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+	PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+	/* PTV */
+	PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+	PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+	PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+	PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+	PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+	PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+	PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+	PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+	/* PTW */
+	PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+	PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+	PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+	PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+	PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+	PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+	PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+	PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+	/* PTX */
+	PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+	PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+	PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+	PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+	PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+	PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+	PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+	PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+	/* PTY */
+	PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+	PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+	PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+	PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+	PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+	PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+	PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+	PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+	/* PTZ */
+	PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+	PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+	PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+	PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+	PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+	PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+	PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+	PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+	/* SCIF0 */
+	PINMUX_GPIO(GPIO_FN_SCIF0_PTT_TXD, SCIF0_PTT_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_PTT_RXD, SCIF0_PTT_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_PTT_SCK, SCIF0_PTT_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_PTU_TXD, SCIF0_PTU_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_PTU_RXD, SCIF0_PTU_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF0_PTU_SCK, SCIF0_PTU_SCK_MARK),
+
+	/* SCIF1 */
+	PINMUX_GPIO(GPIO_FN_SCIF1_PTS_TXD, SCIF1_PTS_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_PTS_RXD, SCIF1_PTS_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_PTS_SCK, SCIF1_PTS_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_PTV_TXD, SCIF1_PTV_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_PTV_RXD, SCIF1_PTV_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF1_PTV_SCK, SCIF1_PTV_SCK_MARK),
+
+	/* SCIF2 */
+	PINMUX_GPIO(GPIO_FN_SCIF2_PTT_TXD, SCIF2_PTT_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_PTT_RXD, SCIF2_PTT_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_PTT_SCK, SCIF2_PTT_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_PTU_TXD, SCIF2_PTU_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_PTU_RXD, SCIF2_PTU_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF2_PTU_SCK, SCIF2_PTU_SCK_MARK),
+
+	/* SCIF3 */
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTS_TXD, SCIF3_PTS_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RXD, SCIF3_PTS_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTS_SCK, SCIF3_PTS_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RTS, SCIF3_PTS_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTS_CTS, SCIF3_PTS_CTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTV_TXD, SCIF3_PTV_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RXD, SCIF3_PTV_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTV_SCK, SCIF3_PTV_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RTS, SCIF3_PTV_RTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF3_PTV_CTS, SCIF3_PTV_CTS_MARK),
+
+	/* SCIF4 */
+	PINMUX_GPIO(GPIO_FN_SCIF4_PTE_TXD, SCIF4_PTE_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF4_PTE_RXD, SCIF4_PTE_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF4_PTE_SCK, SCIF4_PTE_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF4_PTN_TXD, SCIF4_PTN_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF4_PTN_RXD, SCIF4_PTN_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF4_PTN_SCK, SCIF4_PTN_SCK_MARK),
+
+	/* SCIF5 */
+	PINMUX_GPIO(GPIO_FN_SCIF5_PTE_TXD, SCIF5_PTE_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF5_PTE_RXD, SCIF5_PTE_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF5_PTE_SCK, SCIF5_PTE_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF5_PTN_TXD, SCIF5_PTN_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF5_PTN_RXD, SCIF5_PTN_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_SCIF5_PTN_SCK, SCIF5_PTN_SCK_MARK),
+
+	/* CEU */
+	PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_CLK1, VIO_CLK1_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_VD1, VIO_VD1_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_HD1, VIO_HD1_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+	PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+	/* LCDC */
+	PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDLCLK_PTR, LCDLCLK_PTR_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDLCLK_PTW, LCDLCLK_PTW_MARK),
+	/* Main LCD */
+	PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+	/* Main LCD - RGB Mode */
+	PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+	/* Main LCD - SYS Mode */
+	PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+	PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+
+	/* IRQ */
+	PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+	PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+	/* AUD */
+	PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+	PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+	/* SDHI0 (PTD) */
+	PINMUX_GPIO(GPIO_FN_SDHI0CD_PTD, SDHI0CD_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0WP_PTD, SDHI0WP_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D3_PTD, SDHI0D3_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D2_PTD, SDHI0D2_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D1_PTD, SDHI0D1_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D0_PTD, SDHI0D0_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTD, SDHI0CMD_PTD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTD, SDHI0CLK_PTD_MARK),
+
+	/* SDHI0 (PTS) */
+	PINMUX_GPIO(GPIO_FN_SDHI0CD_PTS, SDHI0CD_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0WP_PTS, SDHI0WP_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D3_PTS, SDHI0D3_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D2_PTS, SDHI0D2_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D1_PTS, SDHI0D1_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0D0_PTS, SDHI0D0_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTS, SDHI0CMD_PTS_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTS, SDHI0CLK_PTS_MARK),
+
+	/* SDHI1 */
+	PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+	PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+
+	/* SIUA */
+	PINMUX_GPIO(GPIO_FN_SIUAFCK, SIUAFCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAMCK, SIUAMCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAISPD, SIUAISPD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUAOSPD, SIUAOSPD_MARK),
+
+	/* SIUB */
+	PINMUX_GPIO(GPIO_FN_SIUBFCK, SIUBFCK_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+	PINMUX_GPIO(GPIO_FN_SIUBMCK, SIUBMCK_MARK),
+
+	/* IRDA */
+	PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+	PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+	/* VOU */
+	PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+	PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+	/* KEYSC */
+	PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+	PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+
+	/* MSIOF0 (PTF) */
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TXD, MSIOF0_PTF_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RXD, MSIOF0_PTF_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_MCK, MSIOF0_PTF_MCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSYNC, MSIOF0_PTF_TSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSCK, MSIOF0_PTF_TSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSYNC, MSIOF0_PTF_RSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSCK, MSIOF0_PTF_RSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS1, MSIOF0_PTF_SS1_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS2, MSIOF0_PTF_SS2_MARK),
+
+	/* MSIOF0 (PTT+PTX) */
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TXD, MSIOF0_PTT_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RXD, MSIOF0_PTT_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTX_MCK, MSIOF0_PTX_MCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSYNC, MSIOF0_PTT_TSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSCK, MSIOF0_PTT_TSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSYNC, MSIOF0_PTT_RSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSCK, MSIOF0_PTT_RSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS1, MSIOF0_PTT_SS1_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS2, MSIOF0_PTT_SS2_MARK),
+
+	/* MSIOF1 */
+	PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+	PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+
+	/* TSIF */
+	PINMUX_GPIO(GPIO_FN_TS0_SDAT, TS0_SDAT_MARK),
+	PINMUX_GPIO(GPIO_FN_TS0_SCK, TS0_SCK_MARK),
+	PINMUX_GPIO(GPIO_FN_TS0_SDEN, TS0_SDEN_MARK),
+	PINMUX_GPIO(GPIO_FN_TS0_SPSYNC, TS0_SPSYNC_MARK),
+
+	/* FLCTL */
+	PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+	PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+	PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+	PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+	PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+	PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+	PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+	/* DMAC */
+	PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+	PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+	PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+	/* ADC */
+	PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+	PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+	PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+	PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+	PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+	/* CPG */
+	PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+	PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+	/* TPU */
+	PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+	PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+	PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+	PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+
+	/* BSC */
+	PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+	PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+	PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+	PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+	PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+	PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+	PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+	PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+	PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+	PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+	PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+	PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+	PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+	PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+	PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+	PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+	PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+	PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+	PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+	PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+	PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+	PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+	PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+	PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+	PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+	PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+	PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+	PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+	PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+
+	/* ATAPI */
+	PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+	PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+	PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+	PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+	PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+	PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+	PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+	PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+	PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+	PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+	{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+		PTA7_FN, PTA7_OUT, 0, PTA7_IN,
+		PTA6_FN, PTA6_OUT, 0, PTA6_IN,
+		PTA5_FN, PTA5_OUT, 0, PTA5_IN,
+		PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+		PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+		PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+		PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+		PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+	},
+	{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+		PTB7_FN, PTB7_OUT, 0, PTB7_IN,
+		PTB6_FN, PTB6_OUT, 0, PTB6_IN,
+		PTB5_FN, PTB5_OUT, 0, PTB5_IN,
+		PTB4_FN, PTB4_OUT, 0, PTB4_IN,
+		PTB3_FN, PTB3_OUT, 0, PTB3_IN,
+		PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+		PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+		PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+	},
+	{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+		PTC7_FN, PTC7_OUT, 0, PTC7_IN,
+		PTC6_FN, PTC6_OUT, 0, PTC6_IN,
+		PTC5_FN, PTC5_OUT, 0, PTC5_IN,
+		PTC4_FN, PTC4_OUT, 0, PTC4_IN,
+		PTC3_FN, PTC3_OUT, 0, PTC3_IN,
+		PTC2_FN, PTC2_OUT, 0, PTC2_IN,
+		PTC1_FN, PTC1_OUT, 0, PTC1_IN,
+		PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+	},
+	{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+		PTD7_FN, PTD7_OUT, 0, PTD7_IN,
+		PTD6_FN, PTD6_OUT, 0, PTD6_IN,
+		PTD5_FN, PTD5_OUT, 0, PTD5_IN,
+		PTD4_FN, PTD4_OUT, 0, PTD4_IN,
+		PTD3_FN, PTD3_OUT, 0, PTD3_IN,
+		PTD2_FN, PTD2_OUT, 0, PTD2_IN,
+		PTD1_FN, PTD1_OUT, 0, PTD1_IN,
+		PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+	},
+	{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTE5_FN, PTE5_OUT, 0, PTE5_IN,
+		PTE4_FN, PTE4_OUT, 0, PTE4_IN,
+		PTE3_FN, PTE3_OUT, 0, PTE3_IN,
+		PTE2_FN, PTE2_OUT, 0, PTE2_IN,
+		PTE1_FN, PTE1_OUT, 0, PTE1_IN,
+		PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+	},
+	{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+		PTF7_FN, PTF7_OUT, 0, PTF7_IN,
+		PTF6_FN, PTF6_OUT, 0, PTF6_IN,
+		PTF5_FN, PTF5_OUT, 0, PTF5_IN,
+		PTF4_FN, PTF4_OUT, 0, PTF4_IN,
+		PTF3_FN, PTF3_OUT, 0, PTF3_IN,
+		PTF2_FN, PTF2_OUT, 0, PTF2_IN,
+		PTF1_FN, PTF1_OUT, 0, PTF1_IN,
+		PTF0_FN, PTF0_OUT, 0, PTF0_IN }
+	},
+	{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTG5_FN, PTG5_OUT, 0, 0,
+		PTG4_FN, PTG4_OUT, 0, 0,
+		PTG3_FN, PTG3_OUT, 0, 0,
+		PTG2_FN, PTG2_OUT, 0, 0,
+		PTG1_FN, PTG1_OUT, 0, 0,
+		PTG0_FN, PTG0_OUT, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+		PTH7_FN, PTH7_OUT, 0, PTH7_IN,
+		PTH6_FN, PTH6_OUT, 0, PTH6_IN,
+		PTH5_FN, PTH5_OUT, 0, PTH5_IN,
+		PTH4_FN, PTH4_OUT, 0, PTH4_IN,
+		PTH3_FN, PTH3_OUT, 0, PTH3_IN,
+		PTH2_FN, PTH2_OUT, 0, PTH2_IN,
+		PTH1_FN, PTH1_OUT, 0, PTH1_IN,
+		PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+	},
+	{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+		PTJ7_FN, PTJ7_OUT, 0, 0,
+		0, 0, 0, 0,
+		PTJ5_FN, PTJ5_OUT, 0, 0,
+		0, 0, 0, 0,
+		PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
+		PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
+		PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
+		PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+	},
+	{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+		PTK7_FN, PTK7_OUT, 0, PTK7_IN,
+		PTK6_FN, PTK6_OUT, 0, PTK6_IN,
+		PTK5_FN, PTK5_OUT, 0, PTK5_IN,
+		PTK4_FN, PTK4_OUT, 0, PTK4_IN,
+		PTK3_FN, PTK3_OUT, 0, PTK3_IN,
+		PTK2_FN, PTK2_OUT, 0, PTK2_IN,
+		PTK1_FN, PTK1_OUT, 0, PTK1_IN,
+		PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+	},
+	{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+		PTL7_FN, PTL7_OUT, 0, PTL7_IN,
+		PTL6_FN, PTL6_OUT, 0, PTL6_IN,
+		PTL5_FN, PTL5_OUT, 0, PTL5_IN,
+		PTL4_FN, PTL4_OUT, 0, PTL4_IN,
+		PTL3_FN, PTL3_OUT, 0, PTL3_IN,
+		PTL2_FN, PTL2_OUT, 0, PTL2_IN,
+		PTL1_FN, PTL1_OUT, 0, PTL1_IN,
+		PTL0_FN, PTL0_OUT, 0, PTL0_IN }
+	},
+	{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+		PTM7_FN, PTM7_OUT, 0, PTM7_IN,
+		PTM6_FN, PTM6_OUT, 0, PTM6_IN,
+		PTM5_FN, PTM5_OUT, 0, PTM5_IN,
+		PTM4_FN, PTM4_OUT, 0, PTM4_IN,
+		PTM3_FN, PTM3_OUT, 0, PTM3_IN,
+		PTM2_FN, PTM2_OUT, 0, PTM2_IN,
+		PTM1_FN, PTM1_OUT, 0, PTM1_IN,
+		PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+	},
+	{ PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+		PTN7_FN, PTN7_OUT, 0, PTN7_IN,
+		PTN6_FN, PTN6_OUT, 0, PTN6_IN,
+		PTN5_FN, PTN5_OUT, 0, PTN5_IN,
+		PTN4_FN, PTN4_OUT, 0, PTN4_IN,
+		PTN3_FN, PTN3_OUT, 0, PTN3_IN,
+		PTN2_FN, PTN2_OUT, 0, PTN2_IN,
+		PTN1_FN, PTN1_OUT, 0, PTN1_IN,
+		PTN0_FN, PTN0_OUT, 0, PTN0_IN }
+	},
+	{ PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTQ3_FN, 0, 0, PTQ3_IN,
+		PTQ2_FN, 0, 0, PTQ2_IN,
+		PTQ1_FN, 0, 0, PTQ1_IN,
+		PTQ0_FN, 0, 0, PTQ0_IN }
+	},
+	{ PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+		PTR7_FN, PTR7_OUT, 0, PTR7_IN,
+		PTR6_FN, PTR6_OUT, 0, PTR6_IN,
+		PTR5_FN, PTR5_OUT, 0, PTR5_IN,
+		PTR4_FN, PTR4_OUT, 0, PTR4_IN,
+		PTR3_FN, 0, 0, PTR3_IN,
+		PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+		PTR1_FN, PTR1_OUT, 0, PTR1_IN,
+		PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+	},
+	{ PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+		PTS7_FN, PTS7_OUT, 0, PTS7_IN,
+		PTS6_FN, PTS6_OUT, 0, PTS6_IN,
+		PTS5_FN, PTS5_OUT, 0, PTS5_IN,
+		PTS4_FN, PTS4_OUT, 0, PTS4_IN,
+		PTS3_FN, PTS3_OUT, 0, PTS3_IN,
+		PTS2_FN, PTS2_OUT, 0, PTS2_IN,
+		PTS1_FN, PTS1_OUT, 0, PTS1_IN,
+		PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+	},
+	{ PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTT5_FN, PTT5_OUT, 0, PTT5_IN,
+		PTT4_FN, PTT4_OUT, 0, PTT4_IN,
+		PTT3_FN, PTT3_OUT, 0, PTT3_IN,
+		PTT2_FN, PTT2_OUT, 0, PTT2_IN,
+		PTT1_FN, PTT1_OUT, 0, PTT1_IN,
+		PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+	},
+	{ PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PTU5_FN, PTU5_OUT, 0, PTU5_IN,
+		PTU4_FN, PTU4_OUT, 0, PTU4_IN,
+		PTU3_FN, PTU3_OUT, 0, PTU3_IN,
+		PTU2_FN, PTU2_OUT, 0, PTU2_IN,
+		PTU1_FN, PTU1_OUT, 0, PTU1_IN,
+		PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+	},
+	{ PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+		PTV7_FN, PTV7_OUT, 0, PTV7_IN,
+		PTV6_FN, PTV6_OUT, 0, PTV6_IN,
+		PTV5_FN, PTV5_OUT, 0, PTV5_IN,
+		PTV4_FN, PTV4_OUT, 0, PTV4_IN,
+		PTV3_FN, PTV3_OUT, 0, PTV3_IN,
+		PTV2_FN, PTV2_OUT, 0, PTV2_IN,
+		PTV1_FN, PTV1_OUT, 0, PTV1_IN,
+		PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+	},
+	{ PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+		PTW7_FN, PTW7_OUT, 0, PTW7_IN,
+		PTW6_FN, PTW6_OUT, 0, PTW6_IN,
+		PTW5_FN, PTW5_OUT, 0, PTW5_IN,
+		PTW4_FN, PTW4_OUT, 0, PTW4_IN,
+		PTW3_FN, PTW3_OUT, 0, PTW3_IN,
+		PTW2_FN, PTW2_OUT, 0, PTW2_IN,
+		PTW1_FN, PTW1_OUT, 0, PTW1_IN,
+		PTW0_FN, PTW0_OUT, 0, PTW0_IN }
+	},
+	{ PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+		PTX7_FN, PTX7_OUT, 0, PTX7_IN,
+		PTX6_FN, PTX6_OUT, 0, PTX6_IN,
+		PTX5_FN, PTX5_OUT, 0, PTX5_IN,
+		PTX4_FN, PTX4_OUT, 0, PTX4_IN,
+		PTX3_FN, PTX3_OUT, 0, PTX3_IN,
+		PTX2_FN, PTX2_OUT, 0, PTX2_IN,
+		PTX1_FN, PTX1_OUT, 0, PTX1_IN,
+		PTX0_FN, PTX0_OUT, 0, PTX0_IN }
+	},
+	{ PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+		PTY7_FN, PTY7_OUT, 0, PTY7_IN,
+		PTY6_FN, PTY6_OUT, 0, PTY6_IN,
+		PTY5_FN, PTY5_OUT, 0, PTY5_IN,
+		PTY4_FN, PTY4_OUT, 0, PTY4_IN,
+		PTY3_FN, PTY3_OUT, 0, PTY3_IN,
+		PTY2_FN, PTY2_OUT, 0, PTY2_IN,
+		PTY1_FN, PTY1_OUT, 0, PTY1_IN,
+		PTY0_FN, PTY0_OUT, 0, PTY0_IN }
+	},
+	{ PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+		PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
+		PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
+		PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
+		PTZ4_FN, PTZ4_OUT, 0, PTZ4_IN,
+		PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
+		PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
+		PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
+		PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
+	},
+	{ PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2) {
+		PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
+		PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
+		PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
+		PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
+		0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2) {
+		PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
+		PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
+		0, 0, 0, 0,
+		PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0,
+		PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
+		PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
+		PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
+		0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2) {
+		PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
+		PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
+		PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
+		PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0,
+		PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0 }
+	},
+	{ PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2) {
+		PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
+		PSD13_PSD12_FN1, PSD13_PSD12_FN2, 0, 0,
+		PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3, 0,
+		PSD9_PSD8_FN1, PSD9_PSD8_FN2, 0, 0,
+		PSD7_PSD6_FN1, PSD7_PSD6_FN2, 0, 0,
+		PSD5_PSD4_FN1, PSD5_PSD4_FN2, 0, 0,
+		PSD3_PSD2_FN1, PSD3_PSD2_FN2, 0, 0,
+		PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 }
+	},
+	{}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+	{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+		PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+		PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+	},
+	{ PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+		PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+		PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+	},
+	{ PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+		PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+		PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+	},
+	{ PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+		PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+		PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+	},
+	{ PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+		0, 0, PTE5_DATA, PTE4_DATA,
+		PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+	},
+	{ PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+		PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+		PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+	},
+	{ PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+		0, 0, PTG5_DATA, PTG4_DATA,
+		PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+	},
+	{ PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+		PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+		PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+	},
+	{ PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+		PTJ7_DATA, 0, PTJ5_DATA, 0,
+		PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+	},
+	{ PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+		PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+		PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+	},
+	{ PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+		PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+		PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+	},
+	{ PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+		PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+		PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+	},
+	{ PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+		PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+		PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+	},
+	{ PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+		0, 0, 0, 0,
+		PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+	},
+	{ PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+		PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+		PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+	},
+	{ PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+		PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+		PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+	},
+	{ PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+		0, 0, PTT5_DATA, PTT4_DATA,
+		PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+	},
+	{ PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+		0, 0, PTU5_DATA, PTU4_DATA,
+		PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+	},
+	{ PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+		PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+		PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+	},
+	{ PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+		PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+		PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+	},
+	{ PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+		PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+		PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+	},
+	{ PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+		PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+		PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+	},
+	{ PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+		PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+		PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+	},
+	{ },
+};
+
+static struct pinmux_info sh7723_pinmux_info = {
+	.name = "sh7723_pfc",
+	.reserved_id = PINMUX_RESERVED,
+	.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+	.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+	.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+	.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+	.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.first_gpio = GPIO_PTA7,
+	.last_gpio = GPIO_FN_IDEA0,
+
+	.gpios = pinmux_gpios,
+	.cfg_regs = pinmux_config_regs,
+	.data_regs = pinmux_data_regs,
+
+	.gpio_data = pinmux_data,
+	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+	return register_pinmux(&sh7723_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index e5e0684..b8869aa 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -1,7 +1,7 @@
 /*
  * SH-X3 SMP
  *
- *  Copyright (C) 2007  Paul Mundt
+ *  Copyright (C) 2007 - 2008  Paul Mundt
  *  Copyright (C) 2007  Magnus Damm
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +14,22 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 
+static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
+{
+	unsigned int message = (unsigned int)(long)arg;
+	unsigned int cpu = hard_smp_processor_id();
+	unsigned int offs = 4 * cpu;
+	unsigned int x;
+
+	x = ctrl_inl(0xfe410070 + offs); /* C0INTICI..CnINTICI */
+	x &= (1 << (message << 2));
+	ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
+
+	smp_message_recv(message);
+
+	return IRQ_HANDLED;
+}
+
 void __init plat_smp_setup(void)
 {
 	unsigned int cpu = 0;
@@ -40,6 +56,13 @@
 
 void __init plat_prepare_cpus(unsigned int max_cpus)
 {
+	int i;
+
+	BUILD_BUG_ON(SMP_MSG_NR >= 8);
+
+	for (i = 0; i < SMP_MSG_NR; i++)
+		request_irq(104 + i, ipi_interrupt_handler, IRQF_DISABLED,
+			    "IPI", (void *)(long)i);
 }
 
 #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
@@ -59,7 +82,7 @@
 		ctrl_outl(STBCR_MSTP, STBCR_REG(cpu));
 
 	while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
-		;
+		cpu_relax();
 
 	/* Start up secondary processor by sending a reset */
 	ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu));
@@ -75,46 +98,6 @@
 	unsigned long addr = 0xfe410070 + (cpu * 4);
 
 	BUG_ON(cpu >= 4);
-	BUG_ON(message >= SMP_MSG_NR);
 
 	ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
 }
-
-struct ipi_data {
-	void (*handler)(void *);
-	void *arg;
-	unsigned int message;
-};
-
-static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
-{
-	struct ipi_data *id = arg;
-	unsigned int cpu = hard_smp_processor_id();
-	unsigned int offs = 4 * cpu;
-	unsigned int x;
-
-	x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
-	x &= (1 << (id->message << 2));
-	ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
-
-	id->handler(id->arg);
-
-	return IRQ_HANDLED;
-}
-
-static struct ipi_data ipi_handlers[SMP_MSG_NR];
-
-int plat_register_ipi_handler(unsigned int message,
-			      void (*handler)(void *), void *arg)
-{
-	struct ipi_data *id = &ipi_handlers[message];
-
-	BUG_ON(SMP_MSG_NR >= 8);
-	BUG_ON(message >= SMP_MSG_NR);
-
-	id->handler = handler;
-	id->arg = arg;
-	id->message = message;
-
-	return request_irq(104 + message, ipi_interrupt_handler, 0, "IPI", id);
-}
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
index 8646363..ce4602e 100644
--- a/arch/sh/kernel/cpu/sh5/Makefile
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -5,3 +5,8 @@
 
 obj-$(CONFIG_SH_FPU)		+= fpu.o
 obj-$(CONFIG_KALLSYMS)		+= unwind.o
+
+# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SH5)		:= clock-sh5.o
+
+obj-y			+= $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
new file mode 100644
index 0000000..52c4924
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c
@@ -0,0 +1,79 @@
+/*
+ * arch/sh/kernel/cpu/sh5/clock-sh5.c
+ *
+ * SH-5 support for the clock framework
+ *
+ *  Copyright (C) 2008  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/io.h>
+
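+/*
+ * Ratio table indexed by the 3-bit fields of the CPRC frequency control
+ * register: used as the multiplier for the master clock and as the
+ * divisor for the module, bus and CPU clocks below.
+ */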
+static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
+
+/* Clock, Power and Reset Controller */
+#define	CPRC_BLOCK_OFF	0x01010000
+#define CPRC_BASE	(PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
+
+static unsigned long cprc_base;
+
+static void master_clk_init(struct clk *clk)
+{
+	int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007;
+	clk->rate *= ifc_table[idx];
+}
+
+static struct clk_ops sh5_master_clk_ops = {
+	.init		= master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007;
+	clk->rate = clk->parent->rate / ifc_table[idx];
+}
+
+static struct clk_ops sh5_module_clk_ops = {
+	.recalc		= module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007;
+	clk->rate = clk->parent->rate / ifc_table[idx];
+}
+
+static struct clk_ops sh5_bus_clk_ops = {
+	.recalc		= bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(cprc_base) & 0x0007);
+	clk->rate = clk->parent->rate / ifc_table[idx];
+}
+
+static struct clk_ops sh5_cpu_clk_ops = {
+	.recalc		= cpu_clk_recalc,
+};
+
+static struct clk_ops *sh5_clk_ops[] = {
+	&sh5_master_clk_ops,
+	&sh5_module_clk_ops,
+	&sh5_bus_clk_ops,
+	&sh5_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
+	BUG_ON(!cprc_base);
+
+	if (idx < ARRAY_SIZE(sh5_clk_ops))
+		*ops = sh5_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 4a2ecbe..95d2162 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -10,6 +10,9 @@
 #include <linux/io.h>
 #include <asm/uaccess.h>
 
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c
deleted file mode 100644
index 1db7ce0..0000000
--- a/arch/sh/kernel/dump_task.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <asm/fpu.h>
-
-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
-	struct pt_regs ptregs;
-
-	ptregs = *task_pt_regs(tsk);
-	elf_core_copy_regs(regs, &ptregs);
-
-	return 1;
-}
-
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
-{
-	int fpvalid = 0;
-
-#if defined(CONFIG_SH_FPU)
-	fpvalid = !!tsk_used_math(tsk);
-	if (fpvalid) {
-		unlazy_fpu(tsk, task_pt_regs(tsk));
-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
-	}
-#endif
-
-	return fpvalid;
-}
-
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index efbb426..1a5cf9d 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -371,3 +371,47 @@
 #endif
 7:	.long	do_syscall_trace_enter
 8:	.long	do_syscall_trace_leave
+
+#ifdef CONFIG_FTRACE
+	.align 2
+	.globl	_mcount
+	.type	_mcount,@function
+	.globl	mcount
+	.type	mcount,@function
+_mcount:
+mcount:
+	mov.l	r4, @-r15
+	mov.l	r5, @-r15
+	mov.l	r6, @-r15
+	mov.l	r7, @-r15
+	sts.l	pr, @-r15
+
+	mov.l	@(20,r15),r4
+	sts	pr, r5
+
+	mov.l	1f, r6
+	mov.l	ftrace_stub, r7
+	cmp/eq	r6, r7
+	bt	skip_trace
+
+	mov.l	@r6, r6
+	jsr	@r6
+	 nop
+
+skip_trace:
+
+	lds.l	@r15+, pr
+	mov.l	@r15+, r7
+	mov.l	@r15+, r6
+	mov.l	@r15+, r5
+	rts
+	 mov.l	@r15+, r4
+
+	.align 2
+1:	.long	ftrace_trace_function
+
+	.globl	ftrace_stub
+ftrace_stub:
+	rts
+	 nop
+#endif /* CONFIG_FTRACE */
diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c
new file mode 100644
index 0000000..bb8b812
--- /dev/null
+++ b/arch/sh/kernel/gpio.c
@@ -0,0 +1,498 @@
+/*
+ * Pinmuxed GPIO support for SuperH.
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+
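+/* Only a single pinmux controller is supported; register_pinmux() replaces it. */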
+static struct pinmux_info *registered_gpio;
+
+static struct pinmux_info *gpio_controller(unsigned gpio)
+{
+	if (!registered_gpio)
+		return NULL;
+
+	if (gpio < registered_gpio->first_gpio)
+		return NULL;
+
+	if (gpio > registered_gpio->last_gpio)
+		return NULL;
+
+	return registered_gpio;
+}
+
+static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
+{
+	if (enum_id < r->begin)
+		return 0;
+
+	if (enum_id > r->end)
+		return 0;
+
+	return 1;
+}
+
+static int read_write_reg(unsigned long reg, unsigned long reg_width,
+			  unsigned long field_width, unsigned long in_pos,
+			  unsigned long value, int do_write)
+{
+	unsigned long data, mask, pos;
+
+	data = 0;
+	mask = (1 << field_width) - 1;
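+	/* fields are packed MSB-first: in_pos 0 maps to the topmost field */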
+	pos = reg_width - ((in_pos + 1) * field_width);
+
+#ifdef DEBUG
+	pr_info("%s, addr = %lx, value = %ld, pos = %ld, "
+		"r_width = %ld, f_width = %ld\n",
+		do_write ? "write" : "read", reg, value, pos,
+		reg_width, field_width);
+#endif
+
+	switch (reg_width) {
+	case 8:
+		data = ctrl_inb(reg);
+		break;
+	case 16:
+		data = ctrl_inw(reg);
+		break;
+	case 32:
+		data = ctrl_inl(reg);
+		break;
+	}
+
+	if (!do_write)
+		return (data >> pos) & mask;
+
+	data &= ~(mask << pos);
+	data |= value << pos;
+
+	switch (reg_width) {
+	case 8:
+		ctrl_outb(data, reg);
+		break;
+	case 16:
+		ctrl_outw(data, reg);
+		break;
+	case 32:
+		ctrl_outl(data, reg);
+		break;
+	}
+	return 0;
+}
+
+static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
+			struct pinmux_data_reg **drp, int *bitp)
+{
+	pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
+	struct pinmux_data_reg *data_reg;
+	int k, n;
+
+	if (!enum_in_range(enum_id, &gpioc->data))
+		return -1;
+
+	k = 0;
+	while (1) {
+		data_reg = gpioc->data_regs + k;
+
+		if (!data_reg->reg_width)
+			break;
+
+		for (n = 0; n < data_reg->reg_width; n++) {
+			if (data_reg->enum_ids[n] == enum_id) {
+				*drp = data_reg;
+				*bitp = n;
+				return 0;
+			}
+		}
+		k++;
+	}
+
+	return -1;
+}
+
+static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
+			  struct pinmux_cfg_reg **crp, int *indexp,
+			  unsigned long **cntp)
+{
+	struct pinmux_cfg_reg *config_reg;
+	unsigned long r_width, f_width;
+	int k, n;
+
+	k = 0;
+	while (1) {
+		config_reg = gpioc->cfg_regs + k;
+
+		r_width = config_reg->reg_width;
+		f_width = config_reg->field_width;
+
+		if (!r_width)
+			break;
+		for (n = 0; n < (r_width / f_width) * (1 << f_width); n++) {
+			if (config_reg->enum_ids[n] == enum_id) {
+				*crp = config_reg;
+				*indexp = n;
+				*cntp = &config_reg->cnt[n / (1 << f_width)];
+				return 0;
+			}
+		}
+		k++;
+	}
+
+	return -1;
+}
+
+static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
+			    int pos, pinmux_enum_t *enum_idp)
+{
+	pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
+	pinmux_enum_t *data = gpioc->gpio_data;
+	int k;
+
+	if (!enum_in_range(enum_id, &gpioc->data)) {
+		if (!enum_in_range(enum_id, &gpioc->mark)) {
+			pr_err("non data/mark enum_id for gpio %d\n", gpio);
+			return -1;
+		}
+	}
+
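+	/*
+	 * gpio_data is the flattened PINMUX_DATA() table: each pin/mark enum
+	 * is followed by its config enums.  A non-zero pos resumes the walk
+	 * right after the previous hit instead of rescanning from the start.
+	 */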
+	if (pos) {
+		*enum_idp = data[pos + 1];
+		return pos + 1;
+	}
+
+	for (k = 0; k < gpioc->gpio_data_size; k++) {
+		if (data[k] == enum_id) {
+			*enum_idp = data[k + 1];
+			return k + 1;
+		}
+	}
+
+	pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
+	return -1;
+}
+
+static int write_config_reg(struct pinmux_info *gpioc,
+			    struct pinmux_cfg_reg *crp,
+			    int index)
+{
+	unsigned long ncomb, pos, value;
+
+	ncomb = 1 << crp->field_width;
+	pos = index / ncomb;
+	value = index % ncomb;
+
+	return read_write_reg(crp->reg, crp->reg_width,
+			      crp->field_width, pos, value, 1);
+}
+
+static int check_config_reg(struct pinmux_info *gpioc,
+			    struct pinmux_cfg_reg *crp,
+			    int index)
+{
+	unsigned long ncomb, pos, value;
+
+	ncomb = 1 << crp->field_width;
+	pos = index / ncomb;
+	value = index % ncomb;
+
+	if (read_write_reg(crp->reg, crp->reg_width,
+			   crp->field_width, pos, 0, 0) == value)
+		return 0;
+
+	return -1;
+}
+
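+/*
+ * cfg_mode for pinmux_config_gpio(): DRYRUN only checks the current
+ * register contents, REQ writes the configuration and bumps the shared
+ * field use count, FREE drops that count again.
+ */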
+enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
+
+int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
+		       int pinmux_type, int cfg_mode)
+{
+	struct pinmux_cfg_reg *cr = NULL;
+	pinmux_enum_t enum_id;
+	struct pinmux_range *range;
+	int in_range, pos, index;
+	unsigned long *cntp;
+
+	switch (pinmux_type) {
+
+	case PINMUX_TYPE_FUNCTION:
+		range = NULL;
+		break;
+
+	case PINMUX_TYPE_OUTPUT:
+		range = &gpioc->output;
+		break;
+
+	case PINMUX_TYPE_INPUT:
+		range = &gpioc->input;
+		break;
+
+	case PINMUX_TYPE_INPUT_PULLUP:
+		range = &gpioc->input_pu;
+		break;
+
+	case PINMUX_TYPE_INPUT_PULLDOWN:
+		range = &gpioc->input_pd;
+		break;
+
+	default:
+		goto out_err;
+	}
+
+	pos = 0;
+	enum_id = 0;
+	index = 0;
+	while (1) {
+		pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
+		if (pos <= 0)
+			goto out_err;
+
+		if (!enum_id)
+			break;
+
+		in_range = enum_in_range(enum_id, &gpioc->function);
+		if (!in_range && range)
+			in_range = enum_in_range(enum_id, range);
+
+		if (!in_range)
+			continue;
+
+		if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
+			goto out_err;
+
+		switch (cfg_mode) {
+		case GPIO_CFG_DRYRUN:
+			if (!*cntp || !check_config_reg(gpioc, cr, index))
+				continue;
+			break;
+
+		case GPIO_CFG_REQ:
+			if (write_config_reg(gpioc, cr, index) != 0)
+				goto out_err;
+			*cntp = *cntp + 1;
+			break;
+
+		case GPIO_CFG_FREE:
+			*cntp = *cntp - 1;
+			break;
+		}
+	}
+
+	return 0;
+ out_err:
+	return -1;
+}
+
+static DEFINE_SPINLOCK(gpio_lock);
+
+int __gpio_request(unsigned gpio)
+{
+	struct pinmux_info *gpioc = gpio_controller(gpio);
+	struct pinmux_data_reg *dummy;
+	unsigned long flags;
+	int i, ret, pinmux_type;
+
+	ret = -EINVAL;
+
+	if (!gpioc)
+		goto err_out;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+
+	if ((gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
+		goto err_unlock;
+
+	/* setup pin function here if no data is associated with pin */
+
+	if (get_data_reg(gpioc, gpio, &dummy, &i) != 0)
+		pinmux_type = PINMUX_TYPE_FUNCTION;
+	else
+		pinmux_type = PINMUX_TYPE_GPIO;
+
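+	/* dry-run the mux change first; the actual request is then expected to succeed */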
+	if (pinmux_type == PINMUX_TYPE_FUNCTION) {
+		if (pinmux_config_gpio(gpioc, gpio,
+				       pinmux_type,
+				       GPIO_CFG_DRYRUN) != 0)
+			goto err_unlock;
+
+		if (pinmux_config_gpio(gpioc, gpio,
+				       pinmux_type,
+				       GPIO_CFG_REQ) != 0)
+			BUG();
+	}
+
+	gpioc->gpios[gpio].flags = pinmux_type;
+
+	ret = 0;
+ err_unlock:
+	spin_unlock_irqrestore(&gpio_lock, flags);
+ err_out:
+	return ret;
+}
+EXPORT_SYMBOL(__gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+	struct pinmux_info *gpioc = gpio_controller(gpio);
+	unsigned long flags;
+	int pinmux_type;
+
+	if (!gpioc)
+		return;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+
+	pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
+	pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
+	gpioc->gpios[gpio].flags = PINMUX_TYPE_NONE;
+
+	spin_unlock_irqrestore(&gpio_lock, flags);
+}
+EXPORT_SYMBOL(gpio_free);
+
+static int pinmux_direction(struct pinmux_info *gpioc,
+			    unsigned gpio, int new_pinmux_type)
+{
+	int ret, pinmux_type;
+
+	ret = -EINVAL;
+	pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
+
+	switch (pinmux_type) {
+	case PINMUX_TYPE_GPIO:
+		break;
+	case PINMUX_TYPE_OUTPUT:
+	case PINMUX_TYPE_INPUT:
+	case PINMUX_TYPE_INPUT_PULLUP:
+	case PINMUX_TYPE_INPUT_PULLDOWN:
+		pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
+		break;
+	default:
+		goto err_out;
+	}
+
+	if (pinmux_config_gpio(gpioc, gpio,
+			       new_pinmux_type,
+			       GPIO_CFG_DRYRUN) != 0)
+		goto err_out;
+
+	if (pinmux_config_gpio(gpioc, gpio,
+			       new_pinmux_type,
+			       GPIO_CFG_REQ) != 0)
+		BUG();
+
+	gpioc->gpios[gpio].flags = new_pinmux_type;
+
+	ret = 0;
+ err_out:
+	return ret;
+}
+
+int gpio_direction_input(unsigned gpio)
+{
+	struct pinmux_info *gpioc = gpio_controller(gpio);
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!gpioc)
+		goto err_out;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+	ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_INPUT);
+	spin_unlock_irqrestore(&gpio_lock, flags);
+ err_out:
+	return ret;
+}
+EXPORT_SYMBOL(gpio_direction_input);
+
+static int __gpio_get_set_value(struct pinmux_info *gpioc,
+				unsigned gpio, int value,
+				int do_write)
+{
+	struct pinmux_data_reg *dr = NULL;
+	int bit = 0;
+
+	if (get_data_reg(gpioc, gpio, &dr, &bit) != 0)
+		BUG();
+	else
+		value = read_write_reg(dr->reg, dr->reg_width,
+				       1, bit, value, do_write);
+
+	return value;
+}
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+	struct pinmux_info *gpioc = gpio_controller(gpio);
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!gpioc)
+		goto err_out;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+	__gpio_get_set_value(gpioc, gpio, value, 1);
+	ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_OUTPUT);
+	spin_unlock_irqrestore(&gpio_lock, flags);
+ err_out:
+	return ret;
+}
+EXPORT_SYMBOL(gpio_direction_output);
+
+int gpio_get_value(unsigned gpio)
+{
+	struct pinmux_info *gpioc = gpio_controller(gpio);
+	unsigned long flags;
+	int value = 0;
+
+	if (!gpioc)
+		BUG();
+	else {
+		spin_lock_irqsave(&gpio_lock, flags);
+		value = __gpio_get_set_value(gpioc, gpio, 0, 0);
+		spin_unlock_irqrestore(&gpio_lock, flags);
+	}
+
+	return value;
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned gpio, int value)
+{
+	struct pinmux_info *gpioc = gpio_controller(gpio);
+	unsigned long flags;
+
+	if (!gpioc)
+		BUG();
+	else {
+		spin_lock_irqsave(&gpio_lock, flags);
+		__gpio_get_set_value(gpioc, gpio, value, 1);
+		spin_unlock_irqrestore(&gpio_lock, flags);
+	}
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int register_pinmux(struct pinmux_info *pip)
+{
+	registered_gpio = pip;
+	pr_info("pinmux: %s handling gpio %d -> %d\n",
+		pip->name, pip->first_gpio, pip->last_gpio);
+
+	return 0;
+}
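Note: the pinmux layer above exposes the generic GPIO calls (__gpio_request(), gpio_direction_input/output(), gpio_get_value(), gpio_set_value()) on top of the per-SoC pinmux tables: a request first dry-runs the register writes (GPIO_CFG_DRYRUN), then commits them and bumps the per-register use count (GPIO_CFG_REQ), and freeing decrements it again (GPIO_CFG_FREE). As a worked example of write_config_reg(): with a 2-bit field (field_width = 2) each field packs ncomb = 4 values, so config index 9 lands in field position 9 / 4 = 2 with value 9 % 4 = 1. The sketch below shows how board code might drive a pin through this API; it is illustrative only, and GPIO_EXAMPLE_PIN is a made-up placeholder rather than anything defined in this patch.

/* Hypothetical board code, assuming the asm/gpio.h wrappers map
 * gpio_request() onto __gpio_request() and the pin below exists in the
 * registered pinmux table.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>

#define GPIO_EXAMPLE_PIN	0	/* assumption: first pin of the pinmux */

static int __init board_led_init(void)
{
	int ret;

	ret = gpio_request(GPIO_EXAMPLE_PIN, "led");	/* -> __gpio_request() */
	if (ret)
		return ret;

	/* Switch the pin to output and drive it high; pinmux_direction()
	 * dry-runs the new configuration before committing it.
	 */
	ret = gpio_direction_output(GPIO_EXAMPLE_PIN, 1);
	if (ret) {
		gpio_free(GPIO_EXAMPLE_PIN);
		return ret;
	}

	gpio_set_value(GPIO_EXAMPLE_PIN, 0);	/* later: drive the pin low */
	return 0;
}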
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 2b89912..29cf458 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -19,12 +19,12 @@
  * Copy data from IO memory space to "real" memory space.
  * This needs to be optimized.
  */
-void memcpy_fromio(void *to, volatile void __iomem *from, unsigned long count)
+void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
 {
-	char *p = to;
+	unsigned char *p = to;
         while (count) {
                 count--;
-                *p = readb((void __iomem *)from);
+                *p = readb(from);
                 p++;
                 from++;
         }
@@ -37,10 +37,10 @@
  */
 void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
 {
-	const char *p = from;
+	const unsigned char *p = from;
         while (count) {
                 count--;
-                writeb(*p, (void __iomem *)to);
+                writeb(*p, to);
                 p++;
                 to++;
         }
@@ -55,7 +55,7 @@
 {
         while (count) {
                 count--;
-                writeb(c, (void __iomem *)dst);
+                writeb(c, dst);
                 dst++;
         }
 }
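Note: the memcpy_fromio()/memcpy_toio()/memset_io() helpers above now take properly const/__iomem-annotated pointers and call readb()/writeb() without casts. A minimal, hypothetical driver fragment using them might look like the following; the physical window address is invented for the example and is not part of this patch.

/* Illustrative only: copy a 64-byte block out of a memory-mapped device
 * window.  DEV_PHYS_BASE is a made-up address.
 */
#include <linux/errno.h>
#include <linux/io.h>

#define DEV_PHYS_BASE	0x18000000UL	/* assumption: example device window */

static int read_dev_block(unsigned char *buf)
{
	void __iomem *base = ioremap(DEV_PHYS_BASE, 64);

	if (!base)
		return -ENOMEM;

	memcpy_fromio(buf, base, 64);	/* byte-wise readb() loop above */
	iounmap(base);
	return 0;
}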
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index db76944..5a7f554 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -19,38 +19,33 @@
 /* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
  * workaround. */
 /* I'm not sure SH7709 has this kind of bug */
-#define dummy_read()	ctrl_inb(0xba000000)
+#define dummy_read()	__raw_readb(0xba000000)
 #else
 #define dummy_read()
 #endif
 
 unsigned long generic_io_base;
 
-static inline void delay(void)
-{
-	ctrl_inw(0xa0000000);
-}
-
 u8 generic_inb(unsigned long port)
 {
-	return ctrl_inb((unsigned long __force)__ioport_map(port, 1));
+	return __raw_readb(__ioport_map(port, 1));
 }
 
 u16 generic_inw(unsigned long port)
 {
-	return ctrl_inw((unsigned long __force)__ioport_map(port, 2));
+	return __raw_readw(__ioport_map(port, 2));
 }
 
 u32 generic_inl(unsigned long port)
 {
-	return ctrl_inl((unsigned long __force)__ioport_map(port, 4));
+	return __raw_readl(__ioport_map(port, 4));
 }
 
 u8 generic_inb_p(unsigned long port)
 {
 	unsigned long v = generic_inb(port);
 
-	delay();
+	ctrl_delay();
 	return v;
 }
 
@@ -58,7 +53,7 @@
 {
 	unsigned long v = generic_inw(port);
 
-	delay();
+	ctrl_delay();
 	return v;
 }
 
@@ -66,7 +61,7 @@
 {
 	unsigned long v = generic_inl(port);
 
-	delay();
+	ctrl_delay();
 	return v;
 }
 
@@ -81,7 +76,7 @@
 	volatile u8 *port_addr;
 	u8 *buf = dst;
 
-	port_addr = (volatile u8 *)__ioport_map(port, 1);
+	port_addr = (volatile u8 __force *)__ioport_map(port, 1);
 	while (count--)
 		*buf++ = *port_addr;
 }
@@ -91,7 +86,7 @@
 	volatile u16 *port_addr;
 	u16 *buf = dst;
 
-	port_addr = (volatile u16 *)__ioport_map(port, 2);
+	port_addr = (volatile u16 __force *)__ioport_map(port, 2);
 	while (count--)
 		*buf++ = *port_addr;
 
@@ -103,7 +98,7 @@
 	volatile u32 *port_addr;
 	u32 *buf = dst;
 
-	port_addr = (volatile u32 *)__ioport_map(port, 4);
+	port_addr = (volatile u32 __force *)__ioport_map(port, 4);
 	while (count--)
 		*buf++ = *port_addr;
 
@@ -112,35 +107,35 @@
 
 void generic_outb(u8 b, unsigned long port)
 {
-	ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1));
+	__raw_writeb(b, __ioport_map(port, 1));
 }
 
 void generic_outw(u16 b, unsigned long port)
 {
-	ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2));
+	__raw_writew(b, __ioport_map(port, 2));
 }
 
 void generic_outl(u32 b, unsigned long port)
 {
-	ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4));
+	__raw_writel(b, __ioport_map(port, 4));
 }
 
 void generic_outb_p(u8 b, unsigned long port)
 {
 	generic_outb(b, port);
-	delay();
+	ctrl_delay();
 }
 
 void generic_outw_p(u16 b, unsigned long port)
 {
 	generic_outw(b, port);
-	delay();
+	ctrl_delay();
 }
 
 void generic_outl_p(u32 b, unsigned long port)
 {
 	generic_outl(b, port);
-	delay();
+	ctrl_delay();
 }
 
 /*
@@ -184,36 +179,6 @@
 	dummy_read();
 }
 
-u8 generic_readb(void __iomem *addr)
-{
-	return ctrl_inb((unsigned long __force)addr);
-}
-
-u16 generic_readw(void __iomem *addr)
-{
-	return ctrl_inw((unsigned long __force)addr);
-}
-
-u32 generic_readl(void __iomem *addr)
-{
-	return ctrl_inl((unsigned long __force)addr);
-}
-
-void generic_writeb(u8 b, void __iomem *addr)
-{
-	ctrl_outb(b, (unsigned long __force)addr);
-}
-
-void generic_writew(u16 b, void __iomem *addr)
-{
-	ctrl_outw(b, (unsigned long __force)addr);
-}
-
-void generic_writel(u32 b, void __iomem *addr)
-{
-	ctrl_outl(b, (unsigned long __force)addr);
-}
-
 void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
 {
 	return (void __iomem *)(addr + generic_io_base);
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
new file mode 100644
index 0000000..c96850b
--- /dev/null
+++ b/arch/sh/kernel/kprobes.c
@@ -0,0 +1,584 @@
+/*
+ * Kernel probes (kprobes) for SuperH
+ *
+ * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
+ * Copyright (C) 2006 Lineo Solutions, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/kdebug.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static struct kprobe saved_current_opcode;
+static struct kprobe saved_next_opcode;
+static struct kprobe saved_next_opcode2;
+
+#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
+#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
+#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
+#define OPCODE_BRAF(x)	(((x) & 0xF0FF) == 0x0023)
+#define OPCODE_BSR(x)	(((x) & 0xF000) == 0xb000)
+#define OPCODE_BSRF(x)	(((x) & 0xF0FF) == 0x0003)
+
+#define OPCODE_BF_S(x)	(((x) & 0xFF00) == 0x8f00)
+#define OPCODE_BT_S(x)	(((x) & 0xFF00) == 0x8d00)
+
+#define OPCODE_BF(x)	(((x) & 0xFF00) == 0x8b00)
+#define OPCODE_BT(x)	(((x) & 0xFF00) == 0x8900)
+
+#define OPCODE_RTS(x)	(((x) & 0x000F) == 0x000b)
+#define OPCODE_RTE(x)	(((x) & 0xFFFF) == 0x002b)
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
+
+	if (OPCODE_RTE(opcode))
+		return -EFAULT;	/* Bad breakpoint */
+
+	p->opcode = opcode;
+
+	return 0;
+}
+
+void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	p->opcode = *p->addr;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	*p->addr = BREAKPOINT_INSTRUCTION;
+	flush_icache_range((unsigned long)p->addr,
+			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+	flush_icache_range((unsigned long)p->addr,
+			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+	if (*p->addr == BREAKPOINT_INSTRUCTION)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * If an illegal slot instruction exception occurs for an address
+ * containing a kprobe, remove the probe.
+ *
+ * Returns 0 if the exception was handled successfully, 1 otherwise.
+ */
+int __kprobes kprobe_handle_illslot(unsigned long pc)
+{
+	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
+
+	if (p != NULL) {
+		printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
+		       (unsigned int)pc + 2);
+		unregister_kprobe(p);
+		return 0;
+	}
+
+	return 1;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	if (saved_next_opcode.addr != 0x0) {
+		arch_disarm_kprobe(p);
+		arch_disarm_kprobe(&saved_next_opcode);
+		saved_next_opcode.addr = 0x0;
+		saved_next_opcode.opcode = 0x0;
+
+		if (saved_next_opcode2.addr != 0x0) {
+			arch_disarm_kprobe(&saved_next_opcode2);
+			saved_next_opcode2.addr = 0x0;
+			saved_next_opcode2.opcode = 0x0;
+		}
+	}
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+					 struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+}
+
+/*
+ * Singlestep is implemented by disabling the current kprobe and setting one
+ * on the next instruction, following branches. Two probes are set if the
+ * branch is conditional.
+ */
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t *addr = NULL;
+	saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc);
+	addr = saved_current_opcode.addr;
+
+	if (p != NULL) {
+		arch_disarm_kprobe(p);
+
+		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
+			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
+			saved_next_opcode.addr =
+			    (kprobe_opcode_t *) regs->regs[reg_nr];
+		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
+			unsigned long disp = (p->opcode & 0x0FFF);
+			saved_next_opcode.addr =
+			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+
+		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
+			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
+			saved_next_opcode.addr =
+			    (kprobe_opcode_t *) (regs->pc + 4 +
+						 regs->regs[reg_nr]);
+
+		} else if (OPCODE_RTS(p->opcode)) {
+			saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr;
+
+		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
+			unsigned long disp = (p->opcode & 0x00FF);
+			/* case 1 */
+			saved_next_opcode.addr = p->addr + 1;
+			/* case 2 */
+			saved_next_opcode2.addr =
+			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
+			arch_arm_kprobe(&saved_next_opcode2);
+
+		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
+			unsigned long disp = (p->opcode & 0x00FF);
+			/* case 1 */
+			saved_next_opcode.addr = p->addr + 2;
+			/* case 2 */
+			saved_next_opcode2.addr =
+			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
+			arch_arm_kprobe(&saved_next_opcode2);
+
+		} else {
+			saved_next_opcode.addr = p->addr + 1;
+		}
+
+		saved_next_opcode.opcode = *(saved_next_opcode.addr);
+		arch_arm_kprobe(&saved_next_opcode);
+	}
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+				      struct pt_regs *regs)
+{
+	ri->ret_addr = (kprobe_opcode_t *) regs->pr;
+
+	/* Replace the return addr with trampoline addr */
+	regs->pr = (unsigned long)kretprobe_trampoline;
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	kprobe_opcode_t *addr = NULL;
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	addr = (kprobe_opcode_t *) (regs->pc);
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+				goto no_kprobe;
+			}
+			/* We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * Here we save the original kprobe state and just
+			 * single-step the instruction of the new probe,
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs)) {
+				goto ss_probe;
+			}
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		/* Not one of ours: let kernel handle it */
+		if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it. Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address. In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+
+		goto no_kprobe;
+	}
+
+	set_current_kprobe(p, regs, kcb);
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+	if (p->pre_handler && p->pre_handler(p, regs))
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * For function-return probes, arch_init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+	asm volatile (".globl kretprobe_trampoline\n"
+		      "kretprobe_trampoline:\n\t"
+		      "nop\n");
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head, empty_rp;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	INIT_HLIST_HEAD(&empty_rp);
+	kretprobe_hash_lock(current, &head, &flags);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or because more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler) {
+			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			ri->rp->handler(ri, regs);
+			__get_cpu_var(current_kprobe) = NULL;
+		}
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	regs->pc = orig_ret_address;
+	kretprobe_hash_unlock(current, &flags);
+
+	preempt_enable_no_resched();
+
+	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+		hlist_del(&ri->hlist);
+		kfree(ri);
+	}
+
+	return orig_ret_address;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	kprobe_opcode_t *addr = NULL;
+	struct kprobe *p = NULL;
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	if (saved_next_opcode.addr != 0x0) {
+		arch_disarm_kprobe(&saved_next_opcode);
+		saved_next_opcode.addr = 0x0;
+		saved_next_opcode.opcode = 0x0;
+
+		addr = saved_current_opcode.addr;
+		saved_current_opcode.addr = 0x0;
+
+		p = get_kprobe(addr);
+		arch_arm_kprobe(p);
+
+		if (saved_next_opcode2.addr != 0x0) {
+			arch_disarm_kprobe(&saved_next_opcode2);
+			saved_next_opcode2.addr = 0x0;
+			saved_next_opcode2.opcode = 0x0;
+		}
+	}
+
+	/* Restore back the original saved kprobes variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+
+	reset_current_kprobe();
+
+out:
+	preempt_enable_no_resched();
+
+	return 1;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	const struct exception_table_entry *entry;
+
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the pc back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->pc = (unsigned long)cur->addr;
+		if (kcb->kprobe_status == KPROBE_REENTER)
+			restore_previous_kprobe(kcb);
+		else
+			reset_current_kprobe();
+		preempt_enable_no_resched();
+		break;
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting; the
+		 * npre/npostfault counts could also be used to account
+		 * for these specific fault cases.
+		 */
+		kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because an instruction in the pre/post
+		 * handler caused the page fault.  This can happen if the
+		 * handler tries to access user space via copy_from_user(),
+		 * get_user(), etc.  Let the user-specified fault handler
+		 * try to fix it first.
+		 */
+		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+			return 1;
+
+		/*
+		 * In case the user-specified fault handler returned
+		 * zero, try to fix up.
+		 */
+		if ((entry = search_exception_tables(regs->pc)) != NULL) {
+			regs->pc = entry->fixup;
+			return 1;
+		}
+
+		/*
+		 * fixup_exception() could not handle it;
+		 * let do_page_fault() fix it.
+		 */
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct kprobe *p = NULL;
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+	kprobe_opcode_t *addr = NULL;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	addr = (kprobe_opcode_t *) (args->regs->pc);
+	if (val == DIE_TRAP) {
+		if (!kprobe_running()) {
+			if (kprobe_handler(args->regs)) {
+				ret = NOTIFY_STOP;
+			} else {
+				/* Not a kprobe trap */
+				ret = NOTIFY_DONE;
+			}
+		} else {
+			p = get_kprobe(addr);
+			if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
+			    (kcb->kprobe_status == KPROBE_REENTER)) {
+				if (post_kprobe_handler(args->regs))
+					ret = NOTIFY_STOP;
+			} else {
+				if (kprobe_handler(args->regs)) {
+					ret = NOTIFY_STOP;
+				} else {
+					p = __get_cpu_var(current_kprobe);
+					if (p->break_handler &&
+					    p->break_handler(p, args->regs))
+						ret = NOTIFY_STOP;
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	unsigned long addr;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	kcb->jprobe_saved_regs = *regs;
+	kcb->jprobe_saved_r15 = regs->regs[15];
+	addr = kcb->jprobe_saved_r15;
+
+	/*
+	 * TBD: As Linus pointed out, gcc assumes that the callee
+	 * owns the argument space and could overwrite it, e.g.
+	 * tailcall optimization. So, to be absolutely safe
+	 * we also save and restore enough stack bytes to cover
+	 * the argument area.
+	 */
+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+	       MIN_STACK_SIZE(addr));
+
+	regs->pc = (unsigned long)(jp->entry);
+
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack_addr = kcb->jprobe_saved_r15;
+	u8 *addr = (u8 *)regs->pc;
+
+	if ((addr >= (u8 *)jprobe_return) &&
+	    (addr <= (u8 *)jprobe_return_end)) {
+		*regs = kcb->jprobe_saved_regs;
+
+		memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
+		       MIN_STACK_SIZE(stack_addr));
+
+		kcb->kprobe_status = KPROBE_HIT_SS;
+		preempt_enable_no_resched();
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	saved_next_opcode.addr = 0x0;
+	saved_next_opcode.opcode = 0x0;
+
+	saved_current_opcode.addr = 0x0;
+	saved_current_opcode.opcode = 0x0;
+
+	saved_next_opcode2.addr = 0x0;
+	saved_next_opcode2.opcode = 0x0;
+
+	return register_kprobe(&trampoline_p);
+}
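Note: the functions above are the architecture backend for the generic kprobes core; nothing calls them directly. A conventional user registers a struct kprobe through register_kprobe(), at which point arch_prepare_kprobe()/arch_arm_kprobe() from this file plant the breakpoint and kprobe_handler()/post_kprobe_handler() drive the single-step sequence. A minimal, illustrative client module is sketched below; the probed symbol is just an example and is not mandated by this patch.

/* Illustrative kprobes client, not part of this patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at pc=0x%08lx\n",
	       instruction_pointer(regs));
	return 0;	/* let the core single-step the displaced instruction */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* assumption: any probeable symbol */
	.pre_handler	= handler_pre,
};

static int __init kprobe_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");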
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index 129b2cf..c1ea41e 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <asm/machvec.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 
@@ -125,9 +126,6 @@
 	mv_set(insb);	mv_set(insw);	mv_set(insl);
 	mv_set(outsb);	mv_set(outsw);	mv_set(outsl);
 
-	mv_set(readb);	mv_set(readw);	mv_set(readl);
-	mv_set(writeb);	mv_set(writew);	mv_set(writel);
-
 	mv_set(ioport_map);
 	mv_set(ioport_unmap);
 	mv_set(irq_demux);
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 3326a45..b965f02 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -7,7 +7,11 @@
  *
  *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
  *		     Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
- *		     Copyright (C) 2002 - 2007  Paul Mundt
+ *		     Copyright (C) 2002 - 2008  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -26,6 +30,7 @@
 #include <asm/system.h>
 #include <asm/ubc.h>
 #include <asm/fpu.h>
+#include <asm/syscalls.h>
 
 static int hlt_counter;
 int ubc_usercnt = 0;
@@ -111,15 +116,21 @@
 {
 	printk("\n");
 	printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
+	printk("CPU : %d    %s  (%s %.*s)\n",
+	       smp_processor_id(), print_tainted(), init_utsname()->release,
+	       (int)strcspn(init_utsname()->version, " "),
+	       init_utsname()->version);
+
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
+	print_symbol("PR is at %s\n", regs->pr);
+
 	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
 	       regs->pc, regs->regs[15], regs->sr);
 #ifdef CONFIG_MMU
-	printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
+	printk("TEA : %08x\n", ctrl_inl(MMU_TEA));
 #else
-	printk("                  ");
+	printk("\n");
 #endif
-	printk("%s\n", print_tainted());
 
 	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
 	       regs->regs[0],regs->regs[1],
@@ -162,6 +173,7 @@
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {
 	struct pt_regs regs;
+	int pid;
 
 	memset(&regs, 0, sizeof(regs));
 	regs.regs[4] = (unsigned long)arg;
@@ -171,8 +183,12 @@
 	regs.sr = (1 << 30);
 
 	/* Ok, create the new process.. */
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-		       &regs, 0, NULL, NULL);
+	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+		      &regs, 0, NULL, NULL);
+
+	trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);
+
+	return pid;
 }
 
 /*
@@ -210,10 +226,10 @@
 	struct task_struct *tsk = current;
 
 	fpvalid = !!tsk_used_math(tsk);
-	if (fpvalid) {
-		unlazy_fpu(tsk, regs);
-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
-	}
+	if (fpvalid)
+		fpvalid = !fpregs_get(tsk, NULL, 0,
+				      sizeof(struct user_fpu_struct),
+				      fpu, NULL);
 #endif
 
 	return fpvalid;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index b9dbd2d..b7aa092 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/proc_fs.h>
 #include <linux/io.h>
+#include <asm/syscalls.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
@@ -395,6 +396,7 @@
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {
 	struct pt_regs regs;
+	int pid;
 
 	memset(&regs, 0, sizeof(regs));
 	regs.regs[2] = (unsigned long)arg;
@@ -403,8 +405,13 @@
 	regs.pc = (unsigned long)kernel_thread_helper;
 	regs.sr = (1 << 30);
 
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-		       &regs, 0, NULL, NULL);
+	/* Ok, create the new process.. */
+	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+		      &regs, 0, NULL, NULL);
+
+	trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);
+
+	return pid;
 }
 
 /*
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 035cb30..29ca09d 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -1,12 +1,14 @@
 /*
- * linux/arch/sh/kernel/ptrace.c
+ * SuperH process tracing
  *
- * Original x86 implementation:
- *	By Ross Biro 1/23/92
- *	edited by Linus Torvalds
+ * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+ * Copyright (C) 2002 - 2008  Paul Mundt
  *
- * SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
- * Audit support: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -22,16 +24,15 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
+#include <asm/syscalls.h>
+#include <asm/fpu.h>
 
 /*
  * This routine will get a word off of the process kernel stack.
@@ -61,16 +62,12 @@
 
 void user_enable_single_step(struct task_struct *child)
 {
-	struct pt_regs *regs = task_pt_regs(child);
-	long pc;
-
-	pc = get_stack_long(child, (long)&regs->pc);
-
 	/* Next scheduling will set up UBC */
 	if (child->thread.ubc_pc == 0)
 		ubc_usercnt += 1;
 
-	child->thread.ubc_pc = pc;
+	child->thread.ubc_pc = get_stack_long(child,
+				offsetof(struct pt_regs, pc));
 
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
@@ -102,9 +99,213 @@
 	user_disable_single_step(child);
 }
 
+static int genregs_get(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       void *kbuf, void __user *ubuf)
+{
+	const struct pt_regs *regs = task_pt_regs(target);
+	int ret;
+
+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  regs->regs,
+				  0, 16 * sizeof(unsigned long));
+	if (!ret)
+		/* PC, PR, SR, GBR, MACH, MACL, TRA */
+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					  &regs->pc,
+					  offsetof(struct pt_regs, pc),
+					  sizeof(struct pt_regs));
+	if (!ret)
+		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					       sizeof(struct pt_regs), -1);
+
+	return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 regs->regs,
+				 0, 16 * sizeof(unsigned long));
+	if (!ret && count > 0)
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &regs->pc,
+					 offsetof(struct pt_regs, pc),
+					 sizeof(struct pt_regs));
+	if (!ret)
+		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						sizeof(struct pt_regs), -1);
+
+	return ret;
+}
+
+#ifdef CONFIG_SH_FPU
+int fpregs_get(struct task_struct *target,
+	       const struct user_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       void *kbuf, void __user *ubuf)
+{
+	int ret;
+
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
+
+	if ((boot_cpu_data.flags & CPU_HAS_FPU))
+		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					   &target->thread.fpu.hard, 0, -1);
+
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				   &target->thread.fpu.soft, 0, -1);
+}
+
+static int fpregs_set(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
+
+	set_stopped_child_used_math(target);
+
+	if ((boot_cpu_data.flags & CPU_HAS_FPU))
+		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					  &target->thread.fpu.hard, 0, -1);
+
+	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.soft, 0, -1);
+}
+
+static int fpregs_active(struct task_struct *target,
+			 const struct user_regset *regset)
+{
+	return tsk_used_math(target) ? regset->n : 0;
+}
+#endif
+
+#ifdef CONFIG_SH_DSP
+static int dspregs_get(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       void *kbuf, void __user *ubuf)
+{
+	const struct pt_dspregs *regs = task_pt_dspregs(target);
+	int ret;
+
+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
+				  0, sizeof(struct pt_dspregs));
+	if (!ret)
+		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					       sizeof(struct pt_dspregs), -1);
+
+	return ret;
+}
+
+static int dspregs_set(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       const void *kbuf, const void __user *ubuf)
+{
+	struct pt_dspregs *regs = task_pt_dspregs(target);
+	int ret;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
+				 0, sizeof(struct pt_dspregs));
+	if (!ret)
+		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						sizeof(struct pt_dspregs), -1);
+
+	return ret;
+}
+
+static int dspregs_active(struct task_struct *target,
+			  const struct user_regset *regset)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+
+	return regs->sr & SR_DSP ? regset->n : 0;
+}
+#endif
+
+/*
+ * These are our native regset flavours.
+ */
+enum sh_regset {
+	REGSET_GENERAL,
+#ifdef CONFIG_SH_FPU
+	REGSET_FPU,
+#endif
+#ifdef CONFIG_SH_DSP
+	REGSET_DSP,
+#endif
+};
+
+static const struct user_regset sh_regsets[] = {
+	/*
+	 * Format is:
+	 *	R0 --> R15
+	 *	PC, PR, SR, GBR, MACH, MACL, TRA
+	 */
+	[REGSET_GENERAL] = {
+		.core_note_type	= NT_PRSTATUS,
+		.n		= ELF_NGREG,
+		.size		= sizeof(long),
+		.align		= sizeof(long),
+		.get		= genregs_get,
+		.set		= genregs_set,
+	},
+
+#ifdef CONFIG_SH_FPU
+	[REGSET_FPU] = {
+		.core_note_type	= NT_PRFPREG,
+		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
+		.size		= sizeof(long),
+		.align		= sizeof(long),
+		.get		= fpregs_get,
+		.set		= fpregs_set,
+		.active		= fpregs_active,
+	},
+#endif
+
+#ifdef CONFIG_SH_DSP
+	[REGSET_DSP] = {
+		.n		= sizeof(struct pt_dspregs) / sizeof(long),
+		.size		= sizeof(long),
+		.align		= sizeof(long),
+		.get		= dspregs_get,
+		.set		= dspregs_set,
+		.active		= dspregs_active,
+	},
+#endif
+};
+
+static const struct user_regset_view user_sh_native_view = {
+	.name		= "sh",
+	.e_machine	= EM_SH,
+	.regsets	= sh_regsets,
+	.n		= ARRAY_SIZE(sh_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &user_sh_native_view;
+}
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	struct user * dummy = NULL;
+	unsigned long __user *datap = (unsigned long __user *)data;
 	int ret;
 
 	switch (request) {
@@ -133,7 +334,7 @@
 			tmp = !!tsk_used_math(child);
 		else
 			tmp = 0;
-		ret = put_user(tmp, (unsigned long __user *)data);
+		ret = put_user(tmp, datap);
 		break;
 	}
 
@@ -157,34 +358,39 @@
 		}
 		break;
 
+	case PTRACE_GETREGS:
+		return copy_regset_to_user(child, &user_sh_native_view,
+					   REGSET_GENERAL,
+					   0, sizeof(struct pt_regs),
+					   (void __user *)data);
+	case PTRACE_SETREGS:
+		return copy_regset_from_user(child, &user_sh_native_view,
+					     REGSET_GENERAL,
+					     0, sizeof(struct pt_regs),
+					     (const void __user *)data);
+#ifdef CONFIG_SH_FPU
+	case PTRACE_GETFPREGS:
+		return copy_regset_to_user(child, &user_sh_native_view,
+					   REGSET_FPU,
+					   0, sizeof(struct user_fpu_struct),
+					   (void __user *)data);
+	case PTRACE_SETFPREGS:
+		return copy_regset_from_user(child, &user_sh_native_view,
+					     REGSET_FPU,
+					     0, sizeof(struct user_fpu_struct),
+					     (const void __user *)data);
+#endif
 #ifdef CONFIG_SH_DSP
-	case PTRACE_GETDSPREGS: {
-		unsigned long dp;
-
-		ret = -EIO;
-		dp = ((unsigned long) child) + THREAD_SIZE -
-			 sizeof(struct pt_dspregs);
-		if (*((int *) (dp - 4)) == SR_FD) {
-			copy_to_user((void *)addr, (void *) dp,
-				sizeof(struct pt_dspregs));
-			ret = 0;
-		}
-		break;
-	}
-
-	case PTRACE_SETDSPREGS: {
-		unsigned long dp;
-
-		ret = -EIO;
-		dp = ((unsigned long) child) + THREAD_SIZE -
-			 sizeof(struct pt_dspregs);
-		if (*((int *) (dp - 4)) == SR_FD) {
-			copy_from_user((void *) dp, (void *)addr,
-				sizeof(struct pt_dspregs));
-			ret = 0;
-		}
-		break;
-	}
+	case PTRACE_GETDSPREGS:
+		return copy_regset_to_user(child, &user_sh_native_view,
+					   REGSET_DSP,
+					   0, sizeof(struct pt_dspregs),
+					   (void __user *)data);
+	case PTRACE_SETDSPREGS:
+		return copy_regset_from_user(child, &user_sh_native_view,
+					     REGSET_DSP,
+					     0, sizeof(struct pt_dspregs),
+					     (const void __user *)data);
 #endif
 #ifdef CONFIG_BINFMT_ELF_FDPIC
 	case PTRACE_GETFDPIC: {
@@ -202,7 +408,7 @@
 		}
 
 		ret = 0;
-		if (put_user(tmp, (unsigned long *) data)) {
+		if (put_user(tmp, datap)) {
 			ret = -EFAULT;
 			break;
 		}
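Note: with the regset-backed PTRACE_GETREGS/PTRACE_SETREGS (and the FPU/DSP variants) added above, a debugger can fetch the whole register frame in one call instead of issuing a PTRACE_PEEKUSR per register. A rough userspace sketch follows; it assumes the usual PTRACE_GETREGS request number and the SH struct pt_regs layout exported by the kernel headers, and omits error handling.

/* Hypothetical tracer: stop a child at exec and dump its PC and stack
 * pointer with a single PTRACE_GETREGS.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/ptrace.h>		/* assumption: provides struct pt_regs */

int main(void)
{
	struct pt_regs regs;
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("ls", "ls", (char *)NULL);	/* stops on exec */
		_exit(1);
	}

	waitpid(pid, &status, 0);
	ptrace(PTRACE_GETREGS, pid, NULL, &regs);	/* one regset copyout */
	printf("child pc=0x%08lx sp=0x%08lx\n", regs.pc, regs.regs[15]);
	ptrace(PTRACE_CONT, pid, NULL, NULL);

	return 0;
}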
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 9c64248..e15b099 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -35,6 +35,7 @@
 #include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
+#include <asm/syscalls.h>
 #include <asm/fpu.h>
 
 /* This mask defines the bits of the SR which the user is not allowed to
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index de83205..e7152cc 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -26,6 +26,9 @@
 #include <linux/err.h>
 #include <linux/debugfs.h>
 #include <linux/crash_dump.h>
+#include <linux/mmzone.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
@@ -144,6 +147,7 @@
 {
 	unsigned long long free_mem;
 	unsigned long long crash_size, crash_base;
+	void *vp;
 	int ret;
 
 	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
@@ -152,12 +156,14 @@
 			&crash_size, &crash_base);
 	if (ret == 0 && crash_size) {
 		if (crash_base <= 0) {
-			printk(KERN_INFO "crashkernel reservation failed - "
-					"you have to specify a base address\n");
-			return;
-		}
-
-		if (reserve_bootmem(crash_base, crash_size,
+			vp = alloc_bootmem_nopanic(crash_size);
+			if (!vp) {
+				printk(KERN_INFO "crashkernel allocation "
+				       "failed\n");
+				return;
+			}
+			crash_base = __pa(vp);
+		} else if (reserve_bootmem(crash_base, crash_size,
 					BOOTMEM_EXCLUSIVE) < 0) {
 			printk(KERN_INFO "crashkernel reservation failed - "
 					"memory is in use\n");
@@ -179,6 +185,24 @@
 {}
 #endif
 
+#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
+void __cpuinit calibrate_delay(void)
+{
+	struct clk *clk = clk_get(NULL, "cpu_clk");
+
+	if (IS_ERR(clk))
+		panic("Need a sane CPU clock definition!");
+
+	loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;
+
+	printk(KERN_INFO "Calibrating delay loop (skipped)... "
+			 "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
+			 loops_per_jiffy/(500000/HZ),
+			 (loops_per_jiffy/(5000/HZ)) % 100,
+			 loops_per_jiffy);
+}
+#endif
+
 void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 						unsigned long end_pfn)
 {
@@ -232,15 +256,17 @@
 	 * case of us accidentally initializing the bootmem allocator with
 	 * an invalid RAM area.
 	 */
-	reserve_bootmem(__MEMORY_START+PAGE_SIZE,
-		(PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START,
-		BOOTMEM_DEFAULT);
+	reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+			(PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
+			(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
+			BOOTMEM_DEFAULT);
 
 	/*
 	 * reserve physical page 0 - it's a special BIOS page on many boxes,
 	 * enabling clean reboots, SMP operation, laptop functions.
 	 */
-	reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
+	reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
+			BOOTMEM_DEFAULT);
 
 	sparse_memory_present_with_active_regions(0);
 
@@ -248,17 +274,18 @@
 	ROOT_DEV = Root_RAM0;
 
 	if (LOADER_TYPE && INITRD_START) {
-		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-			reserve_bootmem(INITRD_START + __MEMORY_START,
-					INITRD_SIZE, BOOTMEM_DEFAULT);
-			initrd_start = INITRD_START + PAGE_OFFSET +
-					__MEMORY_START;
+		unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;
+
+		if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
+			reserve_bootmem(initrd_start_phys, INITRD_SIZE,
+					BOOTMEM_DEFAULT);
+			initrd_start = (unsigned long)__va(initrd_start_phys);
 			initrd_end = initrd_start + INITRD_SIZE;
 		} else {
 			printk("initrd extends beyond end of memory "
-			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-				    INITRD_START + INITRD_SIZE,
-				    max_low_pfn << PAGE_SHIFT);
+			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+			       initrd_start_phys + INITRD_SIZE,
+			       (unsigned long)PFN_PHYS(max_low_pfn));
 			initrd_start = 0;
 		}
 	}
@@ -530,6 +557,8 @@
 static int __init sh_debugfs_init(void)
 {
 	sh_debugfs_root = debugfs_create_dir("sh", NULL);
+	if (!sh_debugfs_root)
+		return -ENOMEM;
 	if (IS_ERR(sh_debugfs_root))
 		return PTR_ERR(sh_debugfs_root);
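Note: as a quick sanity check of the preset-BogoMIPS path added to setup.c above, assume a hypothetical 200 MHz "cpu_clk" and HZ = 100. Then loops_per_jiffy becomes (200000000 >> 1) / 100 = 1000000, the banner computes 1000000 / (500000/100) = 200 and (1000000 / (5000/100)) % 100 = 0, and it prints "200.00 BogoMIPS PRESET (lpj=1000000)" without ever running the calibration loop.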
 
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 6e1b1c2..d917b7b 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -16,6 +16,7 @@
 #include <asm/delay.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/ftrace.h>
 
 extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 extern struct hw_interrupt_type no_irq_type;
@@ -133,6 +134,9 @@
 EXPORT_SYMBOL(clear_user_page);
 #endif
 
+#ifdef CONFIG_FTRACE
+EXPORT_SYMBOL(mcount);
+#endif
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
 #ifdef CONFIG_IPV6
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 51689d2..69d09c0 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -30,6 +30,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/syscalls.h>
 #include <asm/fpu.h>
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -215,6 +216,9 @@
 	sigset_t set;
 	int r0;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 
@@ -247,9 +251,11 @@
 	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
 	sigset_t set;
-	stack_t st;
 	int r0;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 
@@ -265,11 +271,9 @@
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
 		goto badframe;
 
-	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL,
+			   regs->regs[15]) == -EFAULT)
 		goto badframe;
-	/* It is more difficult to avoid calling this function than to
-	   call it and ignore errors.  */
-	do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
 
 	return r0;
 
@@ -429,7 +433,7 @@
 
 	/* Create the ucontext.  */
 	err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user(NULL, &frame->uc.uc_link);
 	err |= __put_user((void *)current->sas_ss_sp,
 			  &frame->uc.uc_stack.ss_sp);
 	err |= __put_user(sas_ss_flags(regs->regs[15]),
@@ -492,37 +496,43 @@
 	return -EFAULT;
 }
 
+static inline void
+handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
+		       struct sigaction *sa)
+{
+	/* If we're not from a syscall, bail out */
+	if (regs->tra < 0)
+		return;
+
+	/* check for system call restart.. */
+	switch (regs->regs[0]) {
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+		no_system_call_restart:
+			regs->regs[0] = -EINTR;
+			regs->sr |= 1;
+			break;
+
+		case -ERESTARTSYS:
+			if (!(sa->sa_flags & SA_RESTART))
+				goto no_system_call_restart;
+		/* fallthrough */
+		case -ERESTARTNOINTR:
+			regs->regs[0] = save_r0;
+			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+			break;
+	}
+}
+
 /*
  * OK, we're invoking a handler
  */
-
 static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
 {
 	int ret;
 
-	/* Are we from a system call? */
-	if (regs->tra >= 0) {
-		/* If so, check system call restarting.. */
-		switch (regs->regs[0]) {
-			case -ERESTART_RESTARTBLOCK:
-			case -ERESTARTNOHAND:
-			no_system_call_restart:
-				regs->regs[0] = -EINTR;
-				break;
-
-			case -ERESTARTSYS:
-				if (!(ka->sa.sa_flags & SA_RESTART))
-					goto no_system_call_restart;
-			/* fallthrough */
-			case -ERESTARTNOINTR:
-				regs->regs[0] = save_r0;
-				regs->pc -= instruction_size(
-						ctrl_inw(regs->pc - 4));
-				break;
-		}
-	}
 
 	/* Set up the stack frame */
 	if (ka->sa.sa_flags & SA_SIGINFO)
@@ -580,6 +590,9 @@
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
+		if (regs->sr & 1)
+			handle_syscall_restart(save_r0, regs, &ka.sa);
+
 		/* Whee!  Actually deliver the signal.  */
 		if (handle_signal(signr, &ka, &info, oldset,
 				  regs, save_r0) == 0) {
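Note: concretely, with the restart handling factored into handle_syscall_restart() above: if a signal whose handler lacks SA_RESTART interrupts a blocked read(), the syscall leaves -ERESTARTSYS in r0 and the helper rewrites it to -EINTR before the signal frame is built, so userspace sees read() fail with EINTR; if SA_RESTART is set, r0 is restored to its value at syscall entry and pc is stepped back over the trap instruction so the read() is reissued once the handler returns.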
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 1d62dfe..ce3e851 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -43,6 +43,10 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+		sigset_t *oldset, struct pt_regs * regs);
+
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
@@ -371,6 +375,9 @@
 	sigset_t set;
 	long long ret;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 
@@ -408,6 +415,9 @@
 	stack_t __user st;
 	long long ret;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 
@@ -535,7 +545,7 @@
 		 * On SH5 all edited pointers are subject to NEFF
 		 */
 		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
 	} else {
 		/*
 		 * Different approach on SH5.
@@ -550,10 +560,10 @@
 		 */
 		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
 		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
 
 		if (__copy_to_user(frame->retcode,
-			(unsigned long long)sa_default_restorer & (~1), 16) != 0)
+			(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
 			goto give_sigsegv;
 
 		/* Cohere the trampoline with the I-cache. */
@@ -566,7 +576,7 @@
 	 */
 	regs->regs[REG_SP] = (unsigned long) frame;
 	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
-        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
 	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
 
         /* FIXME:
@@ -652,7 +662,7 @@
 		 * On SH5 all edited pointers are subject to NEFF
 		 */
 		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
 	} else {
 		/*
 		 * Different approach on SH5.
@@ -668,10 +678,10 @@
 
 		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
 		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
 
 		if (__copy_to_user(frame->retcode,
-			(unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
+			(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
 			goto give_sigsegv;
 
 		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
@@ -683,7 +693,7 @@
 	 */
 	regs->regs[REG_SP] = (unsigned long) frame;
 	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
-        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
 	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
 	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
 	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
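Note: the repeated NEFF edits above keep SH-5 pointers in canonical sign-extended form. For example, with the usual sh64 definitions (NEFF = 32, NEFF_SIGN = 1LL << 31, NEFF_MASK = -1LL << 32; these values are recalled here as an assumption, they are not spelled out in the hunks), a frame address of 0x80001000 has NEFF_SIGN set and is ORed with NEFF_MASK, giving 0xffffffff80001000 before it is written into PR or the stack pointer.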
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 001778f..508dfb02 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
  *
  * SMP support for the SuperH processors.
  *
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2008 Paul Mundt
  * Copyright (C) 2006 - 2007 Akio Idehara
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -86,9 +86,12 @@
 
 	local_irq_enable();
 
+	cpu = smp_processor_id();
+
+	/* Enable local timers */
+	local_timer_setup(cpu);
 	calibrate_delay();
 
-	cpu = smp_processor_id();
 	smp_store_cpu_info(cpu);
 
 	cpu_set(cpu, cpu_online_map);
@@ -186,6 +189,42 @@
 	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
+void smp_timer_broadcast(cpumask_t mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		plat_send_ipi(cpu, SMP_MSG_TIMER);
+}
+
+static void ipi_timer(void)
+{
+	irq_enter();
+	local_timer_interrupt();
+	irq_exit();
+}
+
+void smp_message_recv(unsigned int msg)
+{
+	switch (msg) {
+	case SMP_MSG_FUNCTION:
+		generic_smp_call_function_interrupt();
+		break;
+	case SMP_MSG_RESCHEDULE:
+		break;
+	case SMP_MSG_FUNCTION_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
+	case SMP_MSG_TIMER:
+		ipi_timer();
+		break;
+	default:
+		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
+		       smp_processor_id(), __func__, msg);
+		break;
+	}
+}
+
 /* Not really SMP stuff ... */
 int setup_profiling_timer(unsigned int multiplier)
 {
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 54d1f61..1a2a5eb 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -3,7 +3,7 @@
  *
  * Stack trace management functions
  *
- *  Copyright (C) 2006  Paul Mundt
+ *  Copyright (C) 2006 - 2008  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -36,3 +36,24 @@
 	}
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	unsigned long *sp = (unsigned long *)tsk->thread.sp;
+
+	while (!kstack_end(sp)) {
+		unsigned long addr = *sp++;
+
+		if (__kernel_text_address(addr)) {
+			if (in_sched_functions(addr))
+				break;
+			if (trace->skip > 0)
+				trace->skip--;
+			else
+				trace->entries[trace->nr_entries++] = addr;
+			if (trace->nr_entries >= trace->max_entries)
+				break;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
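Note: save_stack_trace_tsk() above walks the saved kernel stack of another task, stops at scheduler internals, and fills the caller-supplied struct stack_trace. A minimal illustrative caller, assuming a fixed 16-entry buffer, might look like this:

/* Illustrative only: capture a task's kernel stack and print it symbolically. */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>

static void dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
	};
	unsigned int i;

	save_stack_trace_tsk(tsk, &trace);

	for (i = 0; i < trace.nr_entries; i++)
		print_symbol("  %s\n", entries[i]);
}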
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 9061b86..38f098c 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -23,6 +23,7 @@
 #include <linux/fs.h>
 #include <linux/ipc.h>
 #include <asm/cacheflush.h>
+#include <asm/syscalls.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
@@ -170,6 +171,8 @@
 	version = call >> 16; /* hack for backward compatibility */
 	call &= 0xffff;
 
+	trace_mark(kernel_arch_ipc_call, "call %u first %d", call, first);
+
 	if (call <= SEMTIMEDOP)
 		switch (call) {
 		case SEMOP:
@@ -186,7 +189,7 @@
 			union semun fourth;
 			if (!ptr)
 				return -EINVAL;
-			if (get_user(fourth.__pad, (void * __user *) ptr))
+			if (get_user(fourth.__pad, (void __user * __user *) ptr))
 				return -EFAULT;
 			return sys_semctl (first, second, third, fourth);
 			}
@@ -261,13 +264,13 @@
 	return -EINVAL;
 }
 
-asmlinkage int sys_uname(struct old_utsname * name)
+asmlinkage int sys_uname(struct old_utsname __user *name)
 {
 	int err;
 	if (!name)
 		return -EFAULT;
 	down_read(&uts_sem);
-	err = copy_to_user(name, utsname(), sizeof (*name));
+	err = copy_to_user(name, utsname(), sizeof(*name));
 	up_read(&uts_sem);
 	return err?-EFAULT:0;
 }
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index f0aa5c3..dbba1e1 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include <asm/syscalls.h>
 
 /*
  * sys_pipe() is the normal C calling standard for creating
@@ -37,13 +38,13 @@
 	return error;
 }
 
-asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
 			     size_t count, long dummy, loff_t pos)
 {
 	return sys_pread64(fd, buf, count, pos);
 }
 
-asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
 			      size_t count, long dummy, loff_t pos)
 {
 	return sys_pwrite64(fd, buf, count, pos);
diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c
index 0758b5e..23ca711 100644
--- a/arch/sh/kernel/time_32.c
+++ b/arch/sh/kernel/time_32.c
@@ -1,9 +1,9 @@
 /*
- *  arch/sh/kernel/time.c
+ *  arch/sh/kernel/time_32.c
  *
  *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
  *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
- *  Copyright (C) 2002 - 2007  Paul Mundt
+ *  Copyright (C) 2002 - 2008  Paul Mundt
  *  Copyright (C) 2002  M. R. Brown  <mrbrown@linux-sh.org>
  *
  *  Some code taken from i386 version.
@@ -16,6 +16,8 @@
 #include <linux/timex.h>
 #include <linux/sched.h>
 #include <linux/clockchips.h>
+#include <linux/mc146818rtc.h>	/* for rtc_lock */
+#include <linux/smp.h>
 #include <asm/clock.h>
 #include <asm/rtc.h>
 #include <asm/timer.h>
@@ -253,6 +255,10 @@
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+	local_timer_setup(smp_processor_id());
+#endif
+
 	/*
 	 * Find the timer to use as the system timer, it will be
 	 * initialized for us.
@@ -260,6 +266,7 @@
 	sys_timer = get_sys_timer();
 	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 
+
 	if (sys_timer->ops->read)
 		clocksource_sh.read = sys_timer->ops->read;
 
diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
index 791edab..bbb2af1 100644
--- a/arch/sh/kernel/time_64.c
+++ b/arch/sh/kernel/time_64.c
@@ -39,6 +39,7 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
+#include <asm/clock.h>
 
 #define TMU_TOCR_INIT	0x00
 #define TMU0_TCR_INIT	0x0020
@@ -51,14 +52,6 @@
 #define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
 #define RTC_RCR1	(rtc_base + 0x38)
 
-/* Clock, Power and Reset Controller */
-#define	CPRC_BLOCK_OFF	0x01010000
-#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
-
-#define FRQCR		(cprc_base+0x0)
-#define WTCSR		(cprc_base+0x0018)
-#define STBCR		(cprc_base+0x0030)
-
 /* Time Management Unit */
 #define	TMU_BLOCK_OFF	0x01020000
 #define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
@@ -293,103 +286,17 @@
 	return IRQ_HANDLED;
 }
 
-
-static __init unsigned int get_cpu_hz(void)
-{
-	unsigned int count;
-	unsigned long __dummy;
-	unsigned long ctc_val_init, ctc_val;
-
-	/*
-	** Regardless the toolchain, force the compiler to use the
-	** arbitrary register r3 as a clock tick counter.
-	** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
-	*/
-	register unsigned long long  __rtc_irq_flag __asm__ ("r3");
-
-	local_irq_enable();
-	do {} while (ctrl_inb(rtc_base) != 0);
-	ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */
-
-	/*
-	 * r3 is arbitrary. CDC does not support "=z".
-	 */
-	ctc_val_init = 0xffffffff;
-	ctc_val = ctc_val_init;
-
-	asm volatile("gettr	tr0, %1\n\t"
-		     "putcon	%0, " __CTC "\n\t"
-		     "and	%2, r63, %2\n\t"
-		     "pta	$+4, tr0\n\t"
-		     "beq/l	%2, r63, tr0\n\t"
-		     "ptabs	%1, tr0\n\t"
-		     "getcon	" __CTC ", %0\n\t"
-		: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
-		: "0" (0));
-	local_irq_disable();
-	/*
-	 * SH-3:
-	 * CPU clock = 4 stages * loop
-	 * tst    rm,rm      if id ex
-	 * bt/s   1b            if id ex
-	 * add    #1,rd            if id ex
-         *                            (if) pipe line stole
-	 * tst    rm,rm                  if id ex
-         * ....
-	 *
-	 *
-	 * SH-4:
-	 * CPU clock = 6 stages * loop
-	 * I don't know why.
-         * ....
-	 *
-	 * SH-5:
-	 * Use CTC register to count.  This approach returns the right value
-	 * even if the I-cache is disabled (e.g. whilst debugging.)
-	 *
-	 */
-
-	count = ctc_val_init - ctc_val; /* CTC counts down */
-
-	/*
-	 * This really is count by the number of clock cycles
-         * by the ratio between a complete R64CNT
-         * wrap-around (128) and CUI interrupt being raised (64).
-	 */
-	return count*2;
-}
-
-static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
-{
-	struct pt_regs *regs = get_irq_regs();
-
-	ctrl_outb(0, RTC_RCR1);	/* Disable Carry Interrupts */
-	regs->regs[3] = 1;	/* Using r3 */
-
-	return IRQ_HANDLED;
-}
-
 static struct irqaction irq0  = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED,
 	.mask = CPU_MASK_NONE,
 	.name = "timer",
 };
-static struct irqaction irq1  = {
-	.handler = sh64_rtc_interrupt,
-	.flags = IRQF_DISABLED,
-	.mask = CPU_MASK_NONE,
-	.name = "rtc",
-};
 
 void __init time_init(void)
 {
-	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
 	unsigned long interval;
-	unsigned long frqcr, ifc, pfc;
-	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
-#define bfc_table ifc_table	/* Same */
-#define pfc_table ifc_table	/* Same */
+	struct clk *clk;
 
 	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
 	if (!tmu_base) {
@@ -401,50 +308,19 @@
 		panic("Unable to remap RTC\n");
 	}
 
-	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
-	if (!cprc_base) {
-		panic("Unable to remap CPRC\n");
-	}
+	clk = clk_get(NULL, "cpu_clk");
+	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) /
+			(unsigned long long)(clk_get_rate(clk) / HZ));
 
 	rtc_sh_get_time(&xtime);
 
 	setup_irq(TIMER_IRQ, &irq0);
-	setup_irq(RTC_IRQ, &irq1);
 
-	/* Check how fast it is.. */
-	cpu_clock = get_cpu_hz();
-
-	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
-	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
-
-	free_irq(RTC_IRQ, NULL);
-
-	printk("CPU clock: %d.%02dMHz\n",
-	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
-	{
-		unsigned short bfc;
-		frqcr = ctrl_inl(FRQCR);
-		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
-		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
-		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
-		master_clock = cpu_clock * ifc;
-		bus_clock = master_clock/bfc;
-	}
-
-	printk("Bus clock: %d.%02dMHz\n",
-	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
-	module_clock = master_clock/pfc;
-	printk("Module clock: %d.%02dMHz\n",
-	       (module_clock/1000000), (module_clock % 1000000)/10000);
-	interval = (module_clock/(HZ*4));
+	clk = clk_get(NULL, "module_clk");
+	interval = (clk_get_rate(clk)/(HZ*4));
 
 	printk("Interval = %ld\n", interval);
 
-	current_cpu_data.cpu_clock    = cpu_clock;
-	current_cpu_data.master_clock = master_clock;
-	current_cpu_data.bus_clock    = bus_clock;
-	current_cpu_data.module_clock = module_clock;
-
 	/* Start TMU0 */
 	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
 	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
@@ -454,36 +330,6 @@
 	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
 }
 
-void enter_deep_standby(void)
-{
-	/* Disable watchdog timer */
-	ctrl_outl(0xa5000000, WTCSR);
-	/* Configure deep standby on sleep */
-	ctrl_outl(0x03, STBCR);
-
-#ifdef CONFIG_SH_ALPHANUMERIC
-	{
-		extern void mach_alphanum(int position, unsigned char value);
-		extern void mach_alphanum_brightness(int setting);
-		char halted[] = "Halted. ";
-		int i;
-		mach_alphanum_brightness(6); /* dimmest setting above off */
-		for (i=0; i<8; i++) {
-			mach_alphanum(i, halted[i]);
-		}
-		asm __volatile__ ("synco");
-	}
-#endif
-
-	asm __volatile__ ("sleep");
-	asm __volatile__ ("synci");
-	asm __volatile__ ("nop");
-	asm __volatile__ ("nop");
-	asm __volatile__ ("nop");
-	asm __volatile__ ("nop");
-	panic("Unexpected wakeup!\n");
-}
-
 static struct resource rtc_resources[] = {
 	[0] = {
 		/* RTC base, filled in by rtc_init */
diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile
index bcf244f..0b7f857 100644
--- a/arch/sh/kernel/timers/Makefile
+++ b/arch/sh/kernel/timers/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_SH_MTU2)		+= timer-mtu2.o
 obj-$(CONFIG_SH_CMT)		+= timer-cmt.o
 
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= timer-broadcast.o
diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c
new file mode 100644
index 0000000..c231763
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-broadcast.c
@@ -0,0 +1,57 @@
+/*
+ * Dummy local timer
+ *
+ * Copyright (C) 2008  Paul Mundt
+ *
+ * cloned from:
+ *
+ *  linux/arch/arm/mach-realview/localtimer.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+
+static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
+
+/*
+ * Used on SMP for either the local timer or SMP_MSG_TIMER
+ */
+void local_timer_interrupt(void)
+{
+	struct clock_event_device *clk = &__get_cpu_var(local_clockevent);
+
+	clk->event_handler(clk);
+}
+
+static void dummy_timer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *clk)
+{
+}
+
+void __cpuinit local_timer_setup(unsigned int cpu)
+{
+	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
+
+	clk->name		= "dummy_timer";
+	clk->features		= CLOCK_EVT_FEAT_DUMMY;
+	clk->rating		= 200;
+	clk->mult		= 1;
+	clk->set_mode		= dummy_timer_set_mode;
+	clk->broadcast		= smp_timer_broadcast;
+	clk->cpumask		= cpumask_of_cpu(cpu);
+
+	clockevents_register_device(clk);
+}
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
index d20c8c3..c127293 100644
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -174,7 +174,7 @@
 	return 0;
 }
 
-struct sys_timer_ops cmt_timer_ops = {
+static struct sys_timer_ops cmt_timer_ops = {
 	.init		= cmt_timer_init,
 	.start		= cmt_timer_start,
 	.stop		= cmt_timer_stop,
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 1ca9ad4..aaaf90d 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -28,43 +28,90 @@
 #define TMU_TOCR_INIT	0x00
 #define TMU_TCR_INIT	0x0020
 
+#define TMU0		(0)
+#define TMU1		(1)
+
+static inline void _tmu_start(int tmu_num)
+{
+	ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<<tmu_num), TMU_012_TSTR);
+}
+
+static inline void _tmu_set_irq(int tmu_num, int enabled)
+{
+	register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
+	ctrl_outw( (enabled ? ctrl_inw(tmu_tcr) | (1<<5) : ctrl_inw(tmu_tcr) & ~(1<<5)), tmu_tcr);
+}
+
+static inline void _tmu_stop(int tmu_num)
+{
+	ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1<<tmu_num), TMU_012_TSTR);
+}
+
+static inline void _tmu_clear_status(int tmu_num)
+{
+	register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
+	/* Clear UNF bit */
+	ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
+}
+
+static inline unsigned long _tmu_read(int tmu_num)
+{
+        return ctrl_inl(TMU0_TCNT+0xC*tmu_num);
+}
+
 static int tmu_timer_start(void)
 {
-	ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR);
+	_tmu_start(TMU0);
+	_tmu_start(TMU1);
+	_tmu_set_irq(TMU0,1);
 	return 0;
 }
 
-static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
+static int tmu_timer_stop(void)
 {
-	ctrl_outl(interval, TMU0_TCNT);
+	_tmu_stop(TMU0);
+	_tmu_stop(TMU1);
+	_tmu_clear_status(TMU0);
+	return 0;
+}
+
+/*
+ * also when the module_clk is scaled the TMU1
+ * will show the same frequency
+ */
+static int tmus_are_scaled;
+
+static cycle_t tmu_timer_read(void)
+{
+	return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
+}
+
+
+static unsigned long tmu_latest_interval[3];
+static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
+{
+	unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
+	unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;
+
+	_tmu_stop(tmu_num);
+
+	ctrl_outl(interval, tmu_tcnt);
+	tmu_latest_interval[tmu_num] = interval;
 
 	/*
 	 * TCNT reloads from TCOR on underflow, clear it if we don't
 	 * intend to auto-reload
 	 */
-	if (reload)
-		ctrl_outl(interval, TMU0_TCOR);
-	else
-		ctrl_outl(0, TMU0_TCOR);
+	ctrl_outl( reload ? interval : 0 , tmu_tcor);
 
-	tmu_timer_start();
-}
-
-static int tmu_timer_stop(void)
-{
-	ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~0x3, TMU_012_TSTR);
-	return 0;
-}
-
-static cycle_t tmu_timer_read(void)
-{
-	return ~ctrl_inl(TMU1_TCNT);
+	_tmu_start(tmu_num);
 }
 
 static int tmu_set_next_event(unsigned long cycles,
 			      struct clock_event_device *evt)
 {
-	tmu0_timer_set_interval(cycles, 1);
+	tmu_timer_set_interval(TMU0,cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
+	_tmu_set_irq(TMU0,1);
 	return 0;
 }
 
@@ -96,12 +143,8 @@
 static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 {
 	struct clock_event_device *evt = &tmu0_clockevent;
-	unsigned long timer_status;
-
-	/* Clear UNF bit */
-	timer_status = ctrl_inw(TMU0_TCR);
-	timer_status &= ~0x100;
-	ctrl_outw(timer_status, TMU0_TCR);
+	_tmu_clear_status(TMU0);
+	_tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);
 
 	evt->event_handler(evt);
 
@@ -109,56 +152,73 @@
 }
 
 static struct irqaction tmu0_irq = {
-	.name		= "periodic timer",
+	.name		= "periodic/oneshot timer",
 	.handler	= tmu_timer_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
 	.mask		= CPU_MASK_NONE,
 };
 
-static void tmu0_clk_init(struct clk *clk)
+static void __init tmu_clk_init(struct clk *clk)
 {
-	u8 divisor = TMU_TCR_INIT & 0x7;
-	ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
+	u8 divisor  = TMU_TCR_INIT & 0x7;
+	int tmu_num = clk->name[3]-'0';
+	ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC));
+	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
 }
 
-static void tmu0_clk_recalc(struct clk *clk)
+static void tmu_clk_recalc(struct clk *clk)
 {
-	u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
+	int tmu_num = clk->name[3]-'0';
+	unsigned long prev_rate = clk_get_rate(clk);
+	unsigned long flags;
+	u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
+	clk->rate  = clk_get_rate(clk->parent) / (4 << (divisor << 1));
+
+	if(prev_rate==clk_get_rate(clk))
+		return;
+
+	if(tmu_num)
+		return; /* No more work on TMU1 */
+
+	local_irq_save(flags);
+	tmus_are_scaled = (prev_rate > clk->rate);
+
+	_tmu_stop(TMU0);
+
+	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
+				tmu0_clockevent.shift);
+	tmu0_clockevent.max_delta_ns =
+			clockevent_delta2ns(-1, &tmu0_clockevent);
+	tmu0_clockevent.min_delta_ns =
+			clockevent_delta2ns(1, &tmu0_clockevent);
+
+	if (tmus_are_scaled)
+		tmu_latest_interval[TMU0] >>= 1;
+	else
+		tmu_latest_interval[TMU0] <<= 1;
+
+	tmu_timer_set_interval(TMU0,
+		tmu_latest_interval[TMU0],
+		tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);
+
+	_tmu_start(TMU0);
+
+	local_irq_restore(flags);
 }
 
-static struct clk_ops tmu0_clk_ops = {
-	.init		= tmu0_clk_init,
-	.recalc		= tmu0_clk_recalc,
+static struct clk_ops tmu_clk_ops = {
+	.init		= tmu_clk_init,
+	.recalc		= tmu_clk_recalc,
 };
 
 static struct clk tmu0_clk = {
 	.name		= "tmu0_clk",
-	.ops		= &tmu0_clk_ops,
-};
-
-static void tmu1_clk_init(struct clk *clk)
-{
-	u8 divisor = TMU_TCR_INIT & 0x7;
-	ctrl_outw(divisor, TMU1_TCR);
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
-
-static void tmu1_clk_recalc(struct clk *clk)
-{
-	u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
-
-static struct clk_ops tmu1_clk_ops = {
-	.init		= tmu1_clk_init,
-	.recalc		= tmu1_clk_recalc,
+	.ops		= &tmu_clk_ops,
 };
 
 static struct clk tmu1_clk = {
 	.name		= "tmu1_clk",
-	.ops		= &tmu1_clk_ops,
+	.ops		= &tmu_clk_ops,
 };
 
 static int tmu_timer_init(void)
@@ -189,11 +249,12 @@
 	frequency = clk_get_rate(&tmu0_clk);
 	interval = (frequency + HZ / 2) / HZ;
 
-	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
-	ctrl_outl(~0, TMU1_TCNT);
-	ctrl_outl(~0, TMU1_TCOR);
+	tmu_timer_set_interval(TMU0,interval, 1);
+	tmu_timer_set_interval(TMU1,~0,1);
 
-	tmu0_timer_set_interval(interval, 1);
+	_tmu_start(TMU1);
+
+	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
 
 	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
 				      tmu0_clockevent.shift);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 511a942..b359b08 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -26,6 +26,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/fpu.h>
+#include <asm/kprobes.h>
 
 #ifdef CONFIG_SH_KGDB
 #include <asm/kgdb.h>
@@ -192,6 +193,7 @@
 	int ret, index, count;
 	unsigned long *rm, *rn;
 	unsigned char *src, *dst;
+	unsigned char __user *srcu, *dstu;
 
 	index = (instruction>>8)&15;	/* 0x0F00 */
 	rn = &regs->regs[index];
@@ -206,28 +208,28 @@
 	case 0: /* mov.[bwl] to/from memory via r0+rn */
 		if (instruction & 8) {
 			/* from memory */
-			src = (unsigned char*) *rm;
-			src += regs->regs[0];
-			dst = (unsigned char*) rn;
-			*(unsigned long*)dst = 0;
+			srcu = (unsigned char __user *)*rm;
+			srcu += regs->regs[0];
+			dst = (unsigned char *)rn;
+			*(unsigned long *)dst = 0;
 
 #if !defined(__LITTLE_ENDIAN__)
 			dst += 4-count;
 #endif
-			if (ma->from(dst, src, count))
+			if (ma->from(dst, srcu, count))
 				goto fetch_fault;
 
 			sign_extend(count, dst);
 		} else {
 			/* to memory */
-			src = (unsigned char*) rm;
+			src = (unsigned char *)rm;
 #if !defined(__LITTLE_ENDIAN__)
 			src += 4-count;
 #endif
-			dst = (unsigned char*) *rn;
-			dst += regs->regs[0];
+			dstu = (unsigned char __user *)*rn;
+			dstu += regs->regs[0];
 
-			if (ma->to(dst, src, count))
+			if (ma->to(dstu, src, count))
 				goto fetch_fault;
 		}
 		ret = 0;
@@ -235,10 +237,10 @@
 
 	case 1: /* mov.l Rm,@(disp,Rn) */
 		src = (unsigned char*) rm;
-		dst = (unsigned char*) *rn;
-		dst += (instruction&0x000F)<<2;
+		dstu = (unsigned char __user *)*rn;
+		dstu += (instruction&0x000F)<<2;
 
-		if (ma->to(dst, src, 4))
+		if (ma->to(dstu, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -247,28 +249,28 @@
 		if (instruction & 4)
 			*rn -= count;
 		src = (unsigned char*) rm;
-		dst = (unsigned char*) *rn;
+		dstu = (unsigned char __user *)*rn;
 #if !defined(__LITTLE_ENDIAN__)
 		src += 4-count;
 #endif
-		if (ma->to(dst, src, count))
+		if (ma->to(dstu, src, count))
 			goto fetch_fault;
 		ret = 0;
 		break;
 
 	case 5: /* mov.l @(disp,Rm),Rn */
-		src = (unsigned char*) *rm;
-		src += (instruction&0x000F)<<2;
-		dst = (unsigned char*) rn;
-		*(unsigned long*)dst = 0;
+		srcu = (unsigned char __user *)*rm;
+		srcu += (instruction & 0x000F) << 2;
+		dst = (unsigned char *)rn;
+		*(unsigned long *)dst = 0;
 
-		if (ma->from(dst, src, 4))
+		if (ma->from(dst, srcu, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
 
 	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
-		src = (unsigned char*) *rm;
+		srcu = (unsigned char __user *)*rm;
 		if (instruction & 4)
 			*rm += count;
 		dst = (unsigned char*) rn;
@@ -277,7 +279,7 @@
 #if !defined(__LITTLE_ENDIAN__)
 		dst += 4-count;
 #endif
-		if (ma->from(dst, src, count))
+		if (ma->from(dst, srcu, count))
 			goto fetch_fault;
 		sign_extend(count, dst);
 		ret = 0;
@@ -286,28 +288,28 @@
 	case 8:
 		switch ((instruction&0xFF00)>>8) {
 		case 0x81: /* mov.w R0,@(disp,Rn) */
-			src = (unsigned char*) &regs->regs[0];
+			src = (unsigned char *) &regs->regs[0];
 #if !defined(__LITTLE_ENDIAN__)
 			src += 2;
 #endif
-			dst = (unsigned char*) *rm; /* called Rn in the spec */
-			dst += (instruction&0x000F)<<1;
+			dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
+			dstu += (instruction & 0x000F) << 1;
 
-			if (ma->to(dst, src, 2))
+			if (ma->to(dstu, src, 2))
 				goto fetch_fault;
 			ret = 0;
 			break;
 
 		case 0x85: /* mov.w @(disp,Rm),R0 */
-			src = (unsigned char*) *rm;
-			src += (instruction&0x000F)<<1;
-			dst = (unsigned char*) &regs->regs[0];
-			*(unsigned long*)dst = 0;
+			srcu = (unsigned char __user *)*rm;
+			srcu += (instruction & 0x000F) << 1;
+			dst = (unsigned char *) &regs->regs[0];
+			*(unsigned long *)dst = 0;
 
 #if !defined(__LITTLE_ENDIAN__)
 			dst += 2;
 #endif
-			if (ma->from(dst, src, 2))
+			if (ma->from(dst, srcu, 2))
 				goto fetch_fault;
 			sign_extend(2, dst);
 			ret = 0;
@@ -333,7 +335,8 @@
 				   struct mem_access *ma)
 {
 	opcode_t instruction;
-	void *addr = (void *)(regs->pc + instruction_size(old_instruction));
+	void __user *addr = (void __user *)(regs->pc +
+		instruction_size(old_instruction));
 
 	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
 		/* the instruction-fetch faulted */
@@ -511,14 +514,6 @@
 	return ret;
 }
 
-#ifdef CONFIG_CPU_HAS_SR_RB
-#define lookup_exception_vector(x)	\
-	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
-#else
-#define lookup_exception_vector(x)	\
-	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
-#endif
-
 /*
  * Handle various address error exceptions:
  *  - instruction address error:
@@ -542,7 +537,7 @@
 
 	/* Intentional ifdef */
 #ifdef CONFIG_CPU_HAS_SR_RB
-	lookup_exception_vector(error_code);
+	error_code = lookup_exception_vector();
 #endif
 
 	oldfs = get_fs();
@@ -559,7 +554,7 @@
 		}
 
 		set_fs(USER_DS);
-		if (copy_from_user(&instruction, (void *)(regs->pc),
+		if (copy_from_user(&instruction, (void __user *)(regs->pc),
 				   sizeof(instruction))) {
 			/* Argh. Fault on the instruction itself.
 			   This should never happen non-SMP
@@ -589,7 +584,7 @@
 			die("unaligned program counter", regs, error_code);
 
 		set_fs(KERNEL_DS);
-		if (copy_from_user(&instruction, (void *)(regs->pc),
+		if (copy_from_user(&instruction, (void __user *)(regs->pc),
 				   sizeof(instruction))) {
 			/* Argh. Fault on the instruction itself.
 			   This should never happen non-SMP
@@ -683,7 +678,7 @@
 	}
 #endif
 
-	lookup_exception_vector(error_code);
+	error_code = lookup_exception_vector();
 
 	local_irq_enable();
 	CHK_REMOTE_DEBUG(regs);
@@ -739,11 +734,13 @@
 				struct pt_regs __regs)
 {
 	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-	unsigned long error_code;
+	unsigned long inst;
 	struct task_struct *tsk = current;
-#ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst = 0;
 
+	if (kprobe_handle_illslot(regs->pc) == 0)
+		return;
+
+#ifdef CONFIG_SH_FPU_EMU
 	get_user(inst, (unsigned short *)regs->pc + 1);
 	if (!do_fpu_inst(inst, regs)) {
 		get_user(inst, (unsigned short *)regs->pc);
@@ -754,12 +751,12 @@
 	/* not a FPU inst. */
 #endif
 
-	lookup_exception_vector(error_code);
+	inst = lookup_exception_vector();
 
 	local_irq_enable();
 	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("illegal slot instruction", regs, error_code);
+	die_if_no_fixup("illegal slot instruction", regs, inst);
 }
 
 asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
@@ -769,7 +766,7 @@
 	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	long ex;
 
-	lookup_exception_vector(ex);
+	ex = lookup_exception_vector();
 	die_if_kernel("exception", regs, ex);
 }
 
diff --git a/arch/sh/lib/div64-generic.c b/arch/sh/lib/div64-generic.c
index 4bef3b5..60e76aa 100644
--- a/arch/sh/lib/div64-generic.c
+++ b/arch/sh/lib/div64-generic.c
@@ -3,6 +3,7 @@
  */
 
 #include <linux/types.h>
+#include <asm/div64.h>
 
 extern uint64_t __xdiv64_32(u64 n, u32 d);
 
diff --git a/arch/sh/lib/io.c b/arch/sh/lib/io.c
index 4f54ec4..88dfe6e 100644
--- a/arch/sh/lib/io.c
+++ b/arch/sh/lib/io.c
@@ -14,12 +14,12 @@
 #include <linux/module.h>
 #include <linux/io.h>
 
-void __raw_readsl(unsigned long addr, void *datap, int len)
+void __raw_readsl(const void __iomem *addr, void *datap, int len)
 {
 	u32 *data;
 
 	for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
-		*data++ = ctrl_inl(addr);
+		*data++ = __raw_readl(addr);
 
 	if (likely(len >= (0x20 >> 2))) {
 		int tmp2, tmp3, tmp4, tmp5, tmp6;
@@ -59,11 +59,11 @@
 	}
 
 	for (; len != 0; len--)
-		*data++ = ctrl_inl(addr);
+		*data++ = __raw_readl(addr);
 }
 EXPORT_SYMBOL(__raw_readsl);
 
-void __raw_writesl(unsigned long addr, const void *data, int len)
+void __raw_writesl(void __iomem *addr, const void *data, int len)
 {
 	if (likely(len != 0)) {
 		int tmp1;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 8a03926..555ec97 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -132,7 +132,11 @@
 
 config ARCH_ENABLE_MEMORY_HOTPLUG
 	def_bool y
-	depends on SPARSEMEM
+	depends on SPARSEMEM && MMU
+
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+	def_bool y
+	depends on SPARSEMEM && MMU
 
 config ARCH_MEMORY_PROBE
 	def_bool y
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 0e189cc..5ba067b 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -130,12 +130,18 @@
 	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
 					    (unsigned int *)CACHE_TYPE_DCACHE,
 					    &cache_debugfs_fops);
+	if (!dcache_dentry)
+		return -ENOMEM;
 	if (IS_ERR(dcache_dentry))
 		return PTR_ERR(dcache_dentry);
 
 	icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
 					    (unsigned int *)CACHE_TYPE_ICACHE,
 					    &cache_debugfs_fops);
+	if (!icache_dentry) {
+		debugfs_remove(dcache_dentry);
+		return -ENOMEM;
+	}
 	if (IS_ERR(icache_dentry)) {
 		debugfs_remove(dcache_dentry);
 		return PTR_ERR(icache_dentry);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 1fdc8d9..5cfe08d 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -261,7 +261,7 @@
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static inline void __uses_jump_to_uncached flush_icache_all(void)
+static void __uses_jump_to_uncached flush_icache_all(void)
 {
 	unsigned long flags, ccr;
 
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 64b8f7f..9f8ea3a 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -16,14 +16,6 @@
 #include <asm/addrspace.h>
 #include <asm/io.h>
 
-struct dma_coherent_mem {
-	void		*virt_base;
-	u32		device_base;
-	int		size;
-	int		flags;
-	unsigned long	*bitmap;
-};
-
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -44,7 +36,7 @@
 	 */
 	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
 
-	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
 	if (!ret_nocache) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
@@ -58,12 +50,10 @@
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	int order = get_order(size);
 
 	if (!dma_release_from_coherent(dev, order, vaddr)) {
 		WARN_ON(irqs_disabled());	/* for portability */
-		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
 		free_pages((unsigned long)phys_to_virt(dma_handle), order);
 		iounmap(vaddr);
 	}
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 0c776fd..898d477 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
  * Page fault handler for SH with an MMU.
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2003 - 2007  Paul Mundt
+ *  Copyright (C) 2003 - 2008  Paul Mundt
  *
  *  Based on linux/arch/i386/mm/fault.c:
  *   Copyright (C) 1995  Linus Torvalds
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/marker.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
@@ -37,10 +38,10 @@
 	int fault;
 	siginfo_t info;
 
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
+	/*
+	 * We don't bother with any notifier callbacks here, as they are
+	 * all handled through the __do_page_fault() fast-path.
+	 */
 
 	tsk = current;
 	si_code = SEGV_MAPERR;
@@ -61,7 +62,6 @@
 		pgd = get_TTB() + offset;
 		pgd_k = swapper_pg_dir + offset;
 
-		/* This will never happen with the folded page table. */
 		if (!pgd_present(*pgd)) {
 			if (!pgd_present(*pgd_k))
 				goto bad_area_nosemaphore;
@@ -71,9 +71,13 @@
 
 		pud = pud_offset(pgd, address);
 		pud_k = pud_offset(pgd_k, address);
-		if (pud_present(*pud) || !pud_present(*pud_k))
-			goto bad_area_nosemaphore;
-		set_pud(pud, *pud_k);
+
+		if (!pud_present(*pud)) {
+			if (!pud_present(*pud_k))
+				goto bad_area_nosemaphore;
+			set_pud(pud, *pud_k);
+			return;
+		}
 
 		pmd = pmd_offset(pud, address);
 		pmd_k = pmd_offset(pud_k, address);
@@ -242,6 +246,25 @@
 		goto no_context;
 }
 
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+	int ret = 0;
+
+	trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld",
+		   trap >> 5, instruction_pointer(regs));
+
+#ifdef CONFIG_KPROBES
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, trap))
+			ret = 1;
+		preempt_enable();
+	}
+#endif
+
+	return ret;
+}
+
 #ifdef CONFIG_SH_STORE_QUEUES
 /*
  * This is a special case for the SH-4 store queues, as pages for this
@@ -265,12 +288,18 @@
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	int ret = 0;
+
+	if (notify_page_fault(regs, lookup_exception_vector()))
+		goto out;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
 		kgdb_bus_err_hook();
 #endif
 
+	ret = 1;
+
 	/*
 	 * We don't take page faults for P1, P2, and parts of P4, these
 	 * are always mapped, whether it be due to legacy behaviour in
@@ -280,24 +309,23 @@
 		pgd = pgd_offset_k(address);
 	} else {
 		if (unlikely(address >= TASK_SIZE || !current->mm))
-			return 1;
+			goto out;
 
 		pgd = pgd_offset(current->mm, address);
 	}
 
 	pud = pud_offset(pgd, address);
 	if (pud_none_or_clear_bad(pud))
-		return 1;
+		goto out;
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
+		goto out;
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		return 1;
+		goto out;
 	if (unlikely(writeaccess && !pte_write(entry)))
-		return 1;
+		goto out;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -314,5 +342,8 @@
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
 
-	return 0;
+	ret = 0;
+out:
+	trace_mark(kernel_arch_trap_exit, MARK_NOARGS);
+	return ret;
 }
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index b75a7ac..2a53943 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -23,7 +23,19 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long cached_to_uncached = 0;
+
+#ifdef CONFIG_SUPERH32
+/*
+ * Handle trivial transitions between cached and uncached
+ * segments, making use of the 1:1 mapping relationship in
+ * 512MB lowmem.
+ *
+ * This is the offset of the uncached section from its cached alias.
+ * Default value only valid in 29 bit mode, in 32bit mode will be
+ * overridden in pmb_init.
+ */
+unsigned long cached_to_uncached = P2SEG - P1SEG;
+#endif
 
 #ifdef CONFIG_MMU
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -58,9 +70,7 @@
 	}
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
-
-	if (cached_to_uncached)
-		flush_tlb_one(get_asid(), addr);
+	flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -113,7 +123,6 @@
 		if (!pmd_present(*pmd)) {
 			pte_t *pte_table;
 			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			memset(pte_table, 0, PAGE_SIZE);
 			pmd_populate_kernel(&init_mm, pmd, pte_table);
 		}
 
@@ -165,15 +174,6 @@
 #ifdef CONFIG_SUPERH32
 	/* Set up the uncached fixmap */
 	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-
-#ifdef CONFIG_29BIT
-	/*
-	 * Handle trivial transitions between cached and uncached
-	 * segments, making use of the 1:1 mapping relationship in
-	 * 512MB lowmem.
-	 */
-	cached_to_uncached = P2SEG - P1SEG;
-#endif
 #endif
 }
 
@@ -265,6 +265,35 @@
 }
 #endif
 
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+	struct thread_info *ti;
+
+	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	if (unlikely(ti == NULL))
+		return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	memset(ti, 0, THREAD_SIZE);
+#endif
+	return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
@@ -292,4 +321,21 @@
 }
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int remove_memory(u64 start, u64 size)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = start_pfn + (size >> PAGE_SHIFT);
+	int ret;
+
+	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
+	if (unlikely(ret))
+		printk("%s: Failed, offline_pages() == %d\n", __func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
 #endif
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c
index 677dd57..91ed4e6 100644
--- a/arch/sh/mm/pg-nommu.c
+++ b/arch/sh/mm/pg-nommu.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <asm/page.h>
+#include <asm/uaccess.h>
 
 void copy_page(void *to, void *from)
 {
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index cef7276..8424167 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -394,6 +394,8 @@
 
 	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
 				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
+	if (!dentry)
+		return -ENOMEM;
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
 
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index 15111bc..71c742b 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 /*
  * Nothing too terribly exciting here ..
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 0a11cc08..d4fb11f 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -30,6 +30,7 @@
 DREAMCAST		SH_DREAMCAST
 SNAPGEAR		SH_SECUREEDGE5410
 EDOSK7705		SH_EDOSK7705
+EDOSK7760		SH_EDOSK7760
 SH4202_MICRODEV		SH_SH4202_MICRODEV
 SH03			SH_SH03
 LANDISK			SH_LANDISK
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 97671da..e594559 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "General machine setup"
 
 config SMP
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 29899fd..80fe547 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -135,6 +135,7 @@
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling
 					 * TIF_NEED_RESCHED */
 #define TIF_MEMDIE		10
+#define TIF_FREEZE		11	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -148,6 +149,7 @@
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
 					 _TIF_SIGPENDING | \
 					 _TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index c0a737d..639ac80 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -237,6 +237,7 @@
 #define TIF_ABI_PENDING		12
 #define TIF_MEMDIE		13
 #define TIF_POLLING_NRFLAG	14
+#define TIF_FREEZE		15	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -249,6 +250,7 @@
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
 				 _TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index 9ab815b..17bb603 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -12,7 +12,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
  
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 5446e2a..035b15a 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -96,6 +96,7 @@
 	def_bool y
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
diff --git a/arch/sparc64/oprofile/init.c b/arch/sparc64/oprofile/init.c
index 9ab815b..17bb603 100644
--- a/arch/sparc64/oprofile/init.c
+++ b/arch/sparc64/oprofile/init.c
@@ -12,7 +12,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
  
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 6976812..393bccf 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -229,6 +229,8 @@
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 source "drivers/block/Kconfig"
 
 source "arch/um/Kconfig.char"
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
index fd0c25a..1296473 100644
--- a/arch/um/sys-i386/signal.c
+++ b/arch/um/sys-i386/signal.c
@@ -179,7 +179,8 @@
 	if (have_fpx_regs) {
 		struct user_fxsr_struct fpx;
 
-		err = copy_from_user(&fpx, &sc.fpstate->_fxsr_env[0],
+		err = copy_from_user(&fpx,
+			&((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
 				     sizeof(struct user_fxsr_struct));
 		if (err)
 			return 1;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bd3c2c5..5b9b123 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_KRETPROBES
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
@@ -193,6 +194,7 @@
 config KTIME_SCALAR
 	def_bool X86_32
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
@@ -1241,14 +1243,6 @@
   	resultant kernel should continue to boot on existing non-EFI
   	platforms.
 
-config IRQBALANCE
-	def_bool y
-	prompt "Enable kernel irq balancing"
-	depends on X86_32 && SMP && X86_IO_APIC
-	help
-	  The default yes will allow the kernel to do irq load balancing.
-	  Saying no will keep the kernel from doing irq load balancing.
-
 config SECCOMP
 	def_bool y
 	prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 52d0359..13b8c86 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -287,7 +287,6 @@
 # CONFIG_MTRR_SANITIZER is not set
 CONFIG_X86_PAT=y
 CONFIG_EFI=y
-# CONFIG_IRQBALANCE is not set
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_250 is not set
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0d41f03..d7e5a58 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,7 +23,7 @@
 CFLAGS_tsc.o		:= $(nostackp)
 
 obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-obj-y			+= traps.o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o
 obj-y			+= setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
@@ -60,8 +60,8 @@
 obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi.o
-obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
+obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
@@ -108,7 +108,7 @@
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
         obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-	obj-y				+= bios_uv.o
+	obj-y				+= bios_uv.o uv_irq.o uv_sysfs.o
         obj-y				+= genx2apic_cluster.o
         obj-y				+= genx2apic_phys.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index eb875cd..0d1c26a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1256,7 +1256,7 @@
 
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX
 		       "Error parsing interrupt source overrides entry\n");
@@ -1276,7 +1276,7 @@
 
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 426e5d9..c44cd6d 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -10,6 +10,7 @@
 #include <linux/dmi.h>
 #include <linux/cpumask.h>
 #include <asm/segment.h>
+#include <asm/desc.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -98,6 +99,8 @@
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
 	stack_start.sp = temp_stack + 4096;
+	early_gdt_descr.address =
+			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 #endif
 	initial_code = (unsigned long)wakeup_long64;
 	saved_magic = 0x123456789abcdef0;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 4cd8083..0cdcda3 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -212,7 +212,7 @@
 /* Programs the physical address of the device table into the IOMMU hardware */
 static void __init iommu_set_device_table(struct amd_iommu *iommu)
 {
-	u32 entry;
+	u64 entry;
 
 	BUG_ON(iommu->mmio_base == NULL);
 
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic.c
similarity index 78%
rename from arch/x86/kernel/apic_32.c
rename to arch/x86/kernel/apic.c
index 21c831d..04a7f96 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic.c
@@ -23,11 +23,13 @@
 #include <linux/mc146818rtc.h>
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
+#include <linux/ioport.h>
 #include <linux/cpu.h>
 #include <linux/clockchips.h>
 #include <linux/acpi_pmtmr.h>
 #include <linux/module.h>
 #include <linux/dmi.h>
+#include <linux/dmar.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -36,8 +38,14 @@
 #include <asm/desc.h>
 #include <asm/arch_hooks.h>
 #include <asm/hpet.h>
+#include <asm/pgalloc.h>
 #include <asm/i8253.h>
 #include <asm/nmi.h>
+#include <asm/idle.h>
+#include <asm/proto.h>
+#include <asm/timex.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>
 
 #include <mach_apic.h>
 #include <mach_apicdef.h>
@@ -50,16 +58,58 @@
 # error SPURIOUS_APIC_VECTOR definition error
 #endif
 
-unsigned long mp_lapic_addr;
-
+#ifdef CONFIG_X86_32
 /*
  * Knob to control our willingness to enable the local APIC.
  *
  * +1=force-enable
  */
 static int force_enable_local_apic;
-int disable_apic;
+/*
+ * APIC command line parameters
+ */
+static int __init parse_lapic(char *arg)
+{
+	force_enable_local_apic = 1;
+	return 0;
+}
+early_param("lapic", parse_lapic);
+/* Local APIC was disabled by the BIOS and enabled by the kernel */
+static int enabled_via_apicbase;
 
+#endif
+
+#ifdef CONFIG_X86_64
+static int apic_calibrate_pmtmr __initdata;
+static __init int setup_apicpmtimer(char *s)
+{
+	apic_calibrate_pmtmr = 1;
+	notsc_setup(NULL);
+	return 0;
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+#endif
+
+#ifdef CONFIG_X86_64
+#define HAVE_X2APIC
+#endif
+
+#ifdef HAVE_X2APIC
+int x2apic;
+/* x2apic enabled before OS handover */
+int x2apic_preenabled;
+int disable_x2apic;
+static __init int setup_nox2apic(char *str)
+{
+	disable_x2apic = 1;
+	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+	return 0;
+}
+early_param("nox2apic", setup_nox2apic);
+#endif
+
+unsigned long mp_lapic_addr;
+int disable_apic;
 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
 static int disable_apic_timer __cpuinitdata;
 /* Local APIC timer works in C2 */
@@ -110,9 +160,6 @@
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
-/* Local APIC was disabled by the BIOS and enabled by the kernel */
-static int enabled_via_apicbase;
-
 static unsigned long apic_phys;
 
 /*
@@ -202,6 +249,42 @@
 struct apic_ops __read_mostly *apic_ops = &xapic_ops;
 EXPORT_SYMBOL_GPL(apic_ops);
 
+#ifdef HAVE_X2APIC
+static void x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return;
+}
+
+static u32 safe_x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return 0;
+}
+
+void x2apic_icr_write(u32 low, u32 id)
+{
+	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+u64 x2apic_icr_read(void)
+{
+	unsigned long val;
+
+	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+	return val;
+}
+
+static struct apic_ops x2apic_ops = {
+	.read = native_apic_msr_read,
+	.write = native_apic_msr_write,
+	.icr_read = x2apic_icr_read,
+	.icr_write = x2apic_icr_write,
+	.wait_icr_idle = x2apic_wait_icr_idle,
+	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
+};
+#endif
+
 /**
  * enable_NMI_through_LVT0 - enable NMI through local vector table 0
  */
@@ -219,6 +302,7 @@
 	apic_write(APIC_LVT0, v);
 }
 
+#ifdef CONFIG_X86_32
 /**
  * get_physical_broadcast - Get number of physical broadcast IDs
  */
@@ -226,6 +310,7 @@
 {
 	return modern_apic() ? 0xff : 0xf;
 }
+#endif
 
 /**
  * lapic_get_maxlvt - get the maximum number of local vector table entries
@@ -247,11 +332,7 @@
  */
 
 /* Clock divisor */
-#ifdef CONFG_X86_64
-#define APIC_DIVISOR 1
-#else
 #define APIC_DIVISOR 16
-#endif
 
 /*
  * This function sets up the local APIC timer, with a timeout of
@@ -383,7 +464,7 @@
  * Setup the local APIC timer for this CPU. Copy the initilized values
  * of the boot CPU and register the clock event in the framework.
  */
-static void __devinit setup_APIC_timer(void)
+static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
@@ -453,14 +534,51 @@
 	}
 }
 
+static int __init calibrate_by_pmtimer(long deltapm, long *delta)
+{
+	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
+	const long pm_thresh = pm_100ms / 100;
+	unsigned long mult;
+	u64 res;
+
+#ifndef CONFIG_X86_PM_TIMER
+	return -1;
+#endif
+
+	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
+
+	/* Check, if the PM timer is available */
+	if (!deltapm)
+		return -1;
+
+	mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
+
+	if (deltapm > (pm_100ms - pm_thresh) &&
+	    deltapm < (pm_100ms + pm_thresh)) {
+		apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
+	} else {
+		res = (((u64)deltapm) *  mult) >> 22;
+		do_div(res, 1000000);
+		printk(KERN_WARNING "APIC calibration not consistent "
+			"with PM Timer: %ldms instead of 100ms\n",
+			(long)res);
+		/* Correct the lapic counter value */
+		res = (((u64)(*delta)) * pm_100ms);
+		do_div(res, deltapm);
+		printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
+			"%lu (%ld)\n", (unsigned long)res, *delta);
+		*delta = (long)res;
+	}
+
+	return 0;
+}
+
 static int __init calibrate_APIC_clock(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
-	const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
-	const long pm_thresh = pm_100ms/100;
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
-	long delta, deltapm;
+	long delta;
 	int pm_referenced = 0;
 
 	local_irq_disable();
@@ -470,10 +588,10 @@
 	global_clock_event->event_handler = lapic_cal_handler;
 
 	/*
-	 * Setup the APIC counter to 1e9. There is no way the lapic
+	 * Setup the APIC counter to maximum. There is no way the lapic
 	 * can underflow in the 100ms detection time frame
 	 */
-	__setup_APIC_LVTT(1000000000, 0, 0);
+	__setup_APIC_LVTT(0xffffffff, 0, 0);
 
 	/* Let the interrupts run */
 	local_irq_enable();
@@ -490,34 +608,9 @@
 	delta = lapic_cal_t1 - lapic_cal_t2;
 	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
 
-	/* Check, if the PM timer is available */
-	deltapm = lapic_cal_pm2 - lapic_cal_pm1;
-	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
-
-	if (deltapm) {
-		unsigned long mult;
-		u64 res;
-
-		mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
-
-		if (deltapm > (pm_100ms - pm_thresh) &&
-		    deltapm < (pm_100ms + pm_thresh)) {
-			apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-		} else {
-			res = (((u64) deltapm) *  mult) >> 22;
-			do_div(res, 1000000);
-			printk(KERN_WARNING "APIC calibration not consistent "
-			       "with PM Timer: %ldms instead of 100ms\n",
-			       (long)res);
-			/* Correct the lapic counter value */
-			res = (((u64) delta) * pm_100ms);
-			do_div(res, deltapm);
-			printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
-			       "%lu (%ld)\n", (unsigned long) res, delta);
-			delta = (long) res;
-		}
-		pm_referenced = 1;
-	}
+	/* we trust the PM based calibration if possible */
+	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
+					&delta);
 
 	/* Calculate the scaled math multiplication factor */
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
@@ -559,7 +652,10 @@
 
 	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
-	/* We trust the pm timer based calibration */
+	/*
+	 * PM timer calibration failed or not turned on
+	 * so lets try APIC timer based calibration
+	 */
 	if (!pm_referenced) {
 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
@@ -652,7 +748,7 @@
 	setup_APIC_timer();
 }
 
-void __devinit setup_secondary_APIC_clock(void)
+void __cpuinit setup_secondary_APIC_clock(void)
 {
 	setup_APIC_timer();
 }
@@ -718,6 +814,9 @@
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
+#ifdef CONFIG_X86_64
+	exit_idle();
+#endif
 	irq_enter();
 	local_apic_timer_interrupt();
 	irq_exit();
@@ -991,40 +1090,43 @@
 
 static void __cpuinit lapic_setup_esr(void)
 {
-	unsigned long oldvalue, value, maxlvt;
-	if (lapic_is_integrated() && !esr_disable) {
-		if (esr_disable) {
-			/*
-			 * Something untraceable is creating bad interrupts on
-			 * secondary quads ... for the moment, just leave the
-			 * ESR disabled - we can't do anything useful with the
-			 * errors anyway - mbligh
-			 */
-			printk(KERN_INFO "Leaving ESR disabled.\n");
-			return;
-		}
-		/* !82489DX */
-		maxlvt = lapic_get_maxlvt();
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-		oldvalue = apic_read(APIC_ESR);
+	unsigned int oldvalue, value, maxlvt;
 
-		/* enables sending errors */
-		value = ERROR_APIC_VECTOR;
-		apic_write(APIC_LVTERR, value);
-		/*
-		 * spec says clear errors after enabling vector.
-		 */
-		if (maxlvt > 3)
-			apic_write(APIC_ESR, 0);
-		value = apic_read(APIC_ESR);
-		if (value != oldvalue)
-			apic_printk(APIC_VERBOSE, "ESR value before enabling "
-				"vector: 0x%08lx  after: 0x%08lx\n",
-				oldvalue, value);
-	} else {
+	if (!lapic_is_integrated()) {
 		printk(KERN_INFO "No ESR for 82489DX.\n");
+		return;
 	}
+
+	if (esr_disable) {
+		/*
+		 * Something untraceable is creating bad interrupts on
+		 * secondary quads ... for the moment, just leave the
+		 * ESR disabled - we can't do anything useful with the
+		 * errors anyway - mbligh
+		 */
+		printk(KERN_INFO "Leaving ESR disabled.\n");
+		return;
+	}
+
+	maxlvt = lapic_get_maxlvt();
+	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
+		apic_write(APIC_ESR, 0);
+	oldvalue = apic_read(APIC_ESR);
+
+	/* enables sending errors */
+	value = ERROR_APIC_VECTOR;
+	apic_write(APIC_LVTERR, value);
+
+	/*
+	 * spec says clear errors after enabling vector.
+	 */
+	if (maxlvt > 3)
+		apic_write(APIC_ESR, 0);
+	value = apic_read(APIC_ESR);
+	if (value != oldvalue)
+		apic_printk(APIC_VERBOSE, "ESR value before enabling "
+			"vector: 0x%08x  after: 0x%08x\n",
+			oldvalue, value);
 }
 
 
@@ -1033,24 +1135,27 @@
  */
 void __cpuinit setup_local_APIC(void)
 {
-	unsigned long value, integrated;
+	unsigned int value;
 	int i, j;
 
+#ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
-	if (esr_disable) {
+	if (lapic_is_integrated() && esr_disable) {
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 	}
+#endif
 
-	integrated = lapic_is_integrated();
+	preempt_disable();
 
 	/*
 	 * Double-check whether this APIC is really registered.
+	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
 	if (!apic_id_registered())
-		WARN_ON_ONCE(1);
+		BUG();
 
 	/*
 	 * Intel recommends to set DFR, LDR and TPR before enabling
@@ -1096,6 +1201,7 @@
 	 */
 	value |= APIC_SPIV_APIC_ENABLED;
 
+#ifdef CONFIG_X86_32
 	/*
 	 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
 	 * certain networking cards. If high frequency interrupts are
@@ -1116,8 +1222,13 @@
 	 * See also the comment in end_level_ioapic_irq().  --macro
 	 */
 
-	/* Enable focus processor (bit==0) */
+	/*
+	 * - enable focus processor (bit==0)
+	 * - 64bit mode always use processor focus
+	 *   so no need to set it
+	 */
 	value &= ~APIC_SPIV_FOCUS_DISABLED;
+#endif
 
 	/*
 	 * Set spurious IRQ vector
@@ -1154,9 +1265,11 @@
 		value = APIC_DM_NMI;
 	else
 		value = APIC_DM_NMI | APIC_LVT_MASKED;
-	if (!integrated)		/* 82489DX */
+	if (!lapic_is_integrated())		/* 82489DX */
 		value |= APIC_LVT_LEVEL_TRIGGER;
 	apic_write(APIC_LVT1, value);
+
+	preempt_enable();
 }
 
 void __cpuinit end_local_APIC_setup(void)
@@ -1177,6 +1290,153 @@
 	apic_pm_activate();
 }
 
+#ifdef HAVE_X2APIC
+void check_x2apic(void)
+{
+	int msr, msr2;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+
+	if (msr & X2APIC_ENABLE) {
+		printk("x2apic enabled by BIOS, switching to x2apic ops\n");
+		x2apic_preenabled = x2apic = 1;
+		apic_ops = &x2apic_ops;
+	}
+}
+
+void enable_x2apic(void)
+{
+	int msr, msr2;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	if (!(msr & X2APIC_ENABLE)) {
+		printk("Enabling x2apic\n");
+		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
+	}
+}
+
+void enable_IR_x2apic(void)
+{
+#ifdef CONFIG_INTR_REMAP
+	int ret;
+	unsigned long flags;
+
+	if (!cpu_has_x2apic)
+		return;
+
+	if (!x2apic_preenabled && disable_x2apic) {
+		printk(KERN_INFO
+		       "Skipped enabling x2apic and Interrupt-remapping "
+		       "because of nox2apic\n");
+		return;
+	}
+
+	if (x2apic_preenabled && disable_x2apic)
+		panic("Bios already enabled x2apic, can't enforce nox2apic");
+
+	if (!x2apic_preenabled && skip_ioapic_setup) {
+		printk(KERN_INFO
+		       "Skipped enabling x2apic and Interrupt-remapping "
+		       "because of skipping io-apic setup\n");
+		return;
+	}
+
+	ret = dmar_table_init();
+	if (ret) {
+		printk(KERN_INFO
+		       "dmar_table_init() failed with %d:\n", ret);
+
+		if (x2apic_preenabled)
+			panic("x2apic enabled by bios. But IR enabling failed");
+		else
+			printk(KERN_INFO
+			       "Not enabling x2apic,Intr-remapping\n");
+		return;
+	}
+
+	local_irq_save(flags);
+	mask_8259A();
+
+	ret = save_mask_IO_APIC_setup();
+	if (ret) {
+		printk(KERN_INFO "Saving IO-APIC state failed: %d\n", ret);
+		goto end;
+	}
+
+	ret = enable_intr_remapping(1);
+
+	if (ret && x2apic_preenabled) {
+		local_irq_restore(flags);
+		panic("x2apic enabled by bios. But IR enabling failed");
+	}
+
+	if (ret)
+		goto end_restore;
+
+	if (!x2apic) {
+		x2apic = 1;
+		apic_ops = &x2apic_ops;
+		enable_x2apic();
+	}
+
+end_restore:
+	if (ret)
+		/*
+		 * IR enabling failed
+		 */
+		restore_IO_APIC_setup();
+	else
+		reinit_intr_remapped_IO_APIC(x2apic_preenabled);
+
+end:
+	unmask_8259A();
+	local_irq_restore(flags);
+
+	if (!ret) {
+		if (!x2apic_preenabled)
+			printk(KERN_INFO
+			       "Enabled x2apic and interrupt-remapping\n");
+		else
+			printk(KERN_INFO
+			       "Enabled Interrupt-remapping\n");
+	} else
+		printk(KERN_ERR
+		       "Failed to enable Interrupt-remapping and x2apic\n");
+#else
+	if (!cpu_has_x2apic)
+		return;
+
+	if (x2apic_preenabled)
+		panic("x2apic enabled prior OS handover,"
+		      " enable CONFIG_INTR_REMAP");
+
+	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
+	       " and x2apic\n");
+#endif
+
+	return;
+}
+#endif /* HAVE_X2APIC */
+
+#ifdef CONFIG_X86_64
+/*
+ * Detect and enable local APICs on non-SMP boards.
+ * Original code written by Keir Fraser.
+ * On AMD64 we trust the BIOS - if it says no APIC it is likely
+ * not correctly set up (usually the APIC timer won't work etc.)
+ */
+static int __init detect_init_APIC(void)
+{
+	if (!cpu_has_apic) {
+		printk(KERN_INFO "No local APIC present\n");
+		return -1;
+	}
+
+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+	boot_cpu_physical_apicid = 0;
+	return 0;
+}
+#else
 /*
  * Detect and initialize APIC
  */
@@ -1255,12 +1515,46 @@
 	printk(KERN_INFO "No local APIC present or hardware disabled\n");
 	return -1;
 }
+#endif
+
+#ifdef CONFIG_X86_64
+void __init early_init_lapic_mapping(void)
+{
+	unsigned long phys_addr;
+
+	/*
+	 * If no local APIC can be found then go out
+	 * : it means there is no mpatable and MADT
+	 */
+	if (!smp_found_config)
+		return;
+
+	phys_addr = mp_lapic_addr;
+
+	set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
+	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+		    APIC_BASE, phys_addr);
+
+	/*
+	 * Fetch the APIC ID of the BSP in case we have a
+	 * default configuration (or the MP table is broken).
+	 */
+	boot_cpu_physical_apicid = read_apic_id();
+}
+#endif
 
 /**
  * init_apic_mappings - initialize APIC mappings
  */
 void __init init_apic_mappings(void)
 {
+#ifdef HAVE_X2APIC
+	if (x2apic) {
+		boot_cpu_physical_apicid = read_apic_id();
+		return;
+	}
+#endif
+
 	/*
 	 * If no local APIC can be found then set up a fake all
 	 * zeroes page to simulate the local APIC and another
@@ -1273,8 +1567,8 @@
 		apic_phys = mp_lapic_addr;
 
 	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-	printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
-	       apic_phys);
+	apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+				APIC_BASE, apic_phys);
 
 	/*
 	 * Fetch the APIC ID of the BSP in case we have a
@@ -1282,18 +1576,27 @@
 	 */
 	if (boot_cpu_physical_apicid == -1U)
 		boot_cpu_physical_apicid = read_apic_id();
-
 }
 
 /*
  * This initializes the IO-APIC and APIC hardware if this is
  * a UP kernel.
  */
-
 int apic_version[MAX_APICS];
 
 int __init APIC_init_uniprocessor(void)
 {
+#ifdef CONFIG_X86_64
+	if (disable_apic) {
+		printk(KERN_INFO "Apic disabled\n");
+		return -1;
+	}
+	if (!cpu_has_apic) {
+		disable_apic = 1;
+		printk(KERN_INFO "Apic disabled by BIOS\n");
+		return -1;
+	}
+#else
 	if (!smp_found_config && !cpu_has_apic)
 		return -1;
 
@@ -1302,39 +1605,68 @@
 	 */
 	if (!cpu_has_apic &&
 	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
-		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+		printk(KERN_ERR "BIOS bug, local APIC 0x%x not detected!...\n",
 		       boot_cpu_physical_apicid);
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 		return -1;
 	}
+#endif
+
+#ifdef HAVE_X2APIC
+	enable_IR_x2apic();
+#endif
+#ifdef CONFIG_X86_64
+	setup_apic_routing();
+#endif
 
 	verify_local_APIC();
-
 	connect_bsp_APIC();
 
+#ifdef CONFIG_X86_64
+	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
+#else
 	/*
 	 * Hack: In case of kdump, after a crash, kernel might be booting
 	 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
 	 * might be zero if read from MP tables. Get it from LAPIC.
 	 */
-#ifdef CONFIG_CRASH_DUMP
+# ifdef CONFIG_CRASH_DUMP
 	boot_cpu_physical_apicid = read_apic_id();
+# endif
 #endif
 	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
-
 	setup_local_APIC();
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Now enable the IO-APICs; this actually calls clear_IO_APIC(),
+	 * which we need before enabling vectors on the BP.
+	 */
+	if (!skip_ioapic_setup && nr_ioapics)
+		enable_IO_APIC();
+#endif
+
 #ifdef CONFIG_X86_IO_APIC
 	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
 #endif
 		localise_nmi_watchdog();
 	end_local_APIC_setup();
+
 #ifdef CONFIG_X86_IO_APIC
-	if (smp_found_config)
-		if (!skip_ioapic_setup && nr_ioapics)
-			setup_IO_APIC();
+	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
+		setup_IO_APIC();
+# ifdef CONFIG_X86_64
+	else
+		nr_ioapics = 0;
+# endif
 #endif
+
+#ifdef CONFIG_X86_64
+	setup_boot_APIC_clock();
+	check_nmi_watchdog();
+#else
 	setup_boot_clock();
+#endif
 
 	return 0;
 }
@@ -1348,8 +1680,11 @@
  */
 void smp_spurious_interrupt(struct pt_regs *regs)
 {
-	unsigned long v;
+	u32 v;
 
+#ifdef CONFIG_X86_64
+	exit_idle();
+#endif
 	irq_enter();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
@@ -1360,10 +1695,14 @@
 	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
 		ack_APIC_irq();
 
+#ifdef CONFIG_X86_64
+	add_pda(irq_spurious_count, 1);
+#else
 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
 	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
 	       "should never happen.\n", smp_processor_id());
 	__get_cpu_var(irq_stat).irq_spurious_count++;
+#endif
 	irq_exit();
 }
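
The bit test above examines one word of the APIC's 256-bit in-service register (ISR): the bits live in eight 32-bit registers spaced 0x10 bytes apart, so the low five bits of the vector select the bit and (vector & ~0x1f) >> 1 selects the register offset (the full read is visible in the removed apic_64.c further down). A stand-alone sketch of that arithmetic, plain illustrative C rather than kernel code:

#include <stdio.h>

#define APIC_ISR 0x100	/* first of the eight 32-bit in-service registers */

/* offset of the ISR register holding the bit for 'vector' */
static unsigned int isr_reg(unsigned int vector)
{
	/*
	 * 32 vectors per register, registers 0x10 bytes apart:
	 * dividing by 32 and multiplying by 0x10 is the same as
	 * shifting (vector & ~0x1f) right by one.
	 */
	return APIC_ISR + ((vector & ~0x1fU) >> 1);
}

int main(void)
{
	unsigned int vec = 0xff;	/* an example spurious vector */

	printf("vector 0x%02x -> ISR reg 0x%03x, bit %u\n",
	       vec, isr_reg(vec), vec & 0x1fU);
	return 0;
}
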
 
@@ -1372,8 +1711,11 @@
  */
 void smp_error_interrupt(struct pt_regs *regs)
 {
-	unsigned long v, v1;
+	u32 v, v1;
 
+#ifdef CONFIG_X86_64
+	exit_idle();
+#endif
 	irq_enter();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v = apic_read(APIC_ESR);
@@ -1392,7 +1734,7 @@
 	   6: Received illegal vector
 	   7: Illegal register address
 	*/
-	printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
+	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
 		smp_processor_id(), v , v1);
 	irq_exit();
 }
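
The %02x(%02x) values printed above are the raw ESR contents before and after the write; the bit meanings are exactly the list in the comment. A small decoder sketch (hypothetical, plain user-space C, not part of the patch) makes the mapping concrete:

#include <stdio.h>

static const char * const esr_bit_name[8] = {
	"Send CS error", "Receive CS error",
	"Send accept error", "Receive accept error",
	"Reserved", "Send illegal vector",
	"Received illegal vector", "Illegal register address",
};

static void decode_esr(unsigned int esr)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		if (esr & (1U << i))
			printf("  bit %u: %s\n", i, esr_bit_name[i]);
}

int main(void)
{
	decode_esr(0x40);	/* reports "Received illegal vector" */
	return 0;
}
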
@@ -1565,6 +1907,13 @@
 	cpu_set(cpu, cpu_present_map);
 }
 
+#ifdef CONFIG_X86_64
+int hard_smp_processor_id(void)
+{
+	return read_apic_id();
+}
+#endif
+
 /*
  * Power management
  */
@@ -1640,7 +1989,7 @@
 
 	local_irq_save(flags);
 
-#ifdef CONFIG_X86_64
+#ifdef HAVE_X2APIC
 	if (x2apic)
 		enable_x2apic();
 	else
@@ -1702,7 +2051,7 @@
 	.cls	= &lapic_sysclass,
 };
 
-static void __devinit apic_pm_activate(void)
+static void __cpuinit apic_pm_activate(void)
 {
 	apic_pm_state.active = 1;
 }
@@ -1728,16 +2077,87 @@
 
 #endif	/* CONFIG_PM */
 
+#ifdef CONFIG_X86_64
+/*
+ * apic_is_clustered_box() -- Check if we can expect good TSC
+ *
+ * Thus far, the major user of this is IBM's Summit2 series:
+ *
+ * Clustered boxes may have unsynced TSC problems if they are
+ * multi-chassis. Use available data to take a good guess.
+ * If in doubt, go HPET.
+ */
+__cpuinit int apic_is_clustered_box(void)
+{
+	int i, clusters, zeros;
+	unsigned id;
+	u16 *bios_cpu_apicid;
+	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
+
+	/*
+	 * There are no clustered boxes with AMD CPUs yet. Some AMD boxes
+	 * with quad-core CPUs and 8 sockets will have APIC IDs in the
+	 * [4, 0x23] or [8, 0x27] range and could be mistaken for vSMP
+	 * boxes; those still need checking...
+	 */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
+		return 0;
+
+	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
+	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
+
+	for (i = 0; i < NR_CPUS; i++) {
+		/* are we being called early in kernel startup? */
+		if (bios_cpu_apicid) {
+			id = bios_cpu_apicid[i];
+		}
+		else if (i < nr_cpu_ids) {
+			if (cpu_present(i))
+				id = per_cpu(x86_bios_cpu_apicid, i);
+			else
+				continue;
+		}
+		else
+			break;
+
+		if (id != BAD_APICID)
+			__set_bit(APIC_CLUSTERID(id), clustermap);
+	}
+
+	/* Problem:  Partially populated chassis may not have CPUs in some of
+	 * the APIC clusters they have been allocated.  Only present CPUs have
+	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
+	 * Since clusters are allocated sequentially, count zeros only if
+	 * they are bounded by ones.
+	 */
+	clusters = 0;
+	zeros = 0;
+	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
+		if (test_bit(i, clustermap)) {
+			clusters += 1 + zeros;
+			zeros = 0;
+		} else
+			++zeros;
+	}
+
+	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
+	 * not guaranteed to be synced between boards
+	 */
+	if (is_vsmp_box() && clusters > 1)
+		return 1;
+
+	/*
+	 * If clusters > 2, then should be multi-chassis.
+	 * May have to revisit this when multi-core + hyperthreaded CPUs come
+	 * out, but AFAIK this will work even for them.
+	 */
+	return (clusters > 2);
+}
+#endif
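
The counting rule documented above ("count zeros only if they are bounded by ones") is easy to exercise in isolation. The following stand-alone sketch, hypothetical user-space C rather than kernel code, reproduces it on a sample cluster map: interior gaps count as allocated-but-empty clusters, trailing gaps do not.

#include <stdio.h>

static int count_clusters(const int *map, int n)
{
	int i, clusters = 0, zeros = 0;

	for (i = 0; i < n; i++) {
		if (map[i]) {
			clusters += 1 + zeros;	/* gap was bounded by ones */
			zeros = 0;
		} else {
			++zeros;	/* provisional; dropped if trailing */
		}
	}
	return clusters;
}

int main(void)
{
	/* clusters 0, 3 and 4 populated; 1 and 2 empty; 5..7 never allocated */
	int map[8] = { 1, 0, 0, 1, 1, 0, 0, 0 };

	printf("clusters = %d\n", count_clusters(map, 8));	/* prints 5 */
	return 0;
}
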
+
 /*
  * APIC command line parameters
  */
-static int __init parse_lapic(char *arg)
-{
-	force_enable_local_apic = 1;
-	return 0;
-}
-early_param("lapic", parse_lapic);
-
 static int __init setup_disableapic(char *arg)
 {
 	disable_apic = 1;
@@ -1779,7 +2199,6 @@
 	if (!arg)  {
 #ifdef CONFIG_X86_64
 		skip_ioapic_setup = 0;
-		ioapic_force = 1;
 		return 0;
 #endif
 		return -EINVAL;
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
deleted file mode 100644
index 94ddb69a..0000000
--- a/arch/x86/kernel/apic_64.c
+++ /dev/null
@@ -1,1848 +0,0 @@
-/*
- *	Local APIC handling, local APIC timers
- *
- *	(c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
- *
- *	Fixes
- *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
- *					thanks to Eric Gilmore
- *					and Rolf G. Tews
- *					for testing these extensively.
- *	Maciej W. Rozycki	:	Various updates and fixes.
- *	Mikael Pettersson	:	Power Management for UP-APIC.
- *	Pavel Machek and
- *	Mikael Pettersson	:	PM converted to driver model.
- */
-
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/ioport.h>
-#include <linux/clockchips.h>
-#include <linux/acpi_pmtmr.h>
-#include <linux/module.h>
-#include <linux/dmar.h>
-
-#include <asm/atomic.h>
-#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/hpet.h>
-#include <asm/pgalloc.h>
-#include <asm/nmi.h>
-#include <asm/idle.h>
-#include <asm/proto.h>
-#include <asm/timex.h>
-#include <asm/apic.h>
-#include <asm/i8259.h>
-
-#include <mach_ipi.h>
-#include <mach_apic.h>
-
-/* Disable local APIC timer from the kernel commandline or via dmi quirk */
-static int disable_apic_timer __cpuinitdata;
-static int apic_calibrate_pmtmr __initdata;
-int disable_apic;
-int disable_x2apic;
-int x2apic;
-
-/* x2apic enabled before OS handover */
-int x2apic_preenabled;
-
-/* Local APIC timer works in C2 */
-int local_apic_timer_c2_ok;
-EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
-
-/*
- * Debug level, exported for io_apic.c
- */
-unsigned int apic_verbosity;
-
-/* Have we found an MP table */
-int smp_found_config;
-
-static struct resource lapic_resource = {
-	.name = "Local APIC",
-	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
-};
-
-static unsigned int calibration_result;
-
-static int lapic_next_event(unsigned long delta,
-			    struct clock_event_device *evt);
-static void lapic_timer_setup(enum clock_event_mode mode,
-			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(cpumask_t mask);
-static void apic_pm_activate(void);
-
-/*
- * The local apic timer can be used for any function which is CPU local.
- */
-static struct clock_event_device lapic_clockevent = {
-	.name		= "lapic",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
-			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
-	.shift		= 32,
-	.set_mode	= lapic_timer_setup,
-	.set_next_event	= lapic_next_event,
-	.broadcast	= lapic_timer_broadcast,
-	.rating		= 100,
-	.irq		= -1,
-};
-static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
-
-static unsigned long apic_phys;
-
-unsigned long mp_lapic_addr;
-
-/*
- * Get the LAPIC version
- */
-static inline int lapic_get_version(void)
-{
-	return GET_APIC_VERSION(apic_read(APIC_LVR));
-}
-
-/*
- * Check, if the APIC is integrated or a separate chip
- */
-static inline int lapic_is_integrated(void)
-{
-#ifdef CONFIG_X86_64
-	return 1;
-#else
-	return APIC_INTEGRATED(lapic_get_version());
-#endif
-}
-
-/*
- * Check, whether this is a modern or a first generation APIC
- */
-static int modern_apic(void)
-{
-	/* AMD systems use old APIC versions, so check the CPU */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-	    boot_cpu_data.x86 >= 0xf)
-		return 1;
-	return lapic_get_version() >= 0x14;
-}
-
-/*
- * Paravirt kernels also might be using these below ops. So we still
- * use generic apic_read()/apic_write(), which might be pointing to different
- * ops in PARAVIRT case.
- */
-void xapic_wait_icr_idle(void)
-{
-	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
-		cpu_relax();
-}
-
-u32 safe_xapic_wait_icr_idle(void)
-{
-	u32 send_status;
-	int timeout;
-
-	timeout = 0;
-	do {
-		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-		if (!send_status)
-			break;
-		udelay(100);
-	} while (timeout++ < 1000);
-
-	return send_status;
-}
-
-void xapic_icr_write(u32 low, u32 id)
-{
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
-	apic_write(APIC_ICR, low);
-}
-
-u64 xapic_icr_read(void)
-{
-	u32 icr1, icr2;
-
-	icr2 = apic_read(APIC_ICR2);
-	icr1 = apic_read(APIC_ICR);
-
-	return icr1 | ((u64)icr2 << 32);
-}
-
-static struct apic_ops xapic_ops = {
-	.read = native_apic_mem_read,
-	.write = native_apic_mem_write,
-	.icr_read = xapic_icr_read,
-	.icr_write = xapic_icr_write,
-	.wait_icr_idle = xapic_wait_icr_idle,
-	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
-};
-
-struct apic_ops __read_mostly *apic_ops = &xapic_ops;
-EXPORT_SYMBOL_GPL(apic_ops);
-
-static void x2apic_wait_icr_idle(void)
-{
-	/* no need to wait for icr idle in x2apic */
-	return;
-}
-
-static u32 safe_x2apic_wait_icr_idle(void)
-{
-	/* no need to wait for icr idle in x2apic */
-	return 0;
-}
-
-void x2apic_icr_write(u32 low, u32 id)
-{
-	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
-}
-
-u64 x2apic_icr_read(void)
-{
-	unsigned long val;
-
-	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
-	return val;
-}
-
-static struct apic_ops x2apic_ops = {
-	.read = native_apic_msr_read,
-	.write = native_apic_msr_write,
-	.icr_read = x2apic_icr_read,
-	.icr_write = x2apic_icr_write,
-	.wait_icr_idle = x2apic_wait_icr_idle,
-	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
-};
-
-/**
- * enable_NMI_through_LVT0 - enable NMI through local vector table 0
- */
-void __cpuinit enable_NMI_through_LVT0(void)
-{
-	unsigned int v;
-
-	/* unmask and set to NMI */
-	v = APIC_DM_NMI;
-
-	/* Level triggered for 82489DX (32bit mode) */
-	if (!lapic_is_integrated())
-		v |= APIC_LVT_LEVEL_TRIGGER;
-
-	apic_write(APIC_LVT0, v);
-}
-
-/**
- * lapic_get_maxlvt - get the maximum number of local vector table entries
- */
-int lapic_get_maxlvt(void)
-{
-	unsigned int v;
-
-	v = apic_read(APIC_LVR);
-	/*
-	 * - we always have APIC integrated on 64bit mode
-	 * - 82489DXs do not report # of LVT entries
-	 */
-	return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
-}
-
-/*
- * Local APIC timer
- */
-
-/* Clock divisor */
-#ifdef CONFG_X86_64
-#define APIC_DIVISOR 1
-#else
-#define APIC_DIVISOR 16
-#endif
-
-/*
- * This function sets up the local APIC timer, with a timeout of
- * 'clocks' APIC bus clock. During calibration we actually call
- * this function twice on the boot CPU, once with a bogus timeout
- * value, second time for real. The other (noncalibrating) CPUs
- * call this function only once, with the real, calibrated value.
- *
- * We do reads before writes even if unnecessary, to get around the
- * P5 APIC double write bug.
- */
-static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
-{
-	unsigned int lvtt_value, tmp_value;
-
-	lvtt_value = LOCAL_TIMER_VECTOR;
-	if (!oneshot)
-		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
-	if (!lapic_is_integrated())
-		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
-
-	if (!irqen)
-		lvtt_value |= APIC_LVT_MASKED;
-
-	apic_write(APIC_LVTT, lvtt_value);
-
-	/*
-	 * Divide PICLK by 16
-	 */
-	tmp_value = apic_read(APIC_TDCR);
-	apic_write(APIC_TDCR,
-		(tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
-		APIC_TDR_DIV_16);
-
-	if (!oneshot)
-		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
-}
-
-/*
- * Setup extended LVT, AMD specific (K8, family 10h)
- *
- * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
- * MCE interrupts are supported. Thus MCE offset must be set to 0.
- *
- * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
- */
-
-#define APIC_EILVT_LVTOFF_MCE 0
-#define APIC_EILVT_LVTOFF_IBS 1
-
-static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
-{
-	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
-	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
-
-	apic_write(reg, v);
-}
-
-u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
-{
-	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
-	return APIC_EILVT_LVTOFF_MCE;
-}
-
-u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
-{
-	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
-	return APIC_EILVT_LVTOFF_IBS;
-}
-EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
-
-/*
- * Program the next event, relative to now
- */
-static int lapic_next_event(unsigned long delta,
-			    struct clock_event_device *evt)
-{
-	apic_write(APIC_TMICT, delta);
-	return 0;
-}
-
-/*
- * Setup the lapic timer in periodic or oneshot mode
- */
-static void lapic_timer_setup(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
-{
-	unsigned long flags;
-	unsigned int v;
-
-	/* Lapic used as dummy for broadcast ? */
-	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
-		return;
-
-	local_irq_save(flags);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-		__setup_APIC_LVTT(calibration_result,
-				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		v = apic_read(APIC_LVTT);
-		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-		apic_write(APIC_LVTT, v);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
-
-	local_irq_restore(flags);
-}
-
-/*
- * Local APIC timer broadcast function
- */
-static void lapic_timer_broadcast(cpumask_t mask)
-{
-#ifdef CONFIG_SMP
-	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
-#endif
-}
-
-/*
- * Setup the local APIC timer for this CPU. Copy the initilized values
- * of the boot CPU and register the clock event in the framework.
- */
-static void setup_APIC_timer(void)
-{
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
-
-	memcpy(levt, &lapic_clockevent, sizeof(*levt));
-	levt->cpumask = cpumask_of_cpu(smp_processor_id());
-
-	clockevents_register_device(levt);
-}
-
-/*
- * In this function we calibrate APIC bus clocks to the external
- * timer. Unfortunately we cannot use jiffies and the timer irq
- * to calibrate, since some later bootup code depends on getting
- * the first irq? Ugh.
- *
- * We want to do the calibration only once since we
- * want to have local timer irqs syncron. CPUs connected
- * by the same APIC bus have the very same bus frequency.
- * And we want to have irqs off anyways, no accidental
- * APIC irq that way.
- */
-
-#define TICK_COUNT 100000000
-
-static int __init calibrate_APIC_clock(void)
-{
-	unsigned apic, apic_start;
-	unsigned long tsc, tsc_start;
-	int result;
-
-	local_irq_disable();
-
-	/*
-	 * Put whatever arbitrary (but long enough) timeout
-	 * value into the APIC clock, we just want to get the
-	 * counter running for calibration.
-	 *
-	 * No interrupt enable !
-	 */
-	__setup_APIC_LVTT(250000000, 0, 0);
-
-	apic_start = apic_read(APIC_TMCCT);
-#ifdef CONFIG_X86_PM_TIMER
-	if (apic_calibrate_pmtmr && pmtmr_ioport) {
-		pmtimer_wait(5000);  /* 5ms wait */
-		apic = apic_read(APIC_TMCCT);
-		result = (apic_start - apic) * 1000L / 5;
-	} else
-#endif
-	{
-		rdtscll(tsc_start);
-
-		do {
-			apic = apic_read(APIC_TMCCT);
-			rdtscll(tsc);
-		} while ((tsc - tsc_start) < TICK_COUNT &&
-				(apic_start - apic) < TICK_COUNT);
-
-		result = (apic_start - apic) * 1000L * tsc_khz /
-					(tsc - tsc_start);
-	}
-
-	local_irq_enable();
-
-	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
-
-	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
-		result / 1000 / 1000, result / 1000 % 1000);
-
-	/* Calculate the scaled math multiplication factor */
-	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
-				       lapic_clockevent.shift);
-	lapic_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
-	lapic_clockevent.min_delta_ns =
-		clockevent_delta2ns(0xF, &lapic_clockevent);
-
-	calibration_result = (result * APIC_DIVISOR) / HZ;
-
-	/*
-	 * Do a sanity check on the APIC calibration result
-	 */
-	if (calibration_result < (1000000 / HZ)) {
-		printk(KERN_WARNING
-			"APIC frequency too slow, disabling apic timer\n");
-		return -1;
-	}
-
-	return 0;
-}
-
-/*
- * Setup the boot APIC
- *
- * Calibrate and verify the result.
- */
-void __init setup_boot_APIC_clock(void)
-{
-	/*
-	 * The local apic timer can be disabled via the kernel
-	 * commandline or from the CPU detection code. Register the lapic
-	 * timer as a dummy clock event source on SMP systems, so the
-	 * broadcast mechanism is used. On UP systems simply ignore it.
-	 */
-	if (disable_apic_timer) {
-		printk(KERN_INFO "Disabling APIC timer\n");
-		/* No broadcast on UP ! */
-		if (num_possible_cpus() > 1) {
-			lapic_clockevent.mult = 1;
-			setup_APIC_timer();
-		}
-		return;
-	}
-
-	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
-		    "calibrating APIC timer ...\n");
-
-	if (calibrate_APIC_clock()) {
-		/* No broadcast on UP ! */
-		if (num_possible_cpus() > 1)
-			setup_APIC_timer();
-		return;
-	}
-
-	/*
-	 * If nmi_watchdog is set to IO_APIC, we need the
-	 * PIT/HPET going.  Otherwise register lapic as a dummy
-	 * device.
-	 */
-	if (nmi_watchdog != NMI_IO_APIC)
-		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
-	else
-		printk(KERN_WARNING "APIC timer registered as dummy,"
-			" due to nmi_watchdog=%d!\n", nmi_watchdog);
-
-	/* Setup the lapic or request the broadcast */
-	setup_APIC_timer();
-}
-
-void __cpuinit setup_secondary_APIC_clock(void)
-{
-	setup_APIC_timer();
-}
-
-/*
- * The guts of the apic timer interrupt
- */
-static void local_apic_timer_interrupt(void)
-{
-	int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
-
-	/*
-	 * Normally we should not be here till LAPIC has been initialized but
-	 * in some cases like kdump, its possible that there is a pending LAPIC
-	 * timer interrupt from previous kernel's context and is delivered in
-	 * new kernel the moment interrupts are enabled.
-	 *
-	 * Interrupts are enabled early and LAPIC is setup much later, hence
-	 * its possible that when we get here evt->event_handler is NULL.
-	 * Check for event_handler being NULL and discard the interrupt as
-	 * spurious.
-	 */
-	if (!evt->event_handler) {
-		printk(KERN_WARNING
-		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
-		/* Switch it off */
-		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
-		return;
-	}
-
-	/*
-	 * the NMI deadlock-detector uses this.
-	 */
-#ifdef CONFIG_X86_64
-	add_pda(apic_timer_irqs, 1);
-#else
-	per_cpu(irq_stat, cpu).apic_timer_irqs++;
-#endif
-
-	evt->event_handler(evt);
-}
-
-/*
- * Local APIC timer interrupt. This is the most natural way for doing
- * local interrupts, but local timer interrupts can be emulated by
- * broadcast interrupts too. [in case the hw doesn't support APIC timers]
- *
- * [ if a single-CPU system runs an SMP kernel then we call the local
- *   interrupt as well. Thus we cannot inline the local irq ... ]
- */
-void smp_apic_timer_interrupt(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	/*
-	 * NOTE! We'd better ACK the irq immediately,
-	 * because timer handling can be slow.
-	 */
-	ack_APIC_irq();
-	/*
-	 * update_process_times() expects us to have done irq_enter().
-	 * Besides, if we don't timer interrupts ignore the global
-	 * interrupt lock, which is the WrongThing (tm) to do.
-	 */
-	exit_idle();
-	irq_enter();
-	local_apic_timer_interrupt();
-	irq_exit();
-
-	set_irq_regs(old_regs);
-}
-
-int setup_profiling_timer(unsigned int multiplier)
-{
-	return -EINVAL;
-}
-
-
-/*
- * Local APIC start and shutdown
- */
-
-/**
- * clear_local_APIC - shutdown the local APIC
- *
- * This is called, when a CPU is disabled and before rebooting, so the state of
- * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
- * leftovers during boot.
- */
-void clear_local_APIC(void)
-{
-	int maxlvt;
-	u32 v;
-
-	/* APIC hasn't been mapped yet */
-	if (!apic_phys)
-		return;
-
-	maxlvt = lapic_get_maxlvt();
-	/*
-	 * Masking an LVT entry can trigger a local APIC error
-	 * if the vector is zero. Mask LVTERR first to prevent this.
-	 */
-	if (maxlvt >= 3) {
-		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
-		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
-	}
-	/*
-	 * Careful: we have to set masks only first to deassert
-	 * any level-triggered sources.
-	 */
-	v = apic_read(APIC_LVTT);
-	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
-	v = apic_read(APIC_LVT0);
-	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-	v = apic_read(APIC_LVT1);
-	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
-	if (maxlvt >= 4) {
-		v = apic_read(APIC_LVTPC);
-		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
-	}
-
-	/* lets not touch this if we didn't frob it */
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL)
-	if (maxlvt >= 5) {
-		v = apic_read(APIC_LVTTHMR);
-		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
-	}
-#endif
-	/*
-	 * Clean APIC state for other OSs:
-	 */
-	apic_write(APIC_LVTT, APIC_LVT_MASKED);
-	apic_write(APIC_LVT0, APIC_LVT_MASKED);
-	apic_write(APIC_LVT1, APIC_LVT_MASKED);
-	if (maxlvt >= 3)
-		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
-	if (maxlvt >= 4)
-		apic_write(APIC_LVTPC, APIC_LVT_MASKED);
-
-	/* Integrated APIC (!82489DX) ? */
-	if (lapic_is_integrated()) {
-		if (maxlvt > 3)
-			/* Clear ESR due to Pentium errata 3AP and 11AP */
-			apic_write(APIC_ESR, 0);
-		apic_read(APIC_ESR);
-	}
-}
-
-/**
- * disable_local_APIC - clear and disable the local APIC
- */
-void disable_local_APIC(void)
-{
-	unsigned int value;
-
-	clear_local_APIC();
-
-	/*
-	 * Disable APIC (implies clearing of registers
-	 * for 82489DX!).
-	 */
-	value = apic_read(APIC_SPIV);
-	value &= ~APIC_SPIV_APIC_ENABLED;
-	apic_write(APIC_SPIV, value);
-
-#ifdef CONFIG_X86_32
-	/*
-	 * When LAPIC was disabled by the BIOS and enabled by the kernel,
-	 * restore the disabled state.
-	 */
-	if (enabled_via_apicbase) {
-		unsigned int l, h;
-
-		rdmsr(MSR_IA32_APICBASE, l, h);
-		l &= ~MSR_IA32_APICBASE_ENABLE;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-	}
-#endif
-}
-
-/*
- * If Linux enabled the LAPIC against the BIOS default disable it down before
- * re-entering the BIOS on shutdown.  Otherwise the BIOS may get confused and
- * not power-off.  Additionally clear all LVT entries before disable_local_APIC
- * for the case where Linux didn't enable the LAPIC.
- */
-void lapic_shutdown(void)
-{
-	unsigned long flags;
-
-	if (!cpu_has_apic)
-		return;
-
-	local_irq_save(flags);
-
-#ifdef CONFIG_X86_32
-	if (!enabled_via_apicbase)
-		clear_local_APIC();
-	else
-#endif
-		disable_local_APIC();
-
-
-	local_irq_restore(flags);
-}
-
-/*
- * This is to verify that we're looking at a real local APIC.
- * Check these against your board if the CPUs aren't getting
- * started for no apparent reason.
- */
-int __init verify_local_APIC(void)
-{
-	unsigned int reg0, reg1;
-
-	/*
-	 * The version register is read-only in a real APIC.
-	 */
-	reg0 = apic_read(APIC_LVR);
-	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
-	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
-	reg1 = apic_read(APIC_LVR);
-	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
-
-	/*
-	 * The two version reads above should print the same
-	 * numbers.  If the second one is different, then we
-	 * poke at a non-APIC.
-	 */
-	if (reg1 != reg0)
-		return 0;
-
-	/*
-	 * Check if the version looks reasonably.
-	 */
-	reg1 = GET_APIC_VERSION(reg0);
-	if (reg1 == 0x00 || reg1 == 0xff)
-		return 0;
-	reg1 = lapic_get_maxlvt();
-	if (reg1 < 0x02 || reg1 == 0xff)
-		return 0;
-
-	/*
-	 * The ID register is read/write in a real APIC.
-	 */
-	reg0 = apic_read(APIC_ID);
-	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
-	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
-	reg1 = apic_read(APIC_ID);
-	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
-	apic_write(APIC_ID, reg0);
-	if (reg1 != (reg0 ^ APIC_ID_MASK))
-		return 0;
-
-	/*
-	 * The next two are just to see if we have sane values.
-	 * They're only really relevant if we're in Virtual Wire
-	 * compatibility mode, but most boxes are anymore.
-	 */
-	reg0 = apic_read(APIC_LVT0);
-	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
-	reg1 = apic_read(APIC_LVT1);
-	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
-
-	return 1;
-}
-
-/**
- * sync_Arb_IDs - synchronize APIC bus arbitration IDs
- */
-void __init sync_Arb_IDs(void)
-{
-	/*
-	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
-	 * needed on AMD.
-	 */
-	if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		return;
-
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
-
-	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
-	apic_write(APIC_ICR, APIC_DEST_ALLINC |
-			APIC_INT_LEVELTRIG | APIC_DM_INIT);
-}
-
-/*
- * An initial setup of the virtual wire mode.
- */
-void __init init_bsp_APIC(void)
-{
-	unsigned int value;
-
-	/*
-	 * Don't do the setup now if we have a SMP BIOS as the
-	 * through-I/O-APIC virtual wire mode might be active.
-	 */
-	if (smp_found_config || !cpu_has_apic)
-		return;
-
-	/*
-	 * Do not trust the local APIC being empty at bootup.
-	 */
-	clear_local_APIC();
-
-	/*
-	 * Enable APIC.
-	 */
-	value = apic_read(APIC_SPIV);
-	value &= ~APIC_VECTOR_MASK;
-	value |= APIC_SPIV_APIC_ENABLED;
-
-#ifdef CONFIG_X86_32
-	/* This bit is reserved on P4/Xeon and should be cleared */
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-	    (boot_cpu_data.x86 == 15))
-		value &= ~APIC_SPIV_FOCUS_DISABLED;
-	else
-#endif
-		value |= APIC_SPIV_FOCUS_DISABLED;
-	value |= SPURIOUS_APIC_VECTOR;
-	apic_write(APIC_SPIV, value);
-
-	/*
-	 * Set up the virtual wire mode.
-	 */
-	apic_write(APIC_LVT0, APIC_DM_EXTINT);
-	value = APIC_DM_NMI;
-	if (!lapic_is_integrated())		/* 82489DX */
-		value |= APIC_LVT_LEVEL_TRIGGER;
-	apic_write(APIC_LVT1, value);
-}
-
-static void __cpuinit lapic_setup_esr(void)
-{
-	unsigned long oldvalue, value, maxlvt;
-	if (lapic_is_integrated() && !esr_disable) {
-		if (esr_disable) {
-			/*
-			 * Something untraceable is creating bad interrupts on
-			 * secondary quads ... for the moment, just leave the
-			 * ESR disabled - we can't do anything useful with the
-			 * errors anyway - mbligh
-			 */
-			printk(KERN_INFO "Leaving ESR disabled.\n");
-			return;
-		}
-		/* !82489DX */
-		maxlvt = lapic_get_maxlvt();
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-		oldvalue = apic_read(APIC_ESR);
-
-		/* enables sending errors */
-		value = ERROR_APIC_VECTOR;
-		apic_write(APIC_LVTERR, value);
-		/*
-		 * spec says clear errors after enabling vector.
-		 */
-		if (maxlvt > 3)
-			apic_write(APIC_ESR, 0);
-		value = apic_read(APIC_ESR);
-		if (value != oldvalue)
-			apic_printk(APIC_VERBOSE, "ESR value before enabling "
-				"vector: 0x%08lx  after: 0x%08lx\n",
-				oldvalue, value);
-	} else {
-		printk(KERN_INFO "No ESR for 82489DX.\n");
-	}
-}
-
-
-/**
- * setup_local_APIC - setup the local APIC
- */
-void __cpuinit setup_local_APIC(void)
-{
-	unsigned int value;
-	int i, j;
-
-	preempt_disable();
-	value = apic_read(APIC_LVR);
-
-	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
-
-	/*
-	 * Double-check whether this APIC is really registered.
-	 * This is meaningless in clustered apic mode, so we skip it.
-	 */
-	if (!apic_id_registered())
-		BUG();
-
-	/*
-	 * Intel recommends to set DFR, LDR and TPR before enabling
-	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
-	 * document number 292116).  So here it goes...
-	 */
-	init_apic_ldr();
-
-	/*
-	 * Set Task Priority to 'accept all'. We never change this
-	 * later on.
-	 */
-	value = apic_read(APIC_TASKPRI);
-	value &= ~APIC_TPRI_MASK;
-	apic_write(APIC_TASKPRI, value);
-
-	/*
-	 * After a crash, we no longer service the interrupts and a pending
-	 * interrupt from previous kernel might still have ISR bit set.
-	 *
-	 * Most probably by now CPU has serviced that pending interrupt and
-	 * it might not have done the ack_APIC_irq() because it thought,
-	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
-	 * does not clear the ISR bit and cpu thinks it has already serivced
-	 * the interrupt. Hence a vector might get locked. It was noticed
-	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
-	 */
-	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
-		value = apic_read(APIC_ISR + i*0x10);
-		for (j = 31; j >= 0; j--) {
-			if (value & (1<<j))
-				ack_APIC_irq();
-		}
-	}
-
-	/*
-	 * Now that we are all set up, enable the APIC
-	 */
-	value = apic_read(APIC_SPIV);
-	value &= ~APIC_VECTOR_MASK;
-	/*
-	 * Enable APIC
-	 */
-	value |= APIC_SPIV_APIC_ENABLED;
-
-	/* We always use processor focus */
-
-	/*
-	 * Set spurious IRQ vector
-	 */
-	value |= SPURIOUS_APIC_VECTOR;
-	apic_write(APIC_SPIV, value);
-
-	/*
-	 * Set up LVT0, LVT1:
-	 *
-	 * set up through-local-APIC on the BP's LINT0. This is not
-	 * strictly necessary in pure symmetric-IO mode, but sometimes
-	 * we delegate interrupts to the 8259A.
-	 */
-	/*
-	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
-	 */
-	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
-	if (!smp_processor_id() && !value) {
-		value = APIC_DM_EXTINT;
-		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
-			    smp_processor_id());
-	} else {
-		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
-		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
-			    smp_processor_id());
-	}
-	apic_write(APIC_LVT0, value);
-
-	/*
-	 * only the BP should see the LINT1 NMI signal, obviously.
-	 */
-	if (!smp_processor_id())
-		value = APIC_DM_NMI;
-	else
-		value = APIC_DM_NMI | APIC_LVT_MASKED;
-	apic_write(APIC_LVT1, value);
-	preempt_enable();
-}
-
-void __cpuinit end_local_APIC_setup(void)
-{
-	lapic_setup_esr();
-
-#ifdef CONFIG_X86_32
-	{
-		unsigned int value;
-		/* Disable the local apic timer */
-		value = apic_read(APIC_LVTT);
-		value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-		apic_write(APIC_LVTT, value);
-	}
-#endif
-
-	setup_apic_nmi_watchdog(NULL);
-	apic_pm_activate();
-}
-
-void check_x2apic(void)
-{
-	int msr, msr2;
-
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
-
-	if (msr & X2APIC_ENABLE) {
-		printk("x2apic enabled by BIOS, switching to x2apic ops\n");
-		x2apic_preenabled = x2apic = 1;
-		apic_ops = &x2apic_ops;
-	}
-}
-
-void enable_x2apic(void)
-{
-	int msr, msr2;
-
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
-	if (!(msr & X2APIC_ENABLE)) {
-		printk("Enabling x2apic\n");
-		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
-	}
-}
-
-void enable_IR_x2apic(void)
-{
-#ifdef CONFIG_INTR_REMAP
-	int ret;
-	unsigned long flags;
-
-	if (!cpu_has_x2apic)
-		return;
-
-	if (!x2apic_preenabled && disable_x2apic) {
-		printk(KERN_INFO
-		       "Skipped enabling x2apic and Interrupt-remapping "
-		       "because of nox2apic\n");
-		return;
-	}
-
-	if (x2apic_preenabled && disable_x2apic)
-		panic("Bios already enabled x2apic, can't enforce nox2apic");
-
-	if (!x2apic_preenabled && skip_ioapic_setup) {
-		printk(KERN_INFO
-		       "Skipped enabling x2apic and Interrupt-remapping "
-		       "because of skipping io-apic setup\n");
-		return;
-	}
-
-	ret = dmar_table_init();
-	if (ret) {
-		printk(KERN_INFO
-		       "dmar_table_init() failed with %d:\n", ret);
-
-		if (x2apic_preenabled)
-			panic("x2apic enabled by bios. But IR enabling failed");
-		else
-			printk(KERN_INFO
-			       "Not enabling x2apic,Intr-remapping\n");
-		return;
-	}
-
-	local_irq_save(flags);
-	mask_8259A();
-	save_mask_IO_APIC_setup();
-
-	ret = enable_intr_remapping(1);
-
-	if (ret && x2apic_preenabled) {
-		local_irq_restore(flags);
-		panic("x2apic enabled by bios. But IR enabling failed");
-	}
-
-	if (ret)
-		goto end;
-
-	if (!x2apic) {
-		x2apic = 1;
-		apic_ops = &x2apic_ops;
-		enable_x2apic();
-	}
-end:
-	if (ret)
-		/*
-		 * IR enabling failed
-		 */
-		restore_IO_APIC_setup();
-	else
-		reinit_intr_remapped_IO_APIC(x2apic_preenabled);
-
-	unmask_8259A();
-	local_irq_restore(flags);
-
-	if (!ret) {
-		if (!x2apic_preenabled)
-			printk(KERN_INFO
-			       "Enabled x2apic and interrupt-remapping\n");
-		else
-			printk(KERN_INFO
-			       "Enabled Interrupt-remapping\n");
-	} else
-		printk(KERN_ERR
-		       "Failed to enable Interrupt-remapping and x2apic\n");
-#else
-	if (!cpu_has_x2apic)
-		return;
-
-	if (x2apic_preenabled)
-		panic("x2apic enabled prior OS handover,"
-		      " enable CONFIG_INTR_REMAP");
-
-	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
-	       " and x2apic\n");
-#endif
-
-	return;
-}
-
-/*
- * Detect and enable local APICs on non-SMP boards.
- * Original code written by Keir Fraser.
- * On AMD64 we trust the BIOS - if it says no APIC it is likely
- * not correctly set up (usually the APIC timer won't work etc.)
- */
-static int __init detect_init_APIC(void)
-{
-	if (!cpu_has_apic) {
-		printk(KERN_INFO "No local APIC present\n");
-		return -1;
-	}
-
-	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-	boot_cpu_physical_apicid = 0;
-	return 0;
-}
-
-void __init early_init_lapic_mapping(void)
-{
-	unsigned long phys_addr;
-
-	/*
-	 * If no local APIC can be found then go out
-	 * : it means there is no mpatable and MADT
-	 */
-	if (!smp_found_config)
-		return;
-
-	phys_addr = mp_lapic_addr;
-
-	set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
-	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
-		    APIC_BASE, phys_addr);
-
-	/*
-	 * Fetch the APIC ID of the BSP in case we have a
-	 * default configuration (or the MP table is broken).
-	 */
-	boot_cpu_physical_apicid = read_apic_id();
-}
-
-/**
- * init_apic_mappings - initialize APIC mappings
- */
-void __init init_apic_mappings(void)
-{
-	if (x2apic) {
-		boot_cpu_physical_apicid = read_apic_id();
-		return;
-	}
-
-	/*
-	 * If no local APIC can be found then set up a fake all
-	 * zeroes page to simulate the local APIC and another
-	 * one for the IO-APIC.
-	 */
-	if (!smp_found_config && detect_init_APIC()) {
-		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
-		apic_phys = __pa(apic_phys);
-	} else
-		apic_phys = mp_lapic_addr;
-
-	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
-				APIC_BASE, apic_phys);
-
-	/*
-	 * Fetch the APIC ID of the BSP in case we have a
-	 * default configuration (or the MP table is broken).
-	 */
-	boot_cpu_physical_apicid = read_apic_id();
-}
-
-/*
- * This initializes the IO-APIC and APIC hardware if this is
- * a UP kernel.
- */
-int apic_version[MAX_APICS];
-
-int __init APIC_init_uniprocessor(void)
-{
-	if (disable_apic) {
-		printk(KERN_INFO "Apic disabled\n");
-		return -1;
-	}
-	if (!cpu_has_apic) {
-		disable_apic = 1;
-		printk(KERN_INFO "Apic disabled by BIOS\n");
-		return -1;
-	}
-
-	enable_IR_x2apic();
-	setup_apic_routing();
-
-	verify_local_APIC();
-
-	connect_bsp_APIC();
-
-	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
-	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
-
-	setup_local_APIC();
-
-	/*
-	 * Now enable IO-APICs, actually call clear_IO_APIC
-	 * We need clear_IO_APIC before enabling vector on BP
-	 */
-	if (!skip_ioapic_setup && nr_ioapics)
-		enable_IO_APIC();
-
-	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
-		localise_nmi_watchdog();
-	end_local_APIC_setup();
-
-	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
-		setup_IO_APIC();
-	else
-		nr_ioapics = 0;
-	setup_boot_APIC_clock();
-	check_nmi_watchdog();
-	return 0;
-}
-
-/*
- * Local APIC interrupts
- */
-
-/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
- */
-asmlinkage void smp_spurious_interrupt(void)
-{
-	unsigned int v;
-	exit_idle();
-	irq_enter();
-	/*
-	 * Check if this really is a spurious interrupt and ACK it
-	 * if it is a vectored one.  Just in case...
-	 * Spurious interrupts should not be ACKed.
-	 */
-	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-		ack_APIC_irq();
-
-	add_pda(irq_spurious_count, 1);
-	irq_exit();
-}
-
-/*
- * This interrupt should never happen with our APIC/SMP architecture
- */
-asmlinkage void smp_error_interrupt(void)
-{
-	unsigned int v, v1;
-
-	exit_idle();
-	irq_enter();
-	/* First tickle the hardware, only then report what went on. -- REW */
-	v = apic_read(APIC_ESR);
-	apic_write(APIC_ESR, 0);
-	v1 = apic_read(APIC_ESR);
-	ack_APIC_irq();
-	atomic_inc(&irq_err_count);
-
-	/* Here is what the APIC error bits mean:
-	   0: Send CS error
-	   1: Receive CS error
-	   2: Send accept error
-	   3: Receive accept error
-	   4: Reserved
-	   5: Send illegal vector
-	   6: Received illegal vector
-	   7: Illegal register address
-	*/
-	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
-		smp_processor_id(), v , v1);
-	irq_exit();
-}
-
-/**
- * connect_bsp_APIC - attach the APIC to the interrupt system
- */
-void __init connect_bsp_APIC(void)
-{
-#ifdef CONFIG_X86_32
-	if (pic_mode) {
-		/*
-		 * Do not trust the local APIC being empty at bootup.
-		 */
-		clear_local_APIC();
-		/*
-		 * PIC mode, enable APIC mode in the IMCR, i.e.  connect BSP's
-		 * local APIC to INT and NMI lines.
-		 */
-		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
-				"enabling APIC mode.\n");
-		outb(0x70, 0x22);
-		outb(0x01, 0x23);
-	}
-#endif
-	enable_apic_mode();
-}
-
-/**
- * disconnect_bsp_APIC - detach the APIC from the interrupt system
- * @virt_wire_setup:	indicates, whether virtual wire mode is selected
- *
- * Virtual wire mode is necessary to deliver legacy interrupts even when the
- * APIC is disabled.
- */
-void disconnect_bsp_APIC(int virt_wire_setup)
-{
-	unsigned int value;
-
-#ifdef CONFIG_X86_32
-	if (pic_mode) {
-		/*
-		 * Put the board back into PIC mode (has an effect only on
-		 * certain older boards).  Note that APIC interrupts, including
-		 * IPIs, won't work beyond this point!  The only exception are
-		 * INIT IPIs.
-		 */
-		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
-				"entering PIC mode.\n");
-		outb(0x70, 0x22);
-		outb(0x00, 0x23);
-		return;
-	}
-#endif
-
-	/* Go back to Virtual Wire compatibility mode */
-
-	/* For the spurious interrupt use vector F, and enable it */
-	value = apic_read(APIC_SPIV);
-	value &= ~APIC_VECTOR_MASK;
-	value |= APIC_SPIV_APIC_ENABLED;
-	value |= 0xf;
-	apic_write(APIC_SPIV, value);
-
-	if (!virt_wire_setup) {
-		/*
-		 * For LVT0 make it edge triggered, active high,
-		 * external and enabled
-		 */
-		value = apic_read(APIC_LVT0);
-		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
-			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
-			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
-		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
-		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
-		apic_write(APIC_LVT0, value);
-	} else {
-		/* Disable LVT0 */
-		apic_write(APIC_LVT0, APIC_LVT_MASKED);
-	}
-
-	/*
-	 * For LVT1 make it edge triggered, active high,
-	 * nmi and enabled
-	 */
-	value = apic_read(APIC_LVT1);
-	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
-			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
-			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
-	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
-	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
-	apic_write(APIC_LVT1, value);
-}
-
-void __cpuinit generic_processor_info(int apicid, int version)
-{
-	int cpu;
-	cpumask_t tmp_map;
-
-	/*
-	 * Validate version
-	 */
-	if (version == 0x0) {
-		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
-				"fixing up to 0x10. (tell your hw vendor)\n",
-				version);
-		version = 0x10;
-	}
-	apic_version[apicid] = version;
-
-	if (num_processors >= NR_CPUS) {
-		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-			"  Processor ignored.\n", NR_CPUS);
-		return;
-	}
-
-	num_processors++;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
-
-	physid_set(apicid, phys_cpu_present_map);
-	if (apicid == boot_cpu_physical_apicid) {
-		/*
-		 * x86_bios_cpu_apicid is required to have processors listed
-		 * in same order as logical cpu numbers. Hence the first
-		 * entry is BSP, and so on.
-		 */
-		cpu = 0;
-	}
-	if (apicid > max_physical_apicid)
-		max_physical_apicid = apicid;
-
-#ifdef CONFIG_X86_32
-	/*
-	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-	 * but we need to work other dependencies like SMP_SUSPEND etc
-	 * before this can be done without some confusion.
-	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-	 *       - Ashok Raj <ashok.raj@intel.com>
-	 */
-	if (max_physical_apicid >= 8) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (!APIC_XAPIC(version)) {
-				def_to_bigsmp = 0;
-				break;
-			}
-			/* If P4 and above fall through */
-		case X86_VENDOR_AMD:
-			def_to_bigsmp = 1;
-		}
-	}
-#endif
-
-#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
-	/* are we being called early in kernel startup? */
-	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
-		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
-		cpu_to_apicid[cpu] = apicid;
-		bios_cpu_apicid[cpu] = apicid;
-	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
-	}
-#endif
-
-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
-}
-
-int hard_smp_processor_id(void)
-{
-	return read_apic_id();
-}
-
-/*
- * Power management
- */
-#ifdef CONFIG_PM
-
-static struct {
-	/*
-	 * 'active' is true if the local APIC was enabled by us and
-	 * not the BIOS; this signifies that we are also responsible
-	 * for disabling it before entering apm/acpi suspend
-	 */
-	int active;
-	/* r/w apic fields */
-	unsigned int apic_id;
-	unsigned int apic_taskpri;
-	unsigned int apic_ldr;
-	unsigned int apic_dfr;
-	unsigned int apic_spiv;
-	unsigned int apic_lvtt;
-	unsigned int apic_lvtpc;
-	unsigned int apic_lvt0;
-	unsigned int apic_lvt1;
-	unsigned int apic_lvterr;
-	unsigned int apic_tmict;
-	unsigned int apic_tdcr;
-	unsigned int apic_thmr;
-} apic_pm_state;
-
-static int lapic_suspend(struct sys_device *dev, pm_message_t state)
-{
-	unsigned long flags;
-	int maxlvt;
-
-	if (!apic_pm_state.active)
-		return 0;
-
-	maxlvt = lapic_get_maxlvt();
-
-	apic_pm_state.apic_id = apic_read(APIC_ID);
-	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
-	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
-	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
-	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
-	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
-	if (maxlvt >= 4)
-		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
-	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
-	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
-	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
-	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
-	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
-	if (maxlvt >= 5)
-		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
-#endif
-
-	local_irq_save(flags);
-	disable_local_APIC();
-	local_irq_restore(flags);
-	return 0;
-}
-
-static int lapic_resume(struct sys_device *dev)
-{
-	unsigned int l, h;
-	unsigned long flags;
-	int maxlvt;
-
-	if (!apic_pm_state.active)
-		return 0;
-
-	maxlvt = lapic_get_maxlvt();
-
-	local_irq_save(flags);
-
-#ifdef CONFIG_X86_64
-	if (x2apic)
-		enable_x2apic();
-	else
-#endif
-	{
-		/*
-		 * Make sure the APICBASE points to the right address
-		 *
-		 * FIXME! This will be wrong if we ever support suspend on
-		 * SMP! We'll need to do this as part of the CPU restore!
-		 */
-		rdmsr(MSR_IA32_APICBASE, l, h);
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-	}
-
-	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
-	apic_write(APIC_ID, apic_pm_state.apic_id);
-	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
-	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
-	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
-	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
-	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
-	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
-	if (maxlvt >= 5)
-		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
-#endif
-	if (maxlvt >= 4)
-		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
-	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
-	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
-	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
-	apic_write(APIC_ESR, 0);
-	apic_read(APIC_ESR);
-	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
-	apic_write(APIC_ESR, 0);
-	apic_read(APIC_ESR);
-
-	local_irq_restore(flags);
-
-	return 0;
-}
-
-/*
- * This device has no shutdown method - fully functioning local APICs
- * are needed on every CPU up until machine_halt/restart/poweroff.
- */
-
-static struct sysdev_class lapic_sysclass = {
-	.name		= "lapic",
-	.resume		= lapic_resume,
-	.suspend	= lapic_suspend,
-};
-
-static struct sys_device device_lapic = {
-	.id	= 0,
-	.cls	= &lapic_sysclass,
-};
-
-static void __cpuinit apic_pm_activate(void)
-{
-	apic_pm_state.active = 1;
-}
-
-static int __init init_lapic_sysfs(void)
-{
-	int error;
-
-	if (!cpu_has_apic)
-		return 0;
-	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
-
-	error = sysdev_class_register(&lapic_sysclass);
-	if (!error)
-		error = sysdev_register(&device_lapic);
-	return error;
-}
-device_initcall(init_lapic_sysfs);
-
-#else	/* CONFIG_PM */
-
-static void apic_pm_activate(void) { }
-
-#endif	/* CONFIG_PM */
-
-/*
- * apic_is_clustered_box() -- Check if we can expect good TSC
- *
- * Thus far, the major user of this is IBM's Summit2 series:
- *
- * Clustered boxes may have unsynced TSC problems if they are
- * multi-chassis. Use available data to take a good guess.
- * If in doubt, go HPET.
- */
-__cpuinit int apic_is_clustered_box(void)
-{
-	int i, clusters, zeros;
-	unsigned id;
-	u16 *bios_cpu_apicid;
-	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
-
-	/*
-	 * there is not this kind of box with AMD CPU yet.
-	 * Some AMD box with quadcore cpu and 8 sockets apicid
-	 * will be [4, 0x23] or [8, 0x27] could be thought to
-	 * vsmp box still need checking...
-	 */
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
-		return 0;
-
-	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
-
-	for (i = 0; i < NR_CPUS; i++) {
-		/* are we being called early in kernel startup? */
-		if (bios_cpu_apicid) {
-			id = bios_cpu_apicid[i];
-		}
-		else if (i < nr_cpu_ids) {
-			if (cpu_present(i))
-				id = per_cpu(x86_bios_cpu_apicid, i);
-			else
-				continue;
-		}
-		else
-			break;
-
-		if (id != BAD_APICID)
-			__set_bit(APIC_CLUSTERID(id), clustermap);
-	}
-
-	/* Problem:  Partially populated chassis may not have CPUs in some of
-	 * the APIC clusters they have been allocated.  Only present CPUs have
-	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
-	 * Since clusters are allocated sequentially, count zeros only if
-	 * they are bounded by ones.
-	 */
-	clusters = 0;
-	zeros = 0;
-	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
-		if (test_bit(i, clustermap)) {
-			clusters += 1 + zeros;
-			zeros = 0;
-		} else
-			++zeros;
-	}
-
-	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
-	 * not guaranteed to be synced between boards
-	 */
-	if (is_vsmp_box() && clusters > 1)
-		return 1;
-
-	/*
-	 * If clusters > 2, then should be multi-chassis.
-	 * May have to revisit this when multi-core + hyperthreaded CPUs come
-	 * out, but AFAIK this will work even for them.
-	 */
-	return (clusters > 2);
-}
-
-static __init int setup_nox2apic(char *str)
-{
-	disable_x2apic = 1;
-	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
-	return 0;
-}
-early_param("nox2apic", setup_nox2apic);
-
-
-/*
- * APIC command line parameters
- */
-static int __init setup_disableapic(char *arg)
-{
-	disable_apic = 1;
-	setup_clear_cpu_cap(X86_FEATURE_APIC);
-	return 0;
-}
-early_param("disableapic", setup_disableapic);
-
-/* same as disableapic, for compatibility */
-static int __init setup_nolapic(char *arg)
-{
-	return setup_disableapic(arg);
-}
-early_param("nolapic", setup_nolapic);
-
-static int __init parse_lapic_timer_c2_ok(char *arg)
-{
-	local_apic_timer_c2_ok = 1;
-	return 0;
-}
-early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
-
-static int __init parse_disable_apic_timer(char *arg)
-{
-	disable_apic_timer = 1;
-	return 0;
-}
-early_param("noapictimer", parse_disable_apic_timer);
-
-static int __init parse_nolapic_timer(char *arg)
-{
-	disable_apic_timer = 1;
-	return 0;
-}
-early_param("nolapic_timer", parse_nolapic_timer);
-
-static __init int setup_apicpmtimer(char *s)
-{
-	apic_calibrate_pmtmr = 1;
-	notsc_setup(NULL);
-	return 0;
-}
-__setup("apicpmtimer", setup_apicpmtimer);
-
-static int __init apic_set_verbosity(char *arg)
-{
-	if (!arg)  {
-#ifdef CONFIG_X86_64
-		skip_ioapic_setup = 0;
-		ioapic_force = 1;
-		return 0;
-#endif
-		return -EINVAL;
-	}
-
-	if (strcmp("debug", arg) == 0)
-		apic_verbosity = APIC_DEBUG;
-	else if (strcmp("verbose", arg) == 0)
-		apic_verbosity = APIC_VERBOSE;
-	else {
-		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-			" use apic=verbose or apic=debug\n", arg);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-early_param("apic", apic_set_verbosity);
-
-static int __init lapic_insert_resource(void)
-{
-	if (!apic_phys)
-		return -1;
-
-	/* Put local APIC into the resource map. */
-	lapic_resource.start = apic_phys;
-	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
-	insert_resource(&iomem_resource, &lapic_resource);
-
-	return 0;
-}
-
-/*
- * need call insert after e820_reserve_resources()
- * that is using request_resource
- */
-late_initcall(lapic_insert_resource);
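
The file removed above carried both register-access backends (xapic_ops doing MMIO reads and writes, x2apic_ops going through MSRs at 0x800 + (reg >> 4)) behind a single apic_ops pointer, and the apic.c additions earlier in this patch keep the same indirection (see the apic_ops = &x2apic_ops switch in enable_IR_x2apic()). A minimal stand-alone sketch of that pattern, with fake backends and purely illustrative output:

#include <stdio.h>
#include <stdint.h>

struct apic_ops_sketch {
	void (*write)(uint32_t reg, uint32_t val);
};

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("xAPIC:  MMIO write, reg 0x%03x = 0x%x\n", reg, val);
}

static void msr_write(uint32_t reg, uint32_t val)
{
	/* x2APIC exposes register 'reg' as MSR 0x800 + (reg >> 4) */
	printf("x2APIC: MSR 0x%x = 0x%x\n", 0x800 + (reg >> 4), val);
}

static struct apic_ops_sketch xapic_sketch  = { .write = mmio_write };
static struct apic_ops_sketch x2apic_sketch = { .write = msr_write };
static struct apic_ops_sketch *ops = &xapic_sketch;

int main(void)
{
	ops->write(0xB0, 0);	/* EOI through the MMIO backend */
	ops = &x2apic_sketch;	/* the switch enable_IR_x2apic() performs */
	ops->write(0xB0, 0);	/* same call site, now an MSR write */
	return 0;
}
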
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index fdd585f..f0dfe6f 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -1,8 +1,6 @@
 /*
  * BIOS run time interface routines.
  *
- *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
- *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  the Free Software Foundation; either version 2 of the License, or
@@ -16,33 +14,128 @@
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Russ Anderson
  */
 
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <linux/io.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv_hub.h>
 
-const char *
-x86_bios_strerror(long status)
+struct uv_systab uv_systab;
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 {
-	const char *str;
-	switch (status) {
-	case  0: str = "Call completed without error";	break;
-	case -1: str = "Not implemented";		break;
-	case -2: str = "Invalid argument";		break;
-	case -3: str = "Call completed with error";	break;
-	default: str = "Unknown BIOS status code";	break;
+	struct uv_systab *tab = &uv_systab;
+
+	if (!tab->function)
+		/*
+		 * BIOS does not support UV systab
+		 */
+		return BIOS_STATUS_UNIMPLEMENTED;
+
+	return efi_call6((void *)__va(tab->function),
+					(u64)which, a1, a2, a3, a4, a5);
+}
+
+s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+					u64 a4, u64 a5)
+{
+	unsigned long bios_flags;
+	s64 ret;
+
+	local_irq_save(bios_flags);
+	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	local_irq_restore(bios_flags);
+
+	return ret;
+}
+
+s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+					u64 a4, u64 a5)
+{
+	s64 ret;
+
+	preempt_disable();
+	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	preempt_enable();
+
+	return ret;
+}
+
+
+long sn_partition_id;
+EXPORT_SYMBOL_GPL(sn_partition_id);
+long uv_coherency_id;
+EXPORT_SYMBOL_GPL(uv_coherency_id);
+long uv_region_size;
+EXPORT_SYMBOL_GPL(uv_region_size);
+int uv_type;
+
+
+s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
+		long *region)
+{
+	s64 ret;
+	u64 v0, v1;
+	union partition_info_u part;
+
+	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
+				(u64)(&v0), (u64)(&v1), 0, 0);
+	if (ret != BIOS_STATUS_SUCCESS)
+		return ret;
+
+	part.val = v0;
+	if (uvtype)
+		*uvtype = part.hub_version;
+	if (partid)
+		*partid = part.partition_id;
+	if (coher)
+		*coher = part.coherence_id;
+	if (region)
+		*region = part.region_size;
+	return ret;
+}
+
+
+s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+{
+	return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+			   (u64)ticks_per_second, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_freq_base);
+
+
+#ifdef CONFIG_EFI
+void uv_bios_init(void)
+{
+	struct uv_systab *tab;
+
+	if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+	    (efi.uv_systab == (unsigned long)NULL)) {
+		printk(KERN_CRIT "No EFI UV System Table.\n");
+		uv_systab.function = (unsigned long)NULL;
+		return;
 	}
-	return str;
-}
 
-long
-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
-		   unsigned long *drift_info)
-{
-	struct uv_bios_retval isrv;
+	tab = (struct uv_systab *)ioremap(efi.uv_systab,
+					sizeof(struct uv_systab));
+	if (strncmp(tab->signature, "UVST", 4) != 0)
+		printk(KERN_ERR "bad signature in UV system table!");
 
-	BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-	*ticks_per_second = isrv.v0;
-	*drift_info = isrv.v1;
-	return isrv.status;
+	/*
+	 * Copy table to permanent spot for later use.
+	 */
+	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
+	iounmap(tab);
+
+	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
 }
-EXPORT_SYMBOL_GPL(x86_bios_freq_base);
+#else	/* !CONFIG_EFI */
+
+void uv_bios_init(void) { }
+#endif
+
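
The rewritten bios_uv.c above exports uv_bios_get_sn_info() and uv_bios_freq_base() for the rest of the UV code. A hypothetical caller could look like the sketch below; the function name, the fc argument of 0 and the error handling are invented for illustration, and only the helpers and the BIOS_STATUS_SUCCESS code introduced by this patch are assumed:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/uv/bios.h>

static int __init uv_sn_info_example(void)
{
	long partid, coher, region;
	int uvtype;
	s64 ret;

	ret = uv_bios_get_sn_info(0, &uvtype, &partid, &coher, &region);
	if (ret != BIOS_STATUS_SUCCESS) {
		printk(KERN_ERR "uv_bios_get_sn_info failed: %lld\n",
		       (long long)ret);
		return -ENODEV;
	}

	printk(KERN_INFO "UV: hub version %d, partition %ld, coherence %ld\n",
	       uvtype, partid, coher);
	return 0;
}
late_initcall(uv_sn_info_example);
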
diff --git a/arch/x86/kernel/cpu/.gitignore b/arch/x86/kernel/cpu/.gitignore
new file mode 100644
index 0000000..667df55
--- /dev/null
+++ b/arch/x86/kernel/cpu/.gitignore
@@ -0,0 +1 @@
+capflags.c
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 32e7352..8f1e31d 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -249,7 +249,7 @@
 	}
 	numa_set_node(cpu, node);
 
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 06fcce5..b046185 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -1,5 +1,5 @@
 /*
- *  (C) 2001-2004  Dave Jones. <davej@codemonkey.org.uk>
+ *  (C) 2001-2004  Dave Jones. <davej@redhat.com>
  *  (C) 2002  Padraig Brady. <padraig@antefacto.com>
  *
  *  Licensed under the terms of the GNU GPL License version 2.
@@ -1019,7 +1019,7 @@
 module_param(revid_errata, int, 0644);
 MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
 
-MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR ("Dave Jones <davej@redhat.com>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
 MODULE_LICENSE ("GPL");
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index b5ced80..c1ac579 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -246,7 +246,7 @@
 }
 
 
-MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
 MODULE_LICENSE("GPL");
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 0a61159..7c7d56b 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -1,6 +1,6 @@
 /*
  *  AMD K7 Powernow driver.
- *  (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs.
+ *  (C) 2003 Dave Jones on behalf of SuSE Labs.
  *  (C) 2003-2004 Dave Jones <davej@redhat.com>
  *
  *  Licensed under the terms of the GNU GPL License version 2.
@@ -692,7 +692,7 @@
 module_param(acpi_force,  int, 0444);
 MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
 
-MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR ("Dave Jones <davej@redhat.com>");
 MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors.");
 MODULE_LICENSE ("GPL");
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 84bb395..008d23b 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -7,7 +7,7 @@
  *  Support : mark.langsdorf@amd.com
  *
  *  Based on the powernow-k7.c module written by Dave Jones.
- *  (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
+ *  (C) 2003 Dave Jones on behalf of SuSE Labs
  *  (C) 2004 Dominik Brodowski <linux@brodo.de>
  *  (C) 2004 Pavel Machek <pavel@suse.cz>
  *  Licensed under the terms of the GNU GPL License version 2.
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 191f726..04d0376 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -431,7 +431,7 @@
 }
 
 
-MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
+MODULE_AUTHOR ("Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges.");
 MODULE_LICENSE ("GPL");
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 99468db..cce0b61 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -174,7 +174,7 @@
 		node = first_node(node_online_map);
 	numa_set_node(cpu, node);
 
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
index f390c9f..dd3af6e 100644
--- a/arch/x86/kernel/cpu/mcheck/k7.c
+++ b/arch/x86/kernel/cpu/mcheck/k7.c
@@ -1,6 +1,6 @@
 /*
- * Athlon/Hammer specific Machine Check Exception Reporting
- * (C) Copyright 2002 Dave Jones <davej@codemonkey.org.uk>
+ * Athlon specific Machine Check Exception Reporting
+ * (C) Copyright 2002 Dave Jones <davej@redhat.com>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 774d87c..0ebf3fc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -1,6 +1,6 @@
 /*
  * mce.c - x86 Machine Check Exception Reporting
- * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@codemonkey.org.uk>
+ * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@redhat.com>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index cc1fccd..a74af12 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -1,7 +1,7 @@
 /*
  * Non Fatal Machine Check Exception Reporting
  *
- * (C) Copyright 2002 Dave Jones. <davej@codemonkey.org.uk>
+ * (C) Copyright 2002 Dave Jones. <davej@redhat.com>
  *
  * This file contains routines to check for non-fatal MCEs every 15s
  *
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 6bff382..9abd48b 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -17,6 +17,8 @@
 #include <linux/bitops.h>
 #include <linux/smp.h>
 #include <linux/nmi.h>
+#include <linux/kprobes.h>
+
 #include <asm/apic.h>
 #include <asm/intel_arch_perfmon.h>
 
@@ -336,7 +338,8 @@
 	release_perfctr_nmi(wd_ops->perfctr);
 }
 
-static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
+static void __kprobes
+single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 {
 	/* start the cycle over again */
 	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
@@ -401,7 +404,7 @@
 	return 1;
 }
 
-static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
+static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 {
 	/*
 	 * P6 based Pentium M need to re-unmask
@@ -605,7 +608,7 @@
 	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
 }
 
-static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
+static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 {
 	unsigned dummy;
 	/*
@@ -784,7 +787,7 @@
 	return hz;
 }
 
-int lapic_wd_event(unsigned nmi_hz)
+int __kprobes lapic_wd_event(unsigned nmi_hz)
 {
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 	u64 ctr;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 72d0c56..f7cdb3b 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -13,6 +13,9 @@
 
 static void *kdump_buf_page;
 
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index e90a60e..045b36c 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -10,6 +10,9 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 945a31c..1119d24 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -367,6 +367,10 @@
 			efi.smbios = config_tables[i].table;
 			printk(" SMBIOS=0x%lx ", config_tables[i].table);
 		} else if (!efi_guidcmp(config_tables[i].guid,
+					UV_SYSTEM_TABLE_GUID)) {
+			efi.uv_systab = config_tables[i].table;
+			printk(" UVsystab=0x%lx ", config_tables[i].table);
+		} else if (!efi_guidcmp(config_tables[i].guid,
 					HCDP_TABLE_GUID)) {
 			efi.hcdp = config_tables[i].table;
 			printk(" HCDP=0x%lx ", config_tables[i].table);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index b21fbfa..c356423 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -629,7 +629,7 @@
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
 vector=0
-.rept NR_IRQS
+.rept NR_VECTORS
 	ALIGN
  .if vector
 	CFI_ADJUST_CFA_OFFSET -4
@@ -1153,20 +1153,6 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(mcount)
-	pushl %eax
-	pushl %ecx
-	pushl %edx
-	movl 0xc(%esp), %eax
-	subl $MCOUNT_INSN_SIZE, %eax
-
-.globl mcount_call
-mcount_call:
-	call ftrace_stub
-
-	popl %edx
-	popl %ecx
-	popl %eax
-
 	ret
 END(mcount)
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1db6ce4..09e7145 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -64,32 +64,6 @@
 #ifdef CONFIG_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
-
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
-
-	movq 0x38(%rsp), %rdi
-	subq $MCOUNT_INSN_SIZE, %rdi
-
-.globl mcount_call
-mcount_call:
-	call ftrace_stub
-
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
-
 	retq
 END(mcount)
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index ab115cd..d073d98 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -11,17 +11,18 @@
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
-#include <asm/alternative.h>
 #include <asm/ftrace.h>
+#include <asm/nops.h>
 
 
 /* Long is fine, even if it is only 4 bytes ;-) */
-static long *ftrace_nop;
+static unsigned long *ftrace_nop;
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -60,11 +61,7 @@
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
-	unsigned replaced;
-	unsigned old = *(unsigned *)old_code; /* 4 bytes */
-	unsigned new = *(unsigned *)new_code; /* 4 bytes */
-	unsigned char newch = new_code[4];
-	int faulted = 0;
+	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
 	 * Note: Due to modules and __init, code can
@@ -72,29 +69,20 @@
 	 *  as well as code changing.
 	 *
 	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * kstop_machine, or before SMP starts.
 	 */
-	asm volatile (
-		"1: lock\n"
-		"   cmpxchg %3, (%2)\n"
-		"   jnz 2f\n"
-		"   movb %b4, 4(%2)\n"
-		"2:\n"
-		".section .fixup, \"ax\"\n"
-		"3:	movl $1, %0\n"
-		"	jmp 2b\n"
-		".previous\n"
-		_ASM_EXTABLE(1b, 3b)
-		: "=r"(faulted), "=a"(replaced)
-		: "r"(ip), "r"(new), "c"(newch),
-		  "0"(faulted), "a"(old)
-		: "memory");
+	if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
+		return 1;
+
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return 2;
+
+	WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
+				    MCOUNT_INSN_SIZE));
+
 	sync_core();
 
-	if (replaced != old && replaced != new)
-		faulted = 2;
-
-	return faulted;
+	return 0;
 }
 
 notrace int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -112,30 +100,76 @@
 
 notrace int ftrace_mcount_set(unsigned long *data)
 {
-	unsigned long ip = (long)(&mcount_call);
-	unsigned long *addr = data;
-	unsigned char old[MCOUNT_INSN_SIZE], *new;
-
-	/*
-	 * Replace the mcount stub with a pointer to the
-	 * ip recorder function.
-	 */
-	memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
-	new = ftrace_call_replace(ip, *addr);
-	*addr = ftrace_modify_code(ip, old, new);
-
+	/* mcount is initialized as a nop */
+	*data = 0;
 	return 0;
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	const unsigned char *const *noptable = find_nop_table();
+	extern const unsigned char ftrace_test_p6nop[];
+	extern const unsigned char ftrace_test_nop5[];
+	extern const unsigned char ftrace_test_jmp[];
+	int faulted = 0;
 
-	/* This is running in kstop_machine */
+	/*
+	 * There is no good nop for all x86 archs.
+	 * We will default to using the P6_NOP5, but first we
+	 * will test to make sure that the nop will actually
+	 * work on this CPU. If it faults, we will then
+	 * fall back to a less efficient 5 byte nop. If that fails
+	 * we then just use a jmp as our nop. This isn't the most
+	 * efficient nop, but we cannot use a multi-part nop
+	 * since we would then risk being preempted in the middle
+	 * of that nop, and if we enabled tracing then, it might
+	 * cause a system crash.
+	 *
+	 * TODO: check the cpuid to determine the best nop.
+	 */
+	asm volatile (
+		"jmp ftrace_test_jmp\n"
+		/* This code needs to stay around */
+		".section .text, \"ax\"\n"
+		"ftrace_test_jmp:"
+		"jmp ftrace_test_p6nop\n"
+		"nop\n"
+		"nop\n"
+		"nop\n"  /* 2 byte jmp + 3 bytes */
+		"ftrace_test_p6nop:"
+		P6_NOP5
+		"jmp 1f\n"
+		"ftrace_test_nop5:"
+		".byte 0x66,0x66,0x66,0x66,0x90\n"
+		"jmp 1f\n"
+		".previous\n"
+		"1:"
+		".section .fixup, \"ax\"\n"
+		"2:	movl $1, %0\n"
+		"	jmp ftrace_test_nop5\n"
+		"3:	movl $2, %0\n"
+		"	jmp 1b\n"
+		".previous\n"
+		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
+		_ASM_EXTABLE(ftrace_test_nop5, 3b)
+		: "=r"(faulted) : "0" (faulted));
 
-	ftrace_mcount_set(data);
+	switch (faulted) {
+	case 0:
+		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
+		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
+		break;
+	case 1:
+		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
+		ftrace_nop = (unsigned long *)ftrace_test_nop5;
+		break;
+	case 2:
+		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
+		ftrace_nop = (unsigned long *)ftrace_test_jmp;
+		break;
+	}
 
-	ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE];
+	/* The return code is returned via data */
+	*(unsigned long *)data = 0;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 9eca5ba..2ec2de8 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -179,8 +179,10 @@
 	 * is an example).
 	 */
 	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
-		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
+		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
+		printk(KERN_DEBUG "system APIC can only use physical flat\n");
 		return 1;
+	}
 #endif
 
 	return 0;
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 33581d9..bfd5328 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -341,12 +341,12 @@
 
 static __init void uv_rtc_init(void)
 {
-	long status, ticks_per_sec, drift;
+	long status;
+	u64 ticks_per_sec;
 
-	status =
-	    x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
-					&drift);
-	if (status != 0 || ticks_per_sec < 100000) {
+	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
+					&ticks_per_sec);
+	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
 		printk(KERN_WARNING
 			"unable to determine platform RTC clock frequency, "
 			"guessing.\n");
@@ -356,7 +356,22 @@
 		sn_rtc_cycles_per_second = ticks_per_sec;
 }
 
-static bool uv_system_inited;
+/*
+ * Called on each cpu to initialize the per_cpu UV data area.
+ * 	ZZZ hotplug not supported yet
+ */
+void __cpuinit uv_cpu_init(void)
+{
+	/* CPU 0 initialization will be done via uv_system_init. */
+	if (!uv_blade_info)
+		return;
+
+	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
+
+	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
+		set_x2apic_extra_bits(uv_hub_info->pnode);
+}
+
 
 void __init uv_system_init(void)
 {
@@ -412,6 +427,9 @@
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
 
+	uv_bios_init();
+	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
+			    &uv_coherency_id, &uv_region_size);
 	uv_rtc_init();
 
 	for_each_present_cpu(cpu) {
@@ -433,7 +451,7 @@
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
+		uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 		max_pnode = max(pnode, max_pnode);
@@ -448,21 +466,6 @@
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
 	map_mmioh_high(max_pnode);
-	uv_system_inited = true;
+
+	uv_cpu_init();
 }
-
-/*
- * Called on each cpu to initialize the per_cpu UV data area.
- * 	ZZZ hotplug not supported yet
- */
-void __cpuinit uv_cpu_init(void)
-{
-	BUG_ON(!uv_system_inited);
-
-	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
-
-	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-		set_x2apic_extra_bits(uv_hub_info->pnode);
-}
-
-
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index acf62fc..77017e8 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,29 +1,49 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/cpu.h>
 #include <linux/pm.h>
+#include <linux/io.h>
 
 #include <asm/fixmap.h>
-#include <asm/hpet.h>
 #include <asm/i8253.h>
-#include <asm/io.h>
+#include <asm/hpet.h>
 
-#define HPET_MASK	CLOCKSOURCE_MASK(32)
-#define HPET_SHIFT	22
+#define HPET_MASK			CLOCKSOURCE_MASK(32)
+#define HPET_SHIFT			22
 
 /* FSEC = 10^-15
    NSEC = 10^-9 */
-#define FSEC_PER_NSEC	1000000L
+#define FSEC_PER_NSEC			1000000L
+
+#define HPET_DEV_USED_BIT		2
+#define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
+#define HPET_DEV_VALID			0x8
+#define HPET_DEV_FSB_CAP		0x1000
+#define HPET_DEV_PERI_CAP		0x2000
+
+#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
  * HPET address is set in acpi/boot.c, when an ACPI entry exists
  */
-unsigned long hpet_address;
-static void __iomem *hpet_virt_address;
+unsigned long				hpet_address;
+unsigned long				hpet_num_timers;
+static void __iomem			*hpet_virt_address;
+
+struct hpet_dev {
+	struct clock_event_device	evt;
+	unsigned int			num;
+	int				cpu;
+	unsigned int			irq;
+	unsigned int			flags;
+	char				name[10];
+};
 
 unsigned long hpet_readl(unsigned long a)
 {
@@ -59,7 +79,7 @@
 static int boot_hpet_disable;
 int hpet_force_user;
 
-static int __init hpet_setup(char* str)
+static int __init hpet_setup(char *str)
 {
 	if (str) {
 		if (!strncmp("disable", str, 7))
@@ -80,7 +100,7 @@
 
 static inline int is_hpet_capable(void)
 {
-	return (!boot_hpet_disable && hpet_address);
+	return !boot_hpet_disable && hpet_address;
 }
 
 /*
@@ -102,6 +122,9 @@
  * timer 0 and timer 1 in case of RTC emulation.
  */
 #ifdef CONFIG_HPET
+
+static void hpet_reserve_msi_timers(struct hpet_data *hd);
+
 static void hpet_reserve_platform_timers(unsigned long id)
 {
 	struct hpet __iomem *hpet = hpet_virt_address;
@@ -111,10 +134,10 @@
 
 	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
 
-	memset(&hd, 0, sizeof (hd));
-	hd.hd_phys_address = hpet_address;
-	hd.hd_address = hpet;
-	hd.hd_nirqs = nrtimers;
+	memset(&hd, 0, sizeof(hd));
+	hd.hd_phys_address	= hpet_address;
+	hd.hd_address		= hpet;
+	hd.hd_nirqs		= nrtimers;
 	hpet_reserve_timer(&hd, 0);
 
 #ifdef CONFIG_HPET_EMULATE_RTC
@@ -130,10 +153,12 @@
 	hd.hd_irq[1] = HPET_LEGACY_RTC;
 
 	for (i = 2; i < nrtimers; timer++, i++) {
-		hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
-			Tn_INT_ROUTE_CNF_SHIFT;
+		hd.hd_irq[i] = (readl(&timer->hpet_config) &
+			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
 	}
 
+	hpet_reserve_msi_timers(&hd);
+
 	hpet_alloc(&hd);
 
 }
@@ -227,60 +252,70 @@
 	printk(KERN_DEBUG "hpet clockevent registered\n");
 }
 
-static void hpet_legacy_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int hpet_setup_msi_irq(unsigned int irq);
+
+static void hpet_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt, int timer)
 {
 	unsigned long cfg, cmp, now;
 	uint64_t delta;
 
-	switch(mode) {
+	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
-		delta >>= hpet_clockevent.shift;
+		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
+		delta >>= evt->shift;
 		now = hpet_readl(HPET_COUNTER);
 		cmp = now + (unsigned long) delta;
-		cfg = hpet_readl(HPET_T0_CFG);
+		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
-		hpet_writel(cfg, HPET_T0_CFG);
+		hpet_writel(cfg, HPET_Tn_CFG(timer));
 		/*
 		 * The first write after writing TN_SETVAL to the
 		 * config register sets the counter value, the second
 		 * write sets the period.
 		 */
-		hpet_writel(cmp, HPET_T0_CMP);
+		hpet_writel(cmp, HPET_Tn_CMP(timer));
 		udelay(1);
-		hpet_writel((unsigned long) delta, HPET_T0_CMP);
+		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
 		break;
 
 	case CLOCK_EVT_MODE_ONESHOT:
-		cfg = hpet_readl(HPET_T0_CFG);
+		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		cfg &= ~HPET_TN_PERIODIC;
 		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
-		hpet_writel(cfg, HPET_T0_CFG);
+		hpet_writel(cfg, HPET_Tn_CFG(timer));
 		break;
 
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
-		cfg = hpet_readl(HPET_T0_CFG);
+		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		cfg &= ~HPET_TN_ENABLE;
-		hpet_writel(cfg, HPET_T0_CFG);
+		hpet_writel(cfg, HPET_Tn_CFG(timer));
 		break;
 
 	case CLOCK_EVT_MODE_RESUME:
-		hpet_enable_legacy_int();
+		if (timer == 0) {
+			hpet_enable_legacy_int();
+		} else {
+			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+			hpet_setup_msi_irq(hdev->irq);
+			disable_irq(hdev->irq);
+			irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
+			enable_irq(hdev->irq);
+		}
 		break;
 	}
 }
 
-static int hpet_legacy_next_event(unsigned long delta,
-				  struct clock_event_device *evt)
+static int hpet_next_event(unsigned long delta,
+			   struct clock_event_device *evt, int timer)
 {
 	u32 cnt;
 
 	cnt = hpet_readl(HPET_COUNTER);
 	cnt += (u32) delta;
-	hpet_writel(cnt, HPET_T0_CMP);
+	hpet_writel(cnt, HPET_Tn_CMP(timer));
 
 	/*
 	 * We need to read back the CMP register to make sure that
@@ -292,6 +327,347 @@
 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
 
+static void hpet_legacy_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *evt)
+{
+	hpet_set_mode(mode, evt, 0);
+}
+
+static int hpet_legacy_next_event(unsigned long delta,
+			struct clock_event_device *evt)
+{
+	return hpet_next_event(delta, evt, 0);
+}
+
+/*
+ * HPET MSI Support
+ */
+#ifdef CONFIG_PCI_MSI
+
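+/* Per-cpu pointer to the MSI HPET timer owned by that cpu, if any. */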
+static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
+static struct hpet_dev	*hpet_devs;
+
+void hpet_msi_unmask(unsigned int irq)
+{
+	struct hpet_dev *hdev = get_irq_data(irq);
+	unsigned long cfg;
+
+	/* unmask it */
+	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+	cfg |= HPET_TN_FSB;
+	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+}
+
+void hpet_msi_mask(unsigned int irq)
+{
+	unsigned long cfg;
+	struct hpet_dev *hdev = get_irq_data(irq);
+
+	/* mask it */
+	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+	cfg &= ~HPET_TN_FSB;
+	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+}
+
+void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+{
+	struct hpet_dev *hdev = get_irq_data(irq);
+
+	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
+	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
+}
+
+void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+{
+	struct hpet_dev *hdev = get_irq_data(irq);
+
+	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
+	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
+	msg->address_hi = 0;
+}
+
+static void hpet_msi_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+	hpet_set_mode(mode, evt, hdev->num);
+}
+
+static int hpet_msi_next_event(unsigned long delta,
+				struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+	return hpet_next_event(delta, evt, hdev->num);
+}
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+	if (arch_setup_hpet_msi(irq)) {
+		destroy_irq(irq);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int hpet_assign_irq(struct hpet_dev *dev)
+{
+	unsigned int irq;
+
+	irq = create_irq();
+	if (!irq)
+		return -EINVAL;
+
+	set_irq_data(irq, dev);
+
+	if (hpet_setup_msi_irq(irq))
+		return -EINVAL;
+
+	dev->irq = irq;
+	return 0;
+}
+
+static irqreturn_t hpet_interrupt_handler(int irq, void *data)
+{
+	struct hpet_dev *dev = (struct hpet_dev *)data;
+	struct clock_event_device *hevt = &dev->evt;
+
+	if (!hevt->event_handler) {
+		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
+				dev->num);
+		return IRQ_HANDLED;
+	}
+
+	hevt->event_handler(hevt);
+	return IRQ_HANDLED;
+}
+
+static int hpet_setup_irq(struct hpet_dev *dev)
+{
+
+	if (request_irq(dev->irq, hpet_interrupt_handler,
+			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+		return -1;
+
+	disable_irq(dev->irq);
+	irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+	enable_irq(dev->irq);
+
+	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
+			 dev->name, dev->irq);
+
+	return 0;
+}
+
+/* This should be called on the specified @cpu */
+static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
+{
+	struct clock_event_device *evt = &hdev->evt;
+	uint64_t hpet_freq;
+
+	WARN_ON(cpu != smp_processor_id());
+	if (!(hdev->flags & HPET_DEV_VALID))
+		return;
+
+	if (hpet_setup_msi_irq(hdev->irq))
+		return;
+
+	hdev->cpu = cpu;
+	per_cpu(cpu_hpet_dev, cpu) = hdev;
+	evt->name = hdev->name;
+	hpet_setup_irq(hdev);
+	evt->irq = hdev->irq;
+
+	evt->rating = 110;
+	evt->features = CLOCK_EVT_FEAT_ONESHOT;
+	if (hdev->flags & HPET_DEV_PERI_CAP)
+		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
+
+	evt->set_mode = hpet_msi_set_mode;
+	evt->set_next_event = hpet_msi_next_event;
+	evt->shift = 32;
+
+	/*
+	 * The period is a femtoseconds value. We need to calculate the
+	 * scaled math multiplication factor for nanosecond to hpet tick
+	 * conversion.
+	 */
+	hpet_freq = 1000000000000000ULL;
+	do_div(hpet_freq, hpet_period);
+	evt->mult = div_sc((unsigned long) hpet_freq,
+				      NSEC_PER_SEC, evt->shift);
+	/* Calculate the max delta */
+	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
+	/* 5 usec minimum reprogramming delta. */
+	evt->min_delta_ns = 5000;
+
+	evt->cpumask = cpumask_of_cpu(hdev->cpu);
+	clockevents_register_device(evt);
+}
+
+#ifdef CONFIG_HPET
+/* Reserve at least one timer for userspace (/dev/hpet) */
+#define RESERVE_TIMERS 1
+#else
+#define RESERVE_TIMERS 0
+#endif
+
+static void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+	unsigned int id;
+	unsigned int num_timers;
+	unsigned int num_timers_used = 0;
+	int i;
+
+	id = hpet_readl(HPET_ID);
+
+	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
+	num_timers++; /* Value read out starts from 0 */
+
+	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
+	if (!hpet_devs)
+		return;
+
+	hpet_num_timers = num_timers;
+
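+	/*
+	 * Scan the remaining timers and set aside the FSB (MSI) capable
+	 * ones, at most one per possible cpu.
+	 */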
+	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
+		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
+		unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
+
+		/* Only consider HPET timer with MSI support */
+		if (!(cfg & HPET_TN_FSB_CAP))
+			continue;
+
+		hdev->flags = 0;
+		if (cfg & HPET_TN_PERIODIC_CAP)
+			hdev->flags |= HPET_DEV_PERI_CAP;
+		hdev->num = i;
+
+		sprintf(hdev->name, "hpet%d", i);
+		if (hpet_assign_irq(hdev))
+			continue;
+
+		hdev->flags |= HPET_DEV_FSB_CAP;
+		hdev->flags |= HPET_DEV_VALID;
+		num_timers_used++;
+		if (num_timers_used == num_possible_cpus())
+			break;
+	}
+
+	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timers\n",
+		num_timers, num_timers_used);
+}
+
+#ifdef CONFIG_HPET
+static void hpet_reserve_msi_timers(struct hpet_data *hd)
+{
+	int i;
+
+	if (!hpet_devs)
+		return;
+
+	for (i = 0; i < hpet_num_timers; i++) {
+		struct hpet_dev *hdev = &hpet_devs[i];
+
+		if (!(hdev->flags & HPET_DEV_VALID))
+			continue;
+
+		hd->hd_irq[hdev->num] = hdev->irq;
+		hpet_reserve_timer(hd, hdev->num);
+	}
+}
+#endif
+
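+/* Claim the first valid timer not yet in use; test_and_set_bit() makes the claim atomic. */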
+static struct hpet_dev *hpet_get_unused_timer(void)
+{
+	int i;
+
+	if (!hpet_devs)
+		return NULL;
+
+	for (i = 0; i < hpet_num_timers; i++) {
+		struct hpet_dev *hdev = &hpet_devs[i];
+
+		if (!(hdev->flags & HPET_DEV_VALID))
+			continue;
+		if (test_and_set_bit(HPET_DEV_USED_BIT,
+			(unsigned long *)&hdev->flags))
+			continue;
+		return hdev;
+	}
+	return NULL;
+}
+
+struct hpet_work_struct {
+	struct delayed_work work;
+	struct completion complete;
+};
+
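+/* Runs on a newly onlined cpu: claim an unused MSI timer and register it as that cpu's clockevent. */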
+static void hpet_work(struct work_struct *w)
+{
+	struct hpet_dev *hdev;
+	int cpu = smp_processor_id();
+	struct hpet_work_struct *hpet_work;
+
+	hpet_work = container_of(w, struct hpet_work_struct, work.work);
+
+	hdev = hpet_get_unused_timer();
+	if (hdev)
+		init_one_hpet_msi_clockevent(hdev, cpu);
+
+	complete(&hpet_work->complete);
+}
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+	struct hpet_work_struct work;
+	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
+
+	switch (action & 0xf) {
+	case CPU_ONLINE:
+		INIT_DELAYED_WORK(&work.work, hpet_work);
+		init_completion(&work.complete);
+		/* FIXME: add schedule_work_on() */
+		schedule_delayed_work_on(cpu, &work.work, 0);
+		wait_for_completion(&work.complete);
+		break;
+	case CPU_DEAD:
+		if (hdev) {
+			free_irq(hdev->irq, hdev);
+			hdev->flags &= ~HPET_DEV_USED;
+			per_cpu(cpu_hpet_dev, cpu) = NULL;
+		}
+		break;
+	}
+	return NOTIFY_OK;
+}
+#else
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+	return 0;
+}
+static void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+	return;
+}
+
+#ifdef CONFIG_HPET
+static void hpet_reserve_msi_timers(struct hpet_data *hd)
+{
+	return;
+}
+#endif
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	return NOTIFY_OK;
+}
+
+#endif
+
 /*
  * Clock source related code
  */
@@ -427,8 +803,10 @@
 
 	if (id & HPET_ID_LEGSUP) {
 		hpet_legacy_clockevent_register();
+		hpet_msi_capability_lookup(2);
 		return 1;
 	}
+	hpet_msi_capability_lookup(0);
 	return 0;
 
 out_nohpet:
@@ -445,6 +823,8 @@
  */
 static __init int hpet_late_init(void)
 {
+	int cpu;
+
 	if (boot_hpet_disable)
 		return -ENODEV;
 
@@ -460,6 +840,13 @@
 
 	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
 
+	for_each_online_cpu(cpu) {
+		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
+	}
+
+	/* This notifier should be called after the workqueue subsystem is ready */
+	hotcpu_notifier(hpet_cpuhp_notify, -20);
+
 	return 0;
 }
 fs_initcall(hpet_late_init);
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic.c
similarity index 68%
rename from arch/x86/kernel/io_apic_64.c
rename to arch/x86/kernel/io_apic.c
index 02063ae..b764d74 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic.c
@@ -27,17 +27,21 @@
 #include <linux/sched.h>
 #include <linux/pci.h>
 #include <linux/mc146818rtc.h>
+#include <linux/compiler.h>
 #include <linux/acpi.h>
+#include <linux/module.h>
 #include <linux/sysdev.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
-#include <linux/dmar.h>
-#include <linux/jiffies.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/jiffies.h>	/* time_after() */
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
 #endif
 #include <linux/bootmem.h>
 #include <linux/dmar.h>
+#include <linux/hpet.h>
 
 #include <asm/idle.h>
 #include <asm/io.h>
@@ -46,61 +50,28 @@
 #include <asm/proto.h>
 #include <asm/acpi.h>
 #include <asm/dma.h>
+#include <asm/timer.h>
 #include <asm/i8259.h>
 #include <asm/nmi.h>
 #include <asm/msidef.h>
 #include <asm/hypertransport.h>
+#include <asm/setup.h>
 #include <asm/irq_remapping.h>
+#include <asm/hpet.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_irq.h>
 
 #include <mach_ipi.h>
 #include <mach_apic.h>
+#include <mach_apicdef.h>
 
 #define __apicdebuginit(type) static type __init
 
-struct irq_cfg {
-	cpumask_t domain;
-	cpumask_t old_domain;
-	unsigned move_cleanup_count;
-	u8 vector;
-	u8 move_in_progress : 1;
-};
-
-/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
-	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
-};
-
-static int assign_irq_vector(int irq, cpumask_t mask);
-
-int first_system_vector = 0xfe;
-
-char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
-
-int sis_apic_bug; /* not actually supported, dummy for compile */
-
-static int no_timer_check;
-
-static int disable_timer_pin_1 __initdata;
-
-int timer_through_8259 __initdata;
-
-/* Where if anywhere is the i8259 connect in external int mode */
-static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+/*
+ *      Is the SiS APIC rmw bug present?
+ *      -1 = don't know, 0 = no, 1 = yes
+ */
+int sis_apic_bug = -1;
 
 static DEFINE_SPINLOCK(ioapic_lock);
 static DEFINE_SPINLOCK(vector_lock);
@@ -110,9 +81,6 @@
  */
 int nr_ioapic_registers[MAX_IO_APICS];
 
-/* I/O APIC RTE contents at the OS boot up */
-struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
-
 /* I/O APIC entries */
 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
 int nr_ioapics;
@@ -123,11 +91,69 @@
 /* # of MP IRQ source entries */
 int mp_irq_entries;
 
+#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
+int mp_bus_id_to_type[MAX_MP_BUSSES];
+#endif
+
 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
 
+int skip_ioapic_setup;
+
+static int __init parse_noapic(char *str)
+{
+	/* disable IO-APIC */
+	disable_ioapic_setup();
+	return 0;
+}
+early_param("noapic", parse_noapic);
+
+struct irq_pin_list;
+struct irq_cfg {
+	unsigned int irq;
+	struct irq_pin_list *irq_2_pin;
+	cpumask_t domain;
+	cpumask_t old_domain;
+	unsigned move_cleanup_count;
+	u8 vector;
+	u8 move_in_progress : 1;
+};
+
+/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+static struct irq_cfg irq_cfgx[NR_IRQS] = {
+	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
+	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
+	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
+	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
+	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
+	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
+	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
+	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
+	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
+	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
+	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
+	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
+	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
+	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
+	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
+	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+};
+
+#define for_each_irq_cfg(irq, cfg)		\
+	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
+
+static struct irq_cfg *irq_cfg(unsigned int irq)
+{
+	return irq < nr_irqs ? irq_cfgx + irq : NULL;
+}
+
+static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+{
+	return irq_cfg(irq);
+}
+
 /*
- * Rough estimation of how many shared IRQs there are, can
- * be changed anytime.
+ * Rough estimation of how many shared IRQs there are, can be changed
+ * anytime.
  */
 #define MAX_PLUS_SHARED_IRQS NR_IRQS
 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
@@ -139,9 +165,36 @@
  * between pins and IRQs.
  */
 
-static struct irq_pin_list {
-	short apic, pin, next;
-} irq_2_pin[PIN_MAP_SIZE];
+struct irq_pin_list {
+	int apic, pin;
+	struct irq_pin_list *next;
+};
+
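+/* Statically sized pool of irq_2_pin entries, handed out one at a time by get_one_free_irq_2_pin(). */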
+static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
+static struct irq_pin_list *irq_2_pin_ptr;
+
+static void __init irq_2_pin_init(void)
+{
+	struct irq_pin_list *pin = irq_2_pin_head;
+	int i;
+
+	for (i = 1; i < PIN_MAP_SIZE; i++)
+		pin[i-1].next = &pin[i];
+
+	irq_2_pin_ptr = &pin[0];
+}
+
+static struct irq_pin_list *get_one_free_irq_2_pin(void)
+{
+	struct irq_pin_list *pin = irq_2_pin_ptr;
+
+	if (!pin)
+		panic("can not get more irq_2_pin\n");
+
+	irq_2_pin_ptr = pin->next;
+	pin->next = NULL;
+	return pin;
+}
 
 struct io_apic {
 	unsigned int index;
@@ -172,10 +225,15 @@
 /*
  * Re-write a value: to be used for read-modify-write
  * cycles where the read already set up the index register.
+ *
+ * Older SiS APIC requires we rewrite the index register
  */
-static inline void io_apic_modify(unsigned int apic, unsigned int value)
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
+
+	if (sis_apic_bug)
+		writel(reg, &io_apic->index);
 	writel(value, &io_apic->data);
 }
 
@@ -183,16 +241,17 @@
 {
 	struct irq_pin_list *entry;
 	unsigned long flags;
+	struct irq_cfg *cfg = irq_cfg(irq);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	entry = irq_2_pin + irq;
+	entry = cfg->irq_2_pin;
 	for (;;) {
 		unsigned int reg;
 		int pin;
 
-		pin = entry->pin;
-		if (pin == -1)
+		if (!entry)
 			break;
+		pin = entry->pin;
 		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 		/* Is the remote IRR bit set? */
 		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
@@ -201,45 +260,13 @@
 		}
 		if (!entry->next)
 			break;
-		entry = irq_2_pin + entry->next;
+		entry = entry->next;
 	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return false;
 }
 
-/*
- * Synchronize the IO-APIC and the CPU by doing
- * a dummy read from the IO-APIC
- */
-static inline void io_apic_sync(unsigned int apic)
-{
-	struct io_apic __iomem *io_apic = io_apic_base(apic);
-	readl(&io_apic->data);
-}
-
-#define __DO_ACTION(R, ACTION, FINAL)					\
-									\
-{									\
-	int pin;							\
-	struct irq_pin_list *entry = irq_2_pin + irq;			\
-									\
-	BUG_ON(irq >= NR_IRQS);						\
-	for (;;) {							\
-		unsigned int reg;					\
-		pin = entry->pin;					\
-		if (pin == -1)						\
-			break;						\
-		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
-		reg ACTION;						\
-		io_apic_modify(entry->apic, reg);			\
-		FINAL;							\
-		if (!entry->next)					\
-			break;						\
-		entry = irq_2_pin + entry->next;			\
-	}								\
-}
-
 union entry_union {
 	struct { u32 w1, w2; };
 	struct IO_APIC_route_entry entry;
@@ -299,59 +326,71 @@
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
 {
 	int apic, pin;
-	struct irq_pin_list *entry = irq_2_pin + irq;
+	struct irq_cfg *cfg;
+	struct irq_pin_list *entry;
 
-	BUG_ON(irq >= NR_IRQS);
+	cfg = irq_cfg(irq);
+	entry = cfg->irq_2_pin;
 	for (;;) {
 		unsigned int reg;
+
+		if (!entry)
+			break;
+
 		apic = entry->apic;
 		pin = entry->pin;
-		if (pin == -1)
-			break;
+#ifdef CONFIG_INTR_REMAP
 		/*
 		 * With interrupt-remapping, destination information comes
 		 * from interrupt-remapping table entry.
 		 */
 		if (!irq_remapped(irq))
 			io_apic_write(apic, 0x11 + pin*2, dest);
+#else
+		io_apic_write(apic, 0x11 + pin*2, dest);
+#endif
 		reg = io_apic_read(apic, 0x10 + pin*2);
 		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
 		reg |= vector;
-		io_apic_modify(apic, reg);
+		io_apic_modify(apic, 0x10 + pin*2, reg);
 		if (!entry->next)
 			break;
-		entry = irq_2_pin + entry->next;
+		entry = entry->next;
 	}
 }
 
+static int assign_irq_vector(int irq, cpumask_t mask);
+
 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	unsigned long flags;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
+	cfg = irq_cfg(irq);
 	if (assign_irq_vector(irq, mask))
 		return;
 
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
-
 	/*
 	 * Only the high 8 bits are valid.
 	 */
 	dest = SET_APIC_LOGICAL_ID(dest);
 
+	desc = irq_to_desc(irq);
 	spin_lock_irqsave(&ioapic_lock, flags);
 	__target_IO_APIC_irq(irq, dest, cfg->vector);
-	irq_desc[irq].affinity = mask;
+	desc->affinity = mask;
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
@@ -360,19 +399,30 @@
  */
 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 {
-	static int first_free_entry = NR_IRQS;
-	struct irq_pin_list *entry = irq_2_pin + irq;
+	struct irq_cfg *cfg;
+	struct irq_pin_list *entry;
 
-	BUG_ON(irq >= NR_IRQS);
-	while (entry->next)
-		entry = irq_2_pin + entry->next;
-
-	if (entry->pin != -1) {
-		entry->next = first_free_entry;
-		entry = irq_2_pin + entry->next;
-		if (++first_free_entry >= PIN_MAP_SIZE)
-			panic("io_apic.c: ran out of irq_2_pin entries!");
+	/* first time this irq's irq_cfg is referenced, so allocate it */
+	cfg = irq_cfg_alloc(irq);
+	entry = cfg->irq_2_pin;
+	if (!entry) {
+		entry = get_one_free_irq_2_pin();
+		cfg->irq_2_pin = entry;
+		entry->apic = apic;
+		entry->pin = pin;
+		return;
 	}
+
+	while (entry->next) {
+		/* don't add the same (apic, pin) pair twice */
+		if (entry->apic == apic && entry->pin == pin)
+			return;
+
+		entry = entry->next;
+	}
+
+	entry->next = get_one_free_irq_2_pin();
+	entry = entry->next;
 	entry->apic = apic;
 	entry->pin = pin;
 }
@@ -384,30 +434,86 @@
 				      int oldapic, int oldpin,
 				      int newapic, int newpin)
 {
-	struct irq_pin_list *entry = irq_2_pin + irq;
+	struct irq_cfg *cfg = irq_cfg(irq);
+	struct irq_pin_list *entry = cfg->irq_2_pin;
+	int replaced = 0;
 
-	while (1) {
+	while (entry) {
 		if (entry->apic == oldapic && entry->pin == oldpin) {
 			entry->apic = newapic;
 			entry->pin = newpin;
-		}
-		if (!entry->next)
+			replaced = 1;
+			/* each (apic, pin) pair occurs at most once, so stop here */
 			break;
-		entry = irq_2_pin + entry->next;
+		}
+		entry = entry->next;
+	}
+
+	/* the old (apic, pin) was not found, so just add the new one */
+	if (!replaced)
+		add_pin_to_irq(irq, newapic, newpin);
+}
+
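+/*
+ * For every (apic, pin) routing entry of @irq: AND the low RTE word with
+ * @mask_and, OR in @mask_or, write it back, and run @final if non-NULL.
+ */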
+static inline void io_apic_modify_irq(unsigned int irq,
+				int mask_and, int mask_or,
+				void (*final)(struct irq_pin_list *entry))
+{
+	int pin;
+	struct irq_cfg *cfg;
+	struct irq_pin_list *entry;
+
+	cfg = irq_cfg(irq);
+	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
+		unsigned int reg;
+		pin = entry->pin;
+		reg = io_apic_read(entry->apic, 0x10 + pin * 2);
+		reg &= mask_and;
+		reg |= mask_or;
+		io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
+		if (final)
+			final(entry);
 	}
 }
 
+static void __unmask_IO_APIC_irq(unsigned int irq)
+{
+	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
+}
 
-#define DO_ACTION(name,R,ACTION, FINAL)					\
-									\
-	static void name##_IO_APIC_irq (unsigned int irq)		\
-	__DO_ACTION(R, ACTION, FINAL)
+#ifdef CONFIG_X86_64
+void io_apic_sync(struct irq_pin_list *entry)
+{
+	/*
+	 * Synchronize the IO-APIC and the CPU by doing
+	 * a dummy read from the IO-APIC
+	 */
+	struct io_apic __iomem *io_apic;
+	io_apic = io_apic_base(entry->apic);
+	readl(&io_apic->data);
+}
 
-/* mask = 1 */
-DO_ACTION(__mask,	0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))
+static void __mask_IO_APIC_irq(unsigned int irq)
+{
+	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+}
+#else /* CONFIG_X86_32 */
+static void __mask_IO_APIC_irq(unsigned int irq)
+{
+	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
+}
 
-/* mask = 0 */
-DO_ACTION(__unmask,	0, &= ~IO_APIC_REDIR_MASKED, )
+static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
+{
+	io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+			IO_APIC_REDIR_MASKED, NULL);
+}
+
+static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
+{
+	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
+			IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
+}
+#endif /* CONFIG_X86_32 */
 
 static void mask_IO_APIC_irq (unsigned int irq)
 {
@@ -450,6 +556,68 @@
 			clear_IO_APIC_pin(apic, pin);
 }
 
+#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
+void send_IPI_self(int vector)
+{
+	unsigned int cfg;
+
+	/*
+	 * Wait for idle.
+	 */
+	apic_wait_icr_idle();
+	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
+	/*
+	 * Send the IPI. The write to APIC_ICR fires this off.
+	 */
+	apic_write(APIC_ICR, cfg);
+}
+#endif /* !CONFIG_SMP && CONFIG_X86_32*/
+
+#ifdef CONFIG_X86_32
+/*
+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
+ * specific CPU-side IRQs.
+ */
+
+#define MAX_PIRQS 8
+static int pirq_entries [MAX_PIRQS];
+static int pirqs_enabled;
+
+static int __init ioapic_pirq_setup(char *str)
+{
+	int i, max;
+	int ints[MAX_PIRQS+1];
+
+	get_options(str, ARRAY_SIZE(ints), ints);
+
+	for (i = 0; i < MAX_PIRQS; i++)
+		pirq_entries[i] = -1;
+
+	pirqs_enabled = 1;
+	apic_printk(APIC_VERBOSE, KERN_INFO
+			"PIRQ redirection, working around broken MP-BIOS.\n");
+	max = MAX_PIRQS;
+	if (ints[0] < MAX_PIRQS)
+		max = ints[0];
+
+	for (i = 0; i < max; i++) {
+		apic_printk(APIC_VERBOSE, KERN_DEBUG
+				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
+		/*
+		 * PIRQs are mapped upside down, usually.
+		 */
+		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
+	}
+	return 1;
+}
+
+__setup("pirq=", ioapic_pirq_setup);
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_INTR_REMAP
+/* I/O APIC RTE contents at the OS boot up */
+static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
+
 /*
  * Saves and masks all the unmasked IO-APIC RTE's
  */
@@ -474,7 +642,7 @@
 			kzalloc(sizeof(struct IO_APIC_route_entry) *
 				nr_ioapic_registers[apic], GFP_KERNEL);
 		if (!early_ioapic_entries[apic])
-			return -ENOMEM;
+			goto nomem;
 	}
 
 	for (apic = 0; apic < nr_ioapics; apic++)
@@ -488,17 +656,31 @@
 				ioapic_write_entry(apic, pin, entry);
 			}
 		}
+
 	return 0;
+
+nomem:
+	while (apic >= 0)
+		kfree(early_ioapic_entries[apic--]);
+	memset(early_ioapic_entries, 0,
+		ARRAY_SIZE(early_ioapic_entries));
+
+	return -ENOMEM;
 }
 
 void restore_IO_APIC_setup(void)
 {
 	int apic, pin;
 
-	for (apic = 0; apic < nr_ioapics; apic++)
+	for (apic = 0; apic < nr_ioapics; apic++) {
+		if (!early_ioapic_entries[apic])
+			break;
 		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
 			ioapic_write_entry(apic, pin,
 					   early_ioapic_entries[apic][pin]);
+		kfree(early_ioapic_entries[apic]);
+		early_ioapic_entries[apic] = NULL;
+	}
 }
 
 void reinit_intr_remapped_IO_APIC(int intr_remapping)
@@ -512,25 +694,7 @@
 	 */
 	restore_IO_APIC_setup();
 }
-
-int skip_ioapic_setup;
-int ioapic_force;
-
-static int __init parse_noapic(char *str)
-{
-	disable_ioapic_setup();
-	return 0;
-}
-early_param("noapic", parse_noapic);
-
-/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-static int __init disable_timer_pin_setup(char *arg)
-{
-	disable_timer_pin_1 = 1;
-	return 1;
-}
-__setup("disable_timer_pin_1", disable_timer_pin_setup);
-
+#endif
 
 /*
  * Find the IRQ entry number of a certain pin.
@@ -634,22 +798,54 @@
 				best_guess = irq;
 		}
 	}
-	BUG_ON(best_guess >= NR_IRQS);
 	return best_guess;
 }
 
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+/*
+ * EISA Edge/Level control register, ELCR
+ */
+static int EISA_ELCR(unsigned int irq)
+{
+	if (irq < 16) {
+		unsigned int port = 0x4d0 + (irq >> 3);
+		return (inb(port) >> (irq & 7)) & 1;
+	}
+	apic_printk(APIC_VERBOSE, KERN_INFO
+			"Broken MPtable reports ISA irq %d\n", irq);
+	return 0;
+}
+
+#endif
+
 /* ISA interrupts are always polarity zero edge triggered,
  * when listed as conforming in the MP table. */
 
 #define default_ISA_trigger(idx)	(0)
 #define default_ISA_polarity(idx)	(0)
 
+/* EISA interrupts are always polarity zero and can be edge or level
+ * trigger depending on the ELCR value.  If an interrupt is listed as
+ * EISA conforming in the MP table, that means its trigger type must
+ * be read in from the ELCR */
+
+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
+#define default_EISA_polarity(idx)	default_ISA_polarity(idx)
+
 /* PCI interrupts are always polarity one level triggered,
  * when listed as conforming in the MP table. */
 
 #define default_PCI_trigger(idx)	(1)
 #define default_PCI_polarity(idx)	(1)
 
+/* MCA interrupts are always polarity zero level triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_MCA_trigger(idx)	(1)
+#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
+
 static int MPBIOS_polarity(int idx)
 {
 	int bus = mp_irqs[idx].mp_srcbus;
@@ -707,6 +903,36 @@
 				trigger = default_ISA_trigger(idx);
 			else
 				trigger = default_PCI_trigger(idx);
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+			switch (mp_bus_id_to_type[bus]) {
+				case MP_BUS_ISA: /* ISA pin */
+				{
+					/* set before the switch */
+					break;
+				}
+				case MP_BUS_EISA: /* EISA pin */
+				{
+					trigger = default_EISA_trigger(idx);
+					break;
+				}
+				case MP_BUS_PCI: /* PCI pin */
+				{
+					/* set before the switch */
+					break;
+				}
+				case MP_BUS_MCA: /* MCA pin */
+				{
+					trigger = default_MCA_trigger(idx);
+					break;
+				}
+				default:
+				{
+					printk(KERN_WARNING "broken BIOS!!\n");
+					trigger = 1;
+					break;
+				}
+			}
+#endif
 			break;
 		case 1: /* edge */
 		{
@@ -744,6 +970,7 @@
 	return MPBIOS_trigger(idx);
 }
 
+int (*ioapic_renumber_irq)(int ioapic, int irq);
 static int pin_2_irq(int idx, int apic, int pin)
 {
 	int irq, i;
@@ -765,8 +992,32 @@
 		while (i < apic)
 			irq += nr_ioapic_registers[i++];
 		irq += pin;
+		/*
+		 * For MPS mode, so far only needed by ES7000 platform
+		 */
+		if (ioapic_renumber_irq)
+			irq = ioapic_renumber_irq(apic, irq);
 	}
-	BUG_ON(irq >= NR_IRQS);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
+	 */
+	if ((pin >= 16) && (pin <= 23)) {
+		if (pirq_entries[pin-16] != -1) {
+			if (!pirq_entries[pin-16]) {
+				apic_printk(APIC_VERBOSE, KERN_DEBUG
+						"disabling PIRQ%d\n", pin-16);
+			} else {
+				irq = pirq_entries[pin-16];
+				apic_printk(APIC_VERBOSE, KERN_DEBUG
+						"using PIRQ%d -> IRQ %d\n",
+						pin-16, irq);
+			}
+		}
+	}
+#endif
+
 	return irq;
 }
 
@@ -801,8 +1052,7 @@
 	int cpu;
 	struct irq_cfg *cfg;
 
-	BUG_ON((unsigned)irq >= NR_IRQS);
-	cfg = &irq_cfg[irq];
+	cfg = irq_cfg(irq);
 
 	/* Only try and allocate irqs on cpus that are present */
 	cpus_and(mask, mask, cpu_online_map);
@@ -837,8 +1087,13 @@
 		}
 		if (unlikely(current_vector == vector))
 			continue;
+#ifdef CONFIG_X86_64
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
+#else
+		if (vector == SYSCALL_VECTOR)
+			goto next;
+#endif
 		for_each_cpu_mask_nr(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
@@ -875,8 +1130,7 @@
 	cpumask_t mask;
 	int cpu, vector;
 
-	BUG_ON((unsigned)irq >= NR_IRQS);
-	cfg = &irq_cfg[irq];
+	cfg = irq_cfg(irq);
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
@@ -893,12 +1147,13 @@
 	/* Initialize vector_irq on a new cpu */
 	/* This function must be called with vector_lock held */
 	int irq, vector;
+	struct irq_cfg *cfg;
 
 	/* Mark the inuse vectors */
-	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+	for_each_irq_cfg(irq, cfg) {
+		if (!cpu_isset(cpu, cfg->domain))
 			continue;
-		vector = irq_cfg[irq].vector;
+		vector = cfg->vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
 	}
 	/* Mark the free vectors */
@@ -906,7 +1161,9 @@
 		irq = per_cpu(vector_irq, cpu)[vector];
 		if (irq < 0)
 			continue;
-		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+
+		cfg = irq_cfg(irq);
+		if (!cpu_isset(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
 }
@@ -916,16 +1173,49 @@
 static struct irq_chip ir_ioapic_chip;
 #endif
 
+#define IOAPIC_AUTO     -1
+#define IOAPIC_EDGE     0
+#define IOAPIC_LEVEL    1
+
+#ifdef CONFIG_X86_32
+static inline int IO_APIC_irq_trigger(int irq)
+{
+	int apic, idx, pin;
+
+	for (apic = 0; apic < nr_ioapics; apic++) {
+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+			idx = find_irq_entry(apic, pin, mp_INT);
+			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
+				return irq_trigger(idx);
+		}
+	}
+	/*
+	 * nonexistent IRQs are edge default
+	 */
+	return 0;
+}
+#else
+static inline int IO_APIC_irq_trigger(int irq)
+{
+	return 1;
+}
+#endif
+
 static void ioapic_register_intr(int irq, unsigned long trigger)
 {
-	if (trigger)
-		irq_desc[irq].status |= IRQ_LEVEL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+	    trigger == IOAPIC_LEVEL)
+		desc->status |= IRQ_LEVEL;
 	else
-		irq_desc[irq].status &= ~IRQ_LEVEL;
+		desc->status &= ~IRQ_LEVEL;
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
-		irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
+		desc->status |= IRQ_MOVE_PCNTXT;
 		if (trigger)
 			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
 						      handle_fasteoi_irq,
@@ -936,7 +1226,8 @@
 		return;
 	}
 #endif
-	if (trigger)
+	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+	    trigger == IOAPIC_LEVEL)
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					      handle_fasteoi_irq,
 					      "fasteoi");
@@ -1009,13 +1300,15 @@
 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 			      int trigger, int polarity)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	struct IO_APIC_route_entry entry;
 	cpumask_t mask;
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
+	cfg = irq_cfg(irq);
+
 	mask = TARGET_CPUS;
 	if (assign_irq_vector(irq, mask))
 		return;
@@ -1047,37 +1340,49 @@
 
 static void __init setup_IO_APIC_irqs(void)
 {
-	int apic, pin, idx, irq, first_notcon = 1;
+	int apic, pin, idx, irq;
+	int notcon = 0;
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
-	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 
-		idx = find_irq_entry(apic,pin,mp_INT);
-		if (idx == -1) {
-			if (first_notcon) {
-				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
-				first_notcon = 0;
-			} else
-				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
-			continue;
+			idx = find_irq_entry(apic, pin, mp_INT);
+			if (idx == -1) {
+				if (!notcon) {
+					notcon = 1;
+					apic_printk(APIC_VERBOSE,
+						KERN_DEBUG " %d-%d",
+						mp_ioapics[apic].mp_apicid,
+						pin);
+				} else
+					apic_printk(APIC_VERBOSE, " %d-%d",
+						mp_ioapics[apic].mp_apicid,
+						pin);
+				continue;
+			}
+			if (notcon) {
+				apic_printk(APIC_VERBOSE,
+					" (apicid-pin) not connected\n");
+				notcon = 0;
+			}
+
+			irq = pin_2_irq(idx, apic, pin);
+#ifdef CONFIG_X86_32
+			if (multi_timer_check(apic, irq))
+				continue;
+#endif
+			add_pin_to_irq(irq, apic, pin);
+
+			setup_IO_APIC_irq(apic, pin, irq,
+					irq_trigger(idx), irq_polarity(idx));
 		}
-		if (!first_notcon) {
-			apic_printk(APIC_VERBOSE, " not connected.\n");
-			first_notcon = 1;
-		}
-
-		irq = pin_2_irq(idx, apic, pin);
-		add_pin_to_irq(irq, apic, pin);
-
-		setup_IO_APIC_irq(apic, pin, irq,
-				  irq_trigger(idx), irq_polarity(idx));
-	}
 	}
 
-	if (!first_notcon)
-		apic_printk(APIC_VERBOSE, " not connected.\n");
+	if (notcon)
+		apic_printk(APIC_VERBOSE,
+			" (apicid-pin) not connected\n");
 }
 
 /*
@@ -1088,8 +1393,10 @@
 {
 	struct IO_APIC_route_entry entry;
 
+#ifdef CONFIG_INTR_REMAP
 	if (intr_remapping_enabled)
 		return;
+#endif
 
 	memset(&entry, 0, sizeof(entry));
 
@@ -1124,7 +1431,10 @@
 	union IO_APIC_reg_00 reg_00;
 	union IO_APIC_reg_01 reg_01;
 	union IO_APIC_reg_02 reg_02;
+	union IO_APIC_reg_03 reg_03;
 	unsigned long flags;
+	struct irq_cfg *cfg;
+	unsigned int irq;
 
 	if (apic_verbosity == APIC_QUIET)
 		return;
@@ -1147,12 +1457,16 @@
 	reg_01.raw = io_apic_read(apic, 1);
 	if (reg_01.bits.version >= 0x10)
 		reg_02.raw = io_apic_read(apic, 2);
+	if (reg_01.bits.version >= 0x20)
+		reg_03.raw = io_apic_read(apic, 3);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
 	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
 	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
 	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
+	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
+	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
 
 	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
 	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
@@ -1160,11 +1474,27 @@
 	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
 	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
 
-	if (reg_01.bits.version >= 0x10) {
+	/*
+	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
+	 * but the value of reg_02 is read as the previous read register
+	 * value, so ignore it if reg_02 == reg_01.
+	 */
+	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
 		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
 		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
 	}
 
+	/*
+	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
+	 * or reg_03, but the value of reg_0[23] is read as the previous read
+	 * register value, so ignore it if reg_03 == reg_0[12].
+	 */
+	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
+	    reg_03.raw != reg_01.raw) {
+		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
+		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
+	}
+
 	printk(KERN_DEBUG ".... IRQ redirection table:\n");
 
 	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
@@ -1193,16 +1523,16 @@
 	}
 	}
 	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-	for (i = 0; i < NR_IRQS; i++) {
-		struct irq_pin_list *entry = irq_2_pin + i;
-		if (entry->pin < 0)
+	for_each_irq_cfg(irq, cfg) {
+		struct irq_pin_list *entry = cfg->irq_2_pin;
+		if (!entry)
 			continue;
-		printk(KERN_DEBUG "IRQ%d ", i);
+		printk(KERN_DEBUG "IRQ%d ", irq);
 		for (;;) {
 			printk("-> %d:%d", entry->apic, entry->pin);
 			if (!entry->next)
 				break;
-			entry = irq_2_pin + entry->next;
+			entry = entry->next;
 		}
 		printk("\n");
 	}
@@ -1236,7 +1566,7 @@
 __apicdebuginit(void) print_local_APIC(void *dummy)
 {
 	unsigned int v, ver, maxlvt;
-	unsigned long icr;
+	u64 icr;
 
 	if (apic_verbosity == APIC_QUIET)
 		return;
@@ -1253,20 +1583,31 @@
 	v = apic_read(APIC_TASKPRI);
 	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
 
-	v = apic_read(APIC_ARBPRI);
-	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-		v & APIC_ARBPRI_MASK);
-	v = apic_read(APIC_PROCPRI);
-	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
+	if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
+		if (!APIC_XAPIC(ver)) {
+			v = apic_read(APIC_ARBPRI);
+			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
+			       v & APIC_ARBPRI_MASK);
+		}
+		v = apic_read(APIC_PROCPRI);
+		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
+	}
 
-	v = apic_read(APIC_EOI);
-	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-	v = apic_read(APIC_RRR);
-	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
+	/*
+	 * Remote read is supported only in the 82489DX and in the local
+	 * APIC of Pentium processors.
+	 */
+	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
+		v = apic_read(APIC_RRR);
+		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
+	}
+
 	v = apic_read(APIC_LDR);
 	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-	v = apic_read(APIC_DFR);
-	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
+	if (!x2apic_enabled()) {
+		v = apic_read(APIC_DFR);
+		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
+	}
 	v = apic_read(APIC_SPIV);
 	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
 
@@ -1277,8 +1618,13 @@
 	printk(KERN_DEBUG "... APIC IRR field:\n");
 	print_APIC_bitfield(APIC_IRR);
 
-	v = apic_read(APIC_ESR);
-	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
+	if (APIC_INTEGRATED(ver)) {             /* !82489DX */
+		if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
+			apic_write(APIC_ESR, 0);
+
+		v = apic_read(APIC_ESR);
+		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
+	}
 
 	icr = apic_icr_read();
 	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
@@ -1312,7 +1658,12 @@
 
 __apicdebuginit(void) print_all_local_APICs(void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1);
+	int cpu;
+
+	preempt_disable();
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+	preempt_enable();
 }
 
 __apicdebuginit(void) print_PIC(void)
@@ -1359,17 +1710,22 @@
 fs_initcall(print_all_ICs);
 
 
+/* Where, if anywhere, is the i8259 connected in external int mode? */
+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+
 void __init enable_IO_APIC(void)
 {
 	union IO_APIC_reg_01 reg_01;
 	int i8259_apic, i8259_pin;
-	int i, apic;
+	int apic;
 	unsigned long flags;
 
-	for (i = 0; i < PIN_MAP_SIZE; i++) {
-		irq_2_pin[i].pin = -1;
-		irq_2_pin[i].next = 0;
-	}
+#ifdef CONFIG_X86_32
+	int i;
+	if (!pirqs_enabled)
+		for (i = 0; i < MAX_PIRQS; i++)
+			pirq_entries[i] = -1;
+#endif
 
 	/*
 	 * The number of IO-APIC IRQ registers (== #pins):
@@ -1399,6 +1755,10 @@
 	}
  found_i8259:
 	/* Look to see if the MP table has reported the ExtINT */
+	/* If we could not find the appropriate pin by looking at the ioapic,
+	 * the i8259 probably is not connected to the ioapic, but give the
+	 * mptable a chance anyway.
+	 */
 	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
 	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
 	/* Trust the MP table if nothing is setup in the hardware */
@@ -1458,6 +1818,133 @@
 	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
 }
 
+#ifdef CONFIG_X86_32
+/*
+ * function to set the IO-APIC physical IDs based on the
+ * values stored in the MPC table.
+ *
+ * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
+ */
+
+static void __init setup_ioapic_ids_from_mpc(void)
+{
+	union IO_APIC_reg_00 reg_00;
+	physid_mask_t phys_id_present_map;
+	int apic;
+	int i;
+	unsigned char old_id;
+	unsigned long flags;
+
+	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
+		return;
+
+	/*
+	 * Don't check I/O APIC IDs for xAPIC systems.  They have
+	 * no meaning without the serial APIC bus.
+	 */
+	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+		return;
+	/*
+	 * This is broken; anything with a real cpu count has to
+	 * circumvent this idiocy regardless.
+	 */
+	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
+
+	/*
+	 * Set the IOAPIC ID to the value stored in the MPC table.
+	 */
+	for (apic = 0; apic < nr_ioapics; apic++) {
+
+		/* Read the register 0 value */
+		spin_lock_irqsave(&ioapic_lock, flags);
+		reg_00.raw = io_apic_read(apic, 0);
+		spin_unlock_irqrestore(&ioapic_lock, flags);
+
+		old_id = mp_ioapics[apic].mp_apicid;
+
+		if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
+			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
+				apic, mp_ioapics[apic].mp_apicid);
+			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+				reg_00.bits.ID);
+			mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
+		}
+
+		/*
+		 * Sanity check, is the ID really free? Every APIC in a
+		 * system must have a unique ID or we get lots of nice
+		 * 'stuck on smp_invalidate_needed IPI wait' messages.
+		 */
+		if (check_apicid_used(phys_id_present_map,
+					mp_ioapics[apic].mp_apicid)) {
+			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
+				apic, mp_ioapics[apic].mp_apicid);
+			for (i = 0; i < get_physical_broadcast(); i++)
+				if (!physid_isset(i, phys_id_present_map))
+					break;
+			if (i >= get_physical_broadcast())
+				panic("Max APIC ID exceeded!\n");
+			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+				i);
+			physid_set(i, phys_id_present_map);
+			mp_ioapics[apic].mp_apicid = i;
+		} else {
+			physid_mask_t tmp;
+			tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
+			apic_printk(APIC_VERBOSE, "Setting %d in the "
+					"phys_id_present_map\n",
+					mp_ioapics[apic].mp_apicid);
+			physids_or(phys_id_present_map, phys_id_present_map, tmp);
+		}
+
+
+		/*
+		 * We need to adjust the IRQ routing table
+		 * if the ID changed.
+		 */
+		if (old_id != mp_ioapics[apic].mp_apicid)
+			for (i = 0; i < mp_irq_entries; i++)
+				if (mp_irqs[i].mp_dstapic == old_id)
+					mp_irqs[i].mp_dstapic
+						= mp_ioapics[apic].mp_apicid;
+
+		/*
+		 * Read the right value from the MPC table and
+		 * write it into the ID register.
+		 */
+		apic_printk(APIC_VERBOSE, KERN_INFO
+			"...changing IO-APIC physical APIC ID to %d ...",
+			mp_ioapics[apic].mp_apicid);
+
+		reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
+		spin_lock_irqsave(&ioapic_lock, flags);
+		io_apic_write(apic, 0, reg_00.raw);
+		spin_unlock_irqrestore(&ioapic_lock, flags);
+
+		/*
+		 * Sanity check
+		 */
+		spin_lock_irqsave(&ioapic_lock, flags);
+		reg_00.raw = io_apic_read(apic, 0);
+		spin_unlock_irqrestore(&ioapic_lock, flags);
+		if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
+			printk("could not set ID!\n");
+		else
+			apic_printk(APIC_VERBOSE, " ok.\n");
+	}
+}
+#endif
+
+int no_timer_check __initdata;
+
+static int __init notimercheck(char *s)
+{
+	no_timer_check = 1;
+	return 1;
+}
+__setup("no_timer_check", notimercheck);
+
 /*
  * There is a nasty bug in some older SMP boards, their mptable lies
  * about the timer IRQ. We do the following to work around the situation:
@@ -1471,6 +1958,9 @@
 	unsigned long t1 = jiffies;
 	unsigned long flags;
 
+	if (no_timer_check)
+		return 1;
+
 	local_save_flags(flags);
 	local_irq_enable();
 	/* Let ten ticks pass... */
@@ -1531,9 +2021,11 @@
 	return was_pending;
 }
 
+#ifdef CONFIG_X86_64
 static int ioapic_retrigger_irq(unsigned int irq)
 {
-	struct irq_cfg *cfg = &irq_cfg[irq];
+
+	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
@@ -1542,6 +2034,14 @@
 
 	return 1;
 }
+#else
+static int ioapic_retrigger_irq(unsigned int irq)
+{
+	send_IPI_self(irq_cfg(irq)->vector);
+
+	return 1;
+}
+#endif
 
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
@@ -1580,11 +2080,11 @@
  */
 static void migrate_ioapic_irq(int irq, cpumask_t mask)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_cfg *cfg;
+	struct irq_desc *desc;
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
-	int modify_ioapic_rte = desc->status & IRQ_LEVEL;
+	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 
@@ -1598,9 +2098,12 @@
 	if (assign_irq_vector(irq, mask))
 		return;
 
+	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
+	desc = irq_to_desc(irq);
+	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
 		spin_lock_irqsave(&ioapic_lock, flags);
 		__target_IO_APIC_irq(irq, dest, cfg->vector);
@@ -1622,18 +2125,19 @@
 		cfg->move_in_progress = 0;
 	}
 
-	irq_desc[irq].affinity = mask;
+	desc->affinity = mask;
 }
 
 static int migrate_irq_remapped_level(int irq)
 {
 	int ret = -1;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	mask_IO_APIC_irq(irq);
 
 	if (io_apic_level_ack_pending(irq)) {
 		/*
-	 	 * Interrupt in progress. Migrating irq now will change the
+		 * Interrupt in progress. Migrating irq now will change the
 		 * vector information in the IO-APIC RTE and that will confuse
 		 * the EOI broadcast performed by cpu.
 		 * So, delay the irq migration to the next instance.
@@ -1643,11 +2147,11 @@
 	}
 
 	/* everything is clear. we have right of way */
-	migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);
+	migrate_ioapic_irq(irq, desc->pending_mask);
 
 	ret = 0;
-	irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
-	cpus_clear(irq_desc[irq].pending_mask);
+	desc->status &= ~IRQ_MOVE_PENDING;
+	cpus_clear(desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq(irq);
@@ -1656,10 +2160,10 @@
 
 static void ir_irq_migration(struct work_struct *work)
 {
-	int irq;
+	unsigned int irq;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
-		struct irq_desc *desc = irq_desc + irq;
+	for_each_irq_desc(irq, desc) {
 		if (desc->status & IRQ_MOVE_PENDING) {
 			unsigned long flags;
 
@@ -1671,8 +2175,7 @@
 				continue;
 			}
 
-			desc->chip->set_affinity(irq,
-					         irq_desc[irq].pending_mask);
+			desc->chip->set_affinity(irq, desc->pending_mask);
 			spin_unlock_irqrestore(&desc->lock, flags);
 		}
 	}
@@ -1683,9 +2186,11 @@
  */
 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-	if (irq_desc[irq].status & IRQ_LEVEL) {
-		irq_desc[irq].status |= IRQ_MOVE_PENDING;
-		irq_desc[irq].pending_mask = mask;
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc->status & IRQ_LEVEL) {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = mask;
 		migrate_irq_remapped_level(irq);
 		return;
 	}
@@ -1698,7 +2203,9 @@
 {
 	unsigned vector, me;
 	ack_APIC_irq();
+#ifdef CONFIG_X86_64
 	exit_idle();
+#endif
 	irq_enter();
 
 	me = smp_processor_id();
@@ -1707,11 +2214,12 @@
 		struct irq_desc *desc;
 		struct irq_cfg *cfg;
 		irq = __get_cpu_var(vector_irq)[vector];
-		if (irq >= NR_IRQS)
+
+		desc = irq_to_desc(irq);
+		if (!desc)
 			continue;
 
-		desc = irq_desc + irq;
-		cfg = irq_cfg + irq;
+		cfg = irq_cfg(irq);
 		spin_lock(&desc->lock);
 		if (!cfg->move_cleanup_count)
 			goto unlock;
@@ -1730,7 +2238,7 @@
 
 static void irq_complete_move(unsigned int irq)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned vector, me;
 
 	if (likely(!cfg->move_in_progress))
@@ -1769,19 +2277,50 @@
 	ack_APIC_irq();
 }
 
+atomic_t irq_mis_count;
+
 static void ack_apic_level(unsigned int irq)
 {
+#ifdef CONFIG_X86_32
+	unsigned long v;
+	int i;
+#endif
 	int do_unmask_irq = 0;
 
 	irq_complete_move(irq);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_IO_APIC_irq(irq);
 	}
 #endif
 
+#ifdef CONFIG_X86_32
+	/*
+	 * It appears there is an erratum which affects at least version 0x11
+	 * of the I/O APIC (that's the 82093AA and cores integrated into
+	 * various chipsets).  Under certain conditions a level-triggered
+	 * interrupt is erroneously delivered as an edge-triggered one, but
+	 * the respective IRR bit gets set nevertheless.  As a result the I/O
+	 * unit expects an EOI message, but it will never arrive and further
+	 * interrupts are blocked from the source.  The exact reason is so
+	 * far unknown, but the phenomenon was observed when two consecutive
+	 * interrupt requests from a given source get delivered to the same
+	 * CPU and the source is temporarily disabled in between.
+	 *
+	 * A workaround is to simulate an EOI message manually.  We achieve
+	 * it by setting the trigger mode to edge and then back to level when
+	 * the edge trigger mode gets detected in the TMR of a local APIC for
+	 * a level-triggered interrupt.  We mask the source for the time of
+	 * the operation to prevent an edge-triggered interrupt escaping
+	 * meanwhile.  The idea is from Manfred Spraul.  --macro
+	 */
+	i = irq_cfg(irq)->vector;
+
+	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
+#endif
+
 	/*
 	 * We must acknowledge the irq before we move it or the acknowledge will
 	 * not propagate properly.
@@ -1820,31 +2359,41 @@
 			move_masked_irq(irq);
 		unmask_IO_APIC_irq(irq);
 	}
+
+#ifdef CONFIG_X86_32
+	if (!(v & (1 << (i & 0x1f)))) {
+		atomic_inc(&irq_mis_count);
+		spin_lock(&ioapic_lock);
+		__mask_and_edge_IO_APIC_irq(irq);
+		__unmask_and_level_IO_APIC_irq(irq);
+		spin_unlock(&ioapic_lock);
+	}
+#endif
 }
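
/*
 * Standalone sketch for illustration (not from the patch): the TMR is read
 * as 32-bit words spaced 16 bytes apart, so vector 'v' lives in word
 * (v >> 5) at bit (v & 0x1f); the workaround above fires when that bit
 * reads back as 0 for a level-triggered interrupt.  read_tmr_word() is a
 * hypothetical accessor, not a real kernel interface.
 */
static int tmr_bit_is_set(unsigned int vector,
			  unsigned int (*read_tmr_word)(unsigned int word))
{
	unsigned int word = vector >> 5;	/* which 32-bit TMR word */
	unsigned int bit  = vector & 0x1f;	/* bit within that word  */

	return (read_tmr_word(word) >> bit) & 1;
}
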
 
 static struct irq_chip ioapic_chip __read_mostly = {
-	.name 		= "IO-APIC",
-	.startup 	= startup_ioapic_irq,
-	.mask	 	= mask_IO_APIC_irq,
-	.unmask	 	= unmask_IO_APIC_irq,
-	.ack 		= ack_apic_edge,
-	.eoi 		= ack_apic_level,
+	.name		= "IO-APIC",
+	.startup	= startup_ioapic_irq,
+	.mask		= mask_IO_APIC_irq,
+	.unmask		= unmask_IO_APIC_irq,
+	.ack		= ack_apic_edge,
+	.eoi		= ack_apic_level,
 #ifdef CONFIG_SMP
-	.set_affinity 	= set_ioapic_affinity_irq,
+	.set_affinity	= set_ioapic_affinity_irq,
 #endif
 	.retrigger	= ioapic_retrigger_irq,
 };
 
 #ifdef CONFIG_INTR_REMAP
 static struct irq_chip ir_ioapic_chip __read_mostly = {
-	.name 		= "IR-IO-APIC",
-	.startup 	= startup_ioapic_irq,
-	.mask	 	= mask_IO_APIC_irq,
-	.unmask	 	= unmask_IO_APIC_irq,
-	.ack 		= ack_x2apic_edge,
-	.eoi 		= ack_x2apic_level,
+	.name		= "IR-IO-APIC",
+	.startup	= startup_ioapic_irq,
+	.mask		= mask_IO_APIC_irq,
+	.unmask		= unmask_IO_APIC_irq,
+	.ack		= ack_x2apic_edge,
+	.eoi		= ack_x2apic_level,
 #ifdef CONFIG_SMP
-	.set_affinity 	= set_ir_ioapic_affinity_irq,
+	.set_affinity	= set_ir_ioapic_affinity_irq,
 #endif
 	.retrigger	= ioapic_retrigger_irq,
 };
@@ -1853,6 +2402,8 @@
 static inline void init_IO_APIC_traps(void)
 {
 	int irq;
+	struct irq_desc *desc;
+	struct irq_cfg *cfg;
 
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1865,8 +2416,8 @@
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	for (irq = 0; irq < NR_IRQS ; irq++) {
-		if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
+	for_each_irq_cfg(irq, cfg) {
+		if (IO_APIC_IRQ(irq) && !cfg->vector) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
@@ -1874,13 +2425,27 @@
 			 */
 			if (irq < 16)
 				make_8259A_irq(irq);
-			else
+			else {
+				desc = irq_to_desc(irq);
 				/* Strange. Oh, well.. */
-				irq_desc[irq].chip = &no_irq_chip;
+				desc->chip = &no_irq_chip;
+			}
 		}
 	}
 }
 
+/*
+ * The local APIC irq-chip implementation:
+ */
+
+static void mask_lapic_irq(unsigned int irq)
+{
+	unsigned long v;
+
+	v = apic_read(APIC_LVT0);
+	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+}
+
 static void unmask_lapic_irq(unsigned int irq)
 {
 	unsigned long v;
@@ -1889,14 +2454,6 @@
 	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
 }
 
-static void mask_lapic_irq(unsigned int irq)
-{
-	unsigned long v;
-
-	v = apic_read(APIC_LVT0);
-	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-}
-
 static void ack_lapic_irq (unsigned int irq)
 {
 	ack_APIC_irq();
@@ -1911,7 +2468,10 @@
 
 static void lapic_register_intr(int irq)
 {
-	irq_desc[irq].status &= ~IRQ_LEVEL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->status &= ~IRQ_LEVEL;
 	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
 				      "edge");
 }
@@ -1919,19 +2479,19 @@
 static void __init setup_nmi(void)
 {
 	/*
- 	 * Dirty trick to enable the NMI watchdog ...
+	 * Dirty trick to enable the NMI watchdog ...
 	 * We put the 8259A master into AEOI mode and
 	 * unmask on all local APICs LVT0 as NMI.
 	 *
 	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
 	 * is from Maciej W. Rozycki - so we do not have to EOI from
 	 * the NMI handler or the timer interrupt.
-	 */ 
-	printk(KERN_INFO "activating NMI Watchdog ...");
+	 */
+	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
 
 	enable_NMI_through_LVT0();
 
-	printk(" done.\n");
+	apic_printk(APIC_VERBOSE, " done.\n");
 }
 
 /*
@@ -1948,12 +2508,17 @@
 	unsigned char save_control, save_freq_select;
 
 	pin  = find_isa_irq_pin(8, mp_INT);
-	apic = find_isa_irq_apic(8, mp_INT);
-	if (pin == -1)
+	if (pin == -1) {
+		WARN_ON_ONCE(1);
 		return;
+	}
+	apic = find_isa_irq_apic(8, mp_INT);
+	if (apic == -1) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 
 	entry0 = ioapic_read_entry(apic, pin);
-
 	clear_IO_APIC_pin(apic, pin);
 
 	memset(&entry1, 0, sizeof(entry1));
@@ -1988,23 +2553,38 @@
 	ioapic_write_entry(apic, pin, entry0);
 }
 
+static int disable_timer_pin_1 __initdata;
+/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
+static int __init disable_timer_pin_setup(char *arg)
+{
+	disable_timer_pin_1 = 1;
+	return 0;
+}
+early_param("disable_timer_pin_1", disable_timer_pin_setup);
+
+int timer_through_8259 __initdata;
+
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
  * fanatically on his truly buggy board.
  *
- * FIXME: really need to revamp this for modern platforms only.
+ * FIXME: really need to revamp this for all platforms.
  */
 static inline void __init check_timer(void)
 {
-	struct irq_cfg *cfg = irq_cfg + 0;
+	struct irq_cfg *cfg = irq_cfg(0);
 	int apic1, pin1, apic2, pin2;
 	unsigned long flags;
+	unsigned int ver;
 	int no_pin1 = 0;
 
 	local_irq_save(flags);
 
+	ver = apic_read(APIC_LVR);
+	ver = GET_APIC_VERSION(ver);
+
 	/*
 	 * get/set the timer IRQ vector:
 	 */
@@ -2013,10 +2593,18 @@
 
 	/*
 	 * As IRQ0 is to be enabled in the 8259A, the virtual
-	 * wire has to be disabled in the local APIC.
+	 * wire has to be disabled in the local APIC.  Also
+	 * timer interrupts need to be acknowledged manually in
+	 * the 8259A for the i82489DX when using the NMI
+	 * watchdog as that APIC treats NMIs as level-triggered.
+	 * The AEOI mode will finish them in the 8259A
+	 * automatically.
 	 */
 	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 	init_8259A(1);
+#ifdef CONFIG_X86_32
+	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+#endif
 
 	pin1  = find_isa_irq_pin(0, mp_INT);
 	apic1 = find_isa_irq_apic(0, mp_INT);
@@ -2035,8 +2623,10 @@
 	 * 8259A.
 	 */
 	if (pin1 == -1) {
+#ifdef CONFIG_INTR_REMAP
 		if (intr_remapping_enabled)
 			panic("BIOS bug: timer not connected to IO-APIC");
+#endif
 		pin1 = pin2;
 		apic1 = apic2;
 		no_pin1 = 1;
@@ -2054,7 +2644,7 @@
 			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
 		}
 		unmask_IO_APIC_irq(0);
-		if (!no_timer_check && timer_irq_works()) {
+		if (timer_irq_works()) {
 			if (nmi_watchdog == NMI_IO_APIC) {
 				setup_nmi();
 				enable_8259A_irq(0);
@@ -2063,8 +2653,10 @@
 				clear_IO_APIC_pin(0, pin1);
 			goto out;
 		}
+#ifdef CONFIG_INTR_REMAP
 		if (intr_remapping_enabled)
 			panic("timer doesn't work through Interrupt-remapped IO-APIC");
+#endif
 		clear_IO_APIC_pin(apic1, pin1);
 		if (!no_pin1)
 			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2104,6 +2696,9 @@
 			    "through the IO-APIC - disabling NMI Watchdog!\n");
 		nmi_watchdog = NMI_NONE;
 	}
+#ifdef CONFIG_X86_32
+	timer_ack = 0;
+#endif
 
 	apic_printk(APIC_QUIET, KERN_INFO
 		    "...trying to set up timer as Virtual Wire IRQ...\n");
@@ -2140,13 +2735,6 @@
 	local_irq_restore(flags);
 }
 
-static int __init notimercheck(char *s)
-{
-	no_timer_check = 1;
-	return 1;
-}
-__setup("no_timer_check", notimercheck);
-
 /*
  * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
  * to devices.  However there may be an I/O APIC pin available for
@@ -2164,25 +2752,49 @@
  * the I/O APIC in all cases now.  No actual device should request
  * it anyway.  --macro
  */
-#define PIC_IRQS	(1<<2)
+#define PIC_IRQS	(1 << PIC_CASCADE_IR)
 
 void __init setup_IO_APIC(void)
 {
 
+#ifdef CONFIG_X86_32
+	enable_IO_APIC();
+#else
 	/*
 	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
 	 */
+#endif
 
 	io_apic_irqs = ~PIC_IRQS;
 
 	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-
+	/*
+	 * Set up IO-APIC IRQ routing.
+	 */
+#ifdef CONFIG_X86_32
+	if (!acpi_ioapic)
+		setup_ioapic_ids_from_mpc();
+#endif
 	sync_Arb_IDs();
 	setup_IO_APIC_irqs();
 	init_IO_APIC_traps();
 	check_timer();
 }
 
+/*
+ *      Called after all the initialization is done. If we didn't find any
+ *      APIC bugs then we can allow the modify fast path.
+ */
+
+static int __init io_apic_bug_finalize(void)
+{
+	if (sis_apic_bug == -1)
+		sis_apic_bug = 0;
+	return 0;
+}
+
+late_initcall(io_apic_bug_finalize);
+
 struct sysfs_ioapic_data {
 	struct sys_device dev;
 	struct IO_APIC_route_entry entry[0];
@@ -2270,32 +2882,51 @@
 /*
  * Dynamic irq allocate and deallocation
  */
-int create_irq(void)
+unsigned int create_irq_nr(unsigned int irq_want)
 {
 	/* Allocate an unused irq */
-	int irq;
-	int new;
+	unsigned int irq;
+	unsigned int new;
 	unsigned long flags;
+	struct irq_cfg *cfg_new;
 
-	irq = -ENOSPC;
+	irq_want = nr_irqs - 1;
+
+	irq = 0;
 	spin_lock_irqsave(&vector_lock, flags);
-	for (new = (NR_IRQS - 1); new >= 0; new--) {
+	for (new = irq_want; new > 0; new--) {
 		if (platform_legacy_irq(new))
 			continue;
-		if (irq_cfg[new].vector != 0)
+		cfg_new = irq_cfg(new);
+		if (cfg_new && cfg_new->vector != 0)
 			continue;
+		/* check if we need to create one */
+		if (!cfg_new)
+			cfg_new = irq_cfg_alloc(new);
 		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
 			irq = new;
 		break;
 	}
 	spin_unlock_irqrestore(&vector_lock, flags);
 
-	if (irq >= 0) {
+	if (irq > 0) {
 		dynamic_irq_init(irq);
 	}
 	return irq;
 }
 
+int create_irq(void)
+{
+	int irq;
+
+	irq = create_irq_nr(nr_irqs - 1);
+
+	if (irq == 0)
+		irq = -1;
+
+	return irq;
+}
+
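
/*
 * Standalone sketch for illustration (not from the patch): create_irq_nr()
 * above walks downward from the hint and claims the first slot without a
 * vector.  Here 'used' is a made-up array standing in for the real irq_cfg
 * lookup, and legacy 8259 irqs (0-15) are skipped much like
 * platform_legacy_irq() does.
 */
#include <stdbool.h>

static unsigned int find_free_irq(const bool *used, unsigned int hint)
{
	unsigned int irq;

	for (irq = hint; irq > 0; irq--) {
		if (irq < 16)			/* leave legacy irqs alone */
			continue;
		if (!used[irq])
			return irq;		/* first free slot wins */
	}
	return 0;				/* 0 means allocation failed */
}
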
 void destroy_irq(unsigned int irq)
 {
 	unsigned long flags;
@@ -2316,7 +2947,7 @@
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	int err;
 	unsigned dest;
 	cpumask_t tmp;
@@ -2326,6 +2957,7 @@
 	if (err)
 		return err;
 
+	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, tmp);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -2383,10 +3015,11 @@
 #ifdef CONFIG_SMP
 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2395,6 +3028,7 @@
 	if (assign_irq_vector(irq, mask))
 		return;
 
+	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -2406,7 +3040,8 @@
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
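
/*
 * Standalone sketch for illustration (not from the patch): retargeting an
 * MSI, as done above, only rewrites the vector bits in msg.data and the
 * destination-ID bits in msg.address_lo.  The layout used here (vector in
 * data[7:0], APIC id in address bits 19:12) is a simplified stand-in for
 * the MSI_DATA_VECTOR_MASK and MSI_ADDR_DEST_ID_MASK definitions.
 */
#include <stdint.h>

struct msi_msg_sketch {
	uint32_t address_lo;
	uint32_t data;
};

static void retarget_msi(struct msi_msg_sketch *msg,
			 uint8_t vector, uint8_t dest_apicid)
{
	msg->data &= ~0xffu;				/* clear old vector  */
	msg->data |= vector;
	msg->address_lo &= ~(0xffu << 12);		/* clear old dest id */
	msg->address_lo |= (uint32_t)dest_apicid << 12;
}
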
 
 #ifdef CONFIG_INTR_REMAP
@@ -2416,10 +3051,11 @@
  */
 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2431,6 +3067,7 @@
 	if (assign_irq_vector(irq, mask))
 		return;
 
+	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -2454,7 +3091,8 @@
 		cfg->move_in_progress = 0;
 	}
 
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 #endif
 #endif /* CONFIG_SMP */
@@ -2507,7 +3145,7 @@
 	if (index < 0) {
 		printk(KERN_ERR
 		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-		        pci_name(dev));
+		       pci_name(dev));
 		return -ENOSPC;
 	}
 	return index;
@@ -2528,7 +3166,7 @@
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
-		struct irq_desc *desc = irq_desc + irq;
+		struct irq_desc *desc = irq_to_desc(irq);
 		/*
 		 * irq migration in process context
 		 */
@@ -2538,16 +3176,34 @@
 #endif
 		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
 
+	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
+
 	return 0;
 }
 
+static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
+{
+	unsigned int irq;
+
+	irq = dev->bus->number;
+	irq <<= 8;
+	irq |= dev->devfn;
+	irq <<= 12;
+
+	return irq;
+}
+
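
/*
 * Standalone sketch for illustration (not from the patch): the irq_want
 * hint above packs the PCI bus number and devfn into the upper bits and
 * leaves twelve low bits of headroom, so different devices start their
 * downward search from well-separated numbers.  Sample values are made up.
 */
#include <stdio.h>

static unsigned int pack_irq_hint(unsigned int bus, unsigned int devfn)
{
	return ((bus << 8) | devfn) << 12;	/* same layout as above */
}

int main(void)
{
	/* bus 0x02, device 3, function 0 -> devfn 0x18 */
	printf("irq_want = 0x%x\n", pack_irq_hint(0x02, 0x18) + 0x100);
	return 0;
}
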
 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
-	int irq, ret;
+	unsigned int irq;
+	int ret;
+	unsigned int irq_want;
 
-	irq = create_irq();
-	if (irq < 0)
-		return irq;
+	irq_want = build_irq_for_pci_dev(dev) + 0x100;
+
+	irq = create_irq_nr(irq_want);
+	if (irq == 0)
+		return -1;
 
 #ifdef CONFIG_INTR_REMAP
 	if (!intr_remapping_enabled)
@@ -2574,18 +3230,22 @@
 
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
-	int irq, ret, sub_handle;
+	unsigned int irq;
+	int ret, sub_handle;
 	struct msi_desc *desc;
+	unsigned int irq_want;
+
 #ifdef CONFIG_INTR_REMAP
 	struct intel_iommu *iommu = 0;
 	int index = 0;
 #endif
 
+	irq_want = build_irq_for_pci_dev(dev) + 0x100;
 	sub_handle = 0;
 	list_for_each_entry(desc, &dev->msi_list, list) {
-		irq = create_irq();
-		if (irq < 0)
-			return irq;
+		irq = create_irq_nr(irq_want--);
+		if (irq == 0)
+			return -1;
 #ifdef CONFIG_INTR_REMAP
 		if (!intr_remapping_enabled)
 			goto no_ir;
@@ -2636,10 +3296,11 @@
 #ifdef CONFIG_SMP
 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2648,6 +3309,7 @@
 	if (assign_irq_vector(irq, mask))
 		return;
 
+	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -2659,7 +3321,8 @@
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 #endif /* CONFIG_SMP */
 
@@ -2689,6 +3352,69 @@
 }
 #endif
 
+#ifdef CONFIG_HPET_TIMER
+
+#ifdef CONFIG_SMP
+static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+	struct irq_cfg *cfg;
+	struct irq_desc *desc;
+	struct msi_msg msg;
+	unsigned int dest;
+	cpumask_t tmp;
+
+	cpus_and(tmp, mask, cpu_online_map);
+	if (cpus_empty(tmp))
+		return;
+
+	if (assign_irq_vector(irq, mask))
+		return;
+
+	cfg = irq_cfg(irq);
+	cpus_and(tmp, cfg->domain, mask);
+	dest = cpu_mask_to_apicid(tmp);
+
+	hpet_msi_read(irq, &msg);
+
+	msg.data &= ~MSI_DATA_VECTOR_MASK;
+	msg.data |= MSI_DATA_VECTOR(cfg->vector);
+	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+	hpet_msi_write(irq, &msg);
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip hpet_msi_type = {
+	.name = "HPET_MSI",
+	.unmask = hpet_msi_unmask,
+	.mask = hpet_msi_mask,
+	.ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+	.set_affinity = hpet_msi_set_affinity,
+#endif
+	.retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_hpet_msi(unsigned int irq)
+{
+	int ret;
+	struct msi_msg msg;
+
+	ret = msi_compose_msg(NULL, irq, &msg);
+	if (ret < 0)
+		return ret;
+
+	hpet_msi_write(irq, &msg);
+	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
+		"edge");
+
+	return 0;
+}
+#endif
+
 #endif /* CONFIG_PCI_MSI */
 /*
  * Hypertransport interrupt support
@@ -2713,9 +3439,10 @@
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2724,11 +3451,13 @@
 	if (assign_irq_vector(irq, mask))
 		return;
 
+	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 #endif
 
@@ -2745,7 +3474,7 @@
 
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
-	struct irq_cfg *cfg = irq_cfg + irq;
+	struct irq_cfg *cfg;
 	int err;
 	cpumask_t tmp;
 
@@ -2755,6 +3484,7 @@
 		struct ht_irq_msg msg;
 		unsigned dest;
 
+		cfg = irq_cfg(irq);
 		cpus_and(tmp, cfg->domain, tmp);
 		dest = cpu_mask_to_apicid(tmp);
 
@@ -2777,18 +3507,78 @@
 
 		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
 					      handle_edge_irq, "edge");
+
+		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
 	}
 	return err;
 }
 #endif /* CONFIG_HT_IRQ */
 
-/* --------------------------------------------------------------------------
-                          ACPI-based IOAPIC Configuration
-   -------------------------------------------------------------------------- */
+#ifdef CONFIG_X86_64
+/*
+ * Re-target the irq to the specified CPU and enable the specified MMR located
+ * on the specified blade to allow the sending of MSIs to the specified CPU.
+ */
+int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
+		       unsigned long mmr_offset)
+{
+	const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+	struct irq_cfg *cfg;
+	int mmr_pnode;
+	unsigned long mmr_value;
+	struct uv_IO_APIC_route_entry *entry;
+	unsigned long flags;
+	int err;
 
-#ifdef CONFIG_ACPI
+	err = assign_irq_vector(irq, *eligible_cpu);
+	if (err != 0)
+		return err;
 
-#define IO_APIC_MAX_ID		0xFE
+	spin_lock_irqsave(&vector_lock, flags);
+	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+				      irq_name);
+	spin_unlock_irqrestore(&vector_lock, flags);
+
+	cfg = irq_cfg(irq);
+
+	mmr_value = 0;
+	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
+
+	entry->vector = cfg->vector;
+	entry->delivery_mode = INT_DELIVERY_MODE;
+	entry->dest_mode = INT_DEST_MODE;
+	entry->polarity = 0;
+	entry->trigger = 0;
+	entry->mask = 0;
+	entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+
+	mmr_pnode = uv_blade_to_pnode(mmr_blade);
+	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+	return irq;
+}
+
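
/*
 * Standalone sketch for illustration (not from the patch): the UV code
 * above builds the 64-bit MMR value by overlaying a bit-field struct on the
 * raw word.  The field widths below are invented purely to show the overlay
 * idiom; they are not the real uv_IO_APIC_route_entry layout.
 */
#include <stdint.h>

struct route_sketch {
	uint64_t vector    : 8;
	uint64_t delivery  : 3;
	uint64_t dest_mode : 1;
	uint64_t reserved  : 36;
	uint64_t mask      : 1;
	uint64_t dest      : 15;
};

static uint64_t build_mmr_value(uint8_t vector, uint16_t dest_apicid)
{
	union {
		uint64_t raw;
		struct route_sketch entry;
	} u = { .raw = 0 };

	u.entry.vector = vector;	/* hardware vector to deliver */
	u.entry.mask   = 0;		/* 0 == interrupt enabled     */
	u.entry.dest   = dest_apicid;	/* target APIC id             */
	return u.raw;
}
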
+/*
+ * Disable the specified MMR located on the specified blade so that MSIs are
+ * no longer allowed to be sent.
+ */
+void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
+{
+	unsigned long mmr_value;
+	struct uv_IO_APIC_route_entry *entry;
+	int mmr_pnode;
+
+	mmr_value = 0;
+	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
+
+	entry->mask = 1;
+
+	mmr_pnode = uv_blade_to_pnode(mmr_blade);
+	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+}
+#endif /* CONFIG_X86_64 */
 
 int __init io_apic_get_redir_entries (int ioapic)
 {
@@ -2802,6 +3592,122 @@
 	return reg_01.bits.entries;
 }
 
+int __init probe_nr_irqs(void)
+{
+	int idx;
+	int nr = 0;
+#ifndef CONFIG_XEN
+	int nr_min = 32;
+#else
+	int nr_min = NR_IRQS;
+#endif
+
+	for (idx = 0; idx < nr_ioapics; idx++)
+		nr += io_apic_get_redir_entries(idx) + 1;
+
+	/* double it for hotplug and msi and nmi */
+	nr <<= 1;
+
+	/* something wrong? clamp to a sane minimum */
+	if (nr < nr_min)
+		nr = nr_min;
+
+	return nr;
+}
+
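
/*
 * Standalone sketch for illustration (not from the patch): the estimate in
 * probe_nr_irqs() above is simply "sum of redirection entries per IO-APIC,
 * doubled, but never below a floor".  The two 24-pin IO-APICs below are
 * made-up sample inputs.
 */
#include <stdio.h>

static int estimate_nr_irqs(const int *pins, int nr_ioapics, int floor)
{
	int idx, nr = 0;

	for (idx = 0; idx < nr_ioapics; idx++)
		nr += pins[idx];	/* redirection entries + 1 per chip */
	nr <<= 1;			/* headroom for MSI, hotplug, NMI   */
	return nr < floor ? floor : nr;
}

int main(void)
{
	int pins[] = { 24, 24 };

	printf("nr_irqs estimate: %d\n", estimate_nr_irqs(pins, 2, 32));
	return 0;
}
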
+/* --------------------------------------------------------------------------
+                          ACPI-based IOAPIC Configuration
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI
+
+#ifdef CONFIG_X86_32
+int __init io_apic_get_unique_id(int ioapic, int apic_id)
+{
+	union IO_APIC_reg_00 reg_00;
+	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
+	physid_mask_t tmp;
+	unsigned long flags;
+	int i = 0;
+
+	/*
+	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
+	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
+	 * supported up to 16 on one shared APIC bus.
+	 *
+	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
+	 *      advantage of new APIC bus architecture.
+	 */
+
+	if (physids_empty(apic_id_map))
+		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	reg_00.raw = io_apic_read(ioapic, 0);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	if (apic_id >= get_physical_broadcast()) {
+		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
+			"%d\n", ioapic, apic_id, reg_00.bits.ID);
+		apic_id = reg_00.bits.ID;
+	}
+
+	/*
+	 * Every APIC in a system must have a unique ID or we get lots of nice
+	 * 'stuck on smp_invalidate_needed IPI wait' messages.
+	 */
+	if (check_apicid_used(apic_id_map, apic_id)) {
+
+		for (i = 0; i < get_physical_broadcast(); i++) {
+			if (!check_apicid_used(apic_id_map, i))
+				break;
+		}
+
+		if (i == get_physical_broadcast())
+			panic("Max apic_id exceeded!\n");
+
+		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
+			"trying %d\n", ioapic, apic_id, i);
+
+		apic_id = i;
+	}
+
+	tmp = apicid_to_cpu_present(apic_id);
+	physids_or(apic_id_map, apic_id_map, tmp);
+
+	if (reg_00.bits.ID != apic_id) {
+		reg_00.bits.ID = apic_id;
+
+		spin_lock_irqsave(&ioapic_lock, flags);
+		io_apic_write(ioapic, 0, reg_00.raw);
+		reg_00.raw = io_apic_read(ioapic, 0);
+		spin_unlock_irqrestore(&ioapic_lock, flags);
+
+		/* Sanity check */
+		if (reg_00.bits.ID != apic_id) {
+			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
+			return -1;
+		}
+	}
+
+	apic_printk(APIC_VERBOSE, KERN_INFO
+			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
+
+	return apic_id;
+}
+
+int __init io_apic_get_version(int ioapic)
+{
+	union IO_APIC_reg_01	reg_01;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	reg_01.raw = io_apic_read(ioapic, 1);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	return reg_01.bits.version;
+}
+#endif
 
 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 {
@@ -2853,6 +3759,7 @@
 void __init setup_ioapic_dest(void)
 {
 	int pin, ioapic, irq, irq_entry;
+	struct irq_cfg *cfg;
 
 	if (skip_ioapic_setup == 1)
 		return;
@@ -2868,7 +3775,8 @@
 			 * when you have too many devices, because at that time only boot
 			 * cpu is online.
 			 */
-			if (!irq_cfg[irq].vector)
+			cfg = irq_cfg(irq);
+			if (!cfg->vector)
 				setup_IO_APIC_irq(ioapic, pin, irq,
 						  irq_trigger(irq_entry),
 						  irq_polarity(irq_entry));
@@ -2926,18 +3834,33 @@
 	struct resource *ioapic_res;
 	int i;
 
+	irq_2_pin_init();
 	ioapic_res = ioapic_setup_resources();
 	for (i = 0; i < nr_ioapics; i++) {
 		if (smp_found_config) {
 			ioapic_phys = mp_ioapics[i].mp_apicaddr;
+#ifdef CONFIG_X86_32
+			if (!ioapic_phys) {
+				printk(KERN_ERR
+				       "WARNING: bogus zero IO-APIC "
+				       "address found in MPTABLE, "
+				       "disabling IO/APIC support!\n");
+				smp_found_config = 0;
+				skip_ioapic_setup = 1;
+				goto fake_ioapic_page;
+			}
+#endif
 		} else {
+#ifdef CONFIG_X86_32
+fake_ioapic_page:
+#endif
 			ioapic_phys = (unsigned long)
 				alloc_bootmem_pages(PAGE_SIZE);
 			ioapic_phys = __pa(ioapic_phys);
 		}
 		set_fixmap_nocache(idx, ioapic_phys);
 		apic_printk(APIC_VERBOSE,
-			    "mapped IOAPIC to %016lx (%016lx)\n",
+			    "mapped IOAPIC to %08lx (%08lx)\n",
 			    __fix_to_virt(idx), ioapic_phys);
 		idx++;
 
@@ -2971,4 +3894,3 @@
 /* Insert the IO APIC resources after PCI initialization has occurred to handle
  * IO APICS that are mapped in on a BAR in PCI space. */
 late_initcall(ioapic_insert_resources);
-
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
deleted file mode 100644
index e710289..0000000
--- a/arch/x86/kernel/io_apic_32.c
+++ /dev/null
@@ -1,2908 +0,0 @@
-/*
- *	Intel IO-APIC support for multi-Pentium hosts.
- *
- *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
- *
- *	Many thanks to Stig Venaas for trying out countless experimental
- *	patches and reporting/debugging problems patiently!
- *
- *	(c) 1999, Multiple IO-APIC support, developed by
- *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
- *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
- *	further tested and cleaned up by Zach Brown <zab@redhat.com>
- *	and Ingo Molnar <mingo@redhat.com>
- *
- *	Fixes
- *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
- *					thanks to Eric Gilmore
- *					and Rolf G. Tews
- *					for testing these extensively
- *	Paul Diefenbaugh	:	Added full ACPI support
- */
-
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/mc146818rtc.h>
-#include <linux/compiler.h>
-#include <linux/acpi.h>
-#include <linux/module.h>
-#include <linux/sysdev.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <linux/htirq.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/jiffies.h>	/* time_after() */
-
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/desc.h>
-#include <asm/timer.h>
-#include <asm/i8259.h>
-#include <asm/nmi.h>
-#include <asm/msidef.h>
-#include <asm/hypertransport.h>
-#include <asm/setup.h>
-
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-
-#define __apicdebuginit(type) static type __init
-
-int (*ioapic_renumber_irq)(int ioapic, int irq);
-atomic_t irq_mis_count;
-
-/* Where if anywhere is the i8259 connect in external int mode */
-static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-
-static DEFINE_SPINLOCK(ioapic_lock);
-DEFINE_SPINLOCK(vector_lock);
-
-int timer_through_8259 __initdata;
-
-/*
- *	Is the SiS APIC rmw bug present ?
- *	-1 = don't know, 0 = no, 1 = yes
- */
-int sis_apic_bug = -1;
-
-/*
- * # of IRQ routing registers
- */
-int nr_ioapic_registers[MAX_IO_APICS];
-
-/* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
-int nr_ioapics;
-
-/* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-
-/* # of MP IRQ source entries */
-int mp_irq_entries;
-
-#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
-int mp_bus_id_to_type[MAX_MP_BUSSES];
-#endif
-
-DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
-
-static int disable_timer_pin_1 __initdata;
-
-/*
- * Rough estimation of how many shared IRQs there are, can
- * be changed anytime.
- */
-#define MAX_PLUS_SHARED_IRQS NR_IRQS
-#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
-
-static struct irq_pin_list {
-	int apic, pin, next;
-} irq_2_pin[PIN_MAP_SIZE];
-
-struct io_apic {
-	unsigned int index;
-	unsigned int unused[3];
-	unsigned int data;
-};
-
-static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
-{
-	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
-}
-
-static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
-{
-	struct io_apic __iomem *io_apic = io_apic_base(apic);
-	writel(reg, &io_apic->index);
-	return readl(&io_apic->data);
-}
-
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-{
-	struct io_apic __iomem *io_apic = io_apic_base(apic);
-	writel(reg, &io_apic->index);
-	writel(value, &io_apic->data);
-}
-
-/*
- * Re-write a value: to be used for read-modify-write
- * cycles where the read already set up the index register.
- *
- * Older SiS APIC requires we rewrite the index register
- */
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
-{
-	volatile struct io_apic __iomem *io_apic = io_apic_base(apic);
-	if (sis_apic_bug)
-		writel(reg, &io_apic->index);
-	writel(value, &io_apic->data);
-}
-
-union entry_union {
-	struct { u32 w1, w2; };
-	struct IO_APIC_route_entry entry;
-};
-
-static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
-{
-	union entry_union eu;
-	unsigned long flags;
-	spin_lock_irqsave(&ioapic_lock, flags);
-	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
-	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-	return eu.entry;
-}
-
-/*
- * When we write a new IO APIC routing entry, we need to write the high
- * word first! If the mask bit in the low word is clear, we will enable
- * the interrupt, and we need to make sure the entry is fully populated
- * before that happens.
- */
-static void
-__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-{
-	union entry_union eu;
-	eu.entry = e;
-	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-}
-
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__ioapic_write_entry(apic, pin, e);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-/*
- * When we mask an IO APIC routing entry, we need to write the low
- * word first, in order to set the mask bit before we change the
- * high bits!
- */
-static void ioapic_mask_entry(int apic, int pin)
-{
-	unsigned long flags;
-	union entry_union eu = { .entry.mask = 1 };
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-/*
- * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
- * shared ISA-space IRQs, so we have to support them. We are super
- * fast in the common case, and fast for shared ISA-space IRQs.
- */
-static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-{
-	static int first_free_entry = NR_IRQS;
-	struct irq_pin_list *entry = irq_2_pin + irq;
-
-	while (entry->next)
-		entry = irq_2_pin + entry->next;
-
-	if (entry->pin != -1) {
-		entry->next = first_free_entry;
-		entry = irq_2_pin + entry->next;
-		if (++first_free_entry >= PIN_MAP_SIZE)
-			panic("io_apic.c: whoops");
-	}
-	entry->apic = apic;
-	entry->pin = pin;
-}
-
-/*
- * Reroute an IRQ to a different pin.
- */
-static void __init replace_pin_at_irq(unsigned int irq,
-				      int oldapic, int oldpin,
-				      int newapic, int newpin)
-{
-	struct irq_pin_list *entry = irq_2_pin + irq;
-
-	while (1) {
-		if (entry->apic == oldapic && entry->pin == oldpin) {
-			entry->apic = newapic;
-			entry->pin = newpin;
-		}
-		if (!entry->next)
-			break;
-		entry = irq_2_pin + entry->next;
-	}
-}
-
-static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
-{
-	struct irq_pin_list *entry = irq_2_pin + irq;
-	unsigned int pin, reg;
-
-	for (;;) {
-		pin = entry->pin;
-		if (pin == -1)
-			break;
-		reg = io_apic_read(entry->apic, 0x10 + pin*2);
-		reg &= ~disable;
-		reg |= enable;
-		io_apic_modify(entry->apic, 0x10 + pin*2, reg);
-		if (!entry->next)
-			break;
-		entry = irq_2_pin + entry->next;
-	}
-}
-
-/* mask = 1 */
-static void __mask_IO_APIC_irq(unsigned int irq)
-{
-	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
-}
-
-/* mask = 0 */
-static void __unmask_IO_APIC_irq(unsigned int irq)
-{
-	__modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
-}
-
-/* mask = 1, trigger = 0 */
-static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
-{
-	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
-				IO_APIC_REDIR_LEVEL_TRIGGER);
-}
-
-/* mask = 0, trigger = 1 */
-static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
-{
-	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
-				IO_APIC_REDIR_MASKED);
-}
-
-static void mask_IO_APIC_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__mask_IO_APIC_irq(irq);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-static void unmask_IO_APIC_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__unmask_IO_APIC_irq(irq);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-{
-	struct IO_APIC_route_entry entry;
-
-	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-	entry = ioapic_read_entry(apic, pin);
-	if (entry.delivery_mode == dest_SMI)
-		return;
-
-	/*
-	 * Disable it in the IO-APIC irq-routing table:
-	 */
-	ioapic_mask_entry(apic, pin);
-}
-
-static void clear_IO_APIC(void)
-{
-	int apic, pin;
-
-	for (apic = 0; apic < nr_ioapics; apic++)
-		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-			clear_IO_APIC_pin(apic, pin);
-}
-
-#ifdef CONFIG_SMP
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
-{
-	unsigned long flags;
-	int pin;
-	struct irq_pin_list *entry = irq_2_pin + irq;
-	unsigned int apicid_value;
-	cpumask_t tmp;
-
-	cpus_and(tmp, cpumask, cpu_online_map);
-	if (cpus_empty(tmp))
-		tmp = TARGET_CPUS;
-
-	cpus_and(cpumask, tmp, CPU_MASK_ALL);
-
-	apicid_value = cpu_mask_to_apicid(cpumask);
-	/* Prepare to do the io_apic_write */
-	apicid_value = apicid_value << 24;
-	spin_lock_irqsave(&ioapic_lock, flags);
-	for (;;) {
-		pin = entry->pin;
-		if (pin == -1)
-			break;
-		io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
-		if (!entry->next)
-			break;
-		entry = irq_2_pin + entry->next;
-	}
-	irq_desc[irq].affinity = cpumask;
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-#if defined(CONFIG_IRQBALANCE)
-# include <asm/processor.h>	/* kernel_thread() */
-# include <linux/kernel_stat.h>	/* kstat */
-# include <linux/slab.h>		/* kmalloc() */
-# include <linux/timer.h>
-
-#define IRQBALANCE_CHECK_ARCH -999
-#define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
-#define MIN_BALANCED_IRQ_INTERVAL	(HZ/2)
-#define BALANCED_IRQ_MORE_DELTA		(HZ/10)
-#define BALANCED_IRQ_LESS_DELTA		(HZ)
-
-static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
-static int physical_balance __read_mostly;
-static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
-
-static struct irq_cpu_info {
-	unsigned long *last_irq;
-	unsigned long *irq_delta;
-	unsigned long irq;
-} irq_cpu_data[NR_CPUS];
-
-#define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
-#define LAST_CPU_IRQ(cpu, irq)   (irq_cpu_data[cpu].last_irq[irq])
-#define IRQ_DELTA(cpu, irq) 	(irq_cpu_data[cpu].irq_delta[irq])
-
-#define IDLE_ENOUGH(cpu,now) \
-	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
-
-#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
-
-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
-
-static cpumask_t balance_irq_affinity[NR_IRQS] = {
-	[0 ... NR_IRQS-1] = CPU_MASK_ALL
-};
-
-void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
-{
-	balance_irq_affinity[irq] = mask;
-}
-
-static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
-			unsigned long now, int direction)
-{
-	int search_idle = 1;
-	int cpu = curr_cpu;
-
-	goto inside;
-
-	do {
-		if (unlikely(cpu == curr_cpu))
-			search_idle = 0;
-inside:
-		if (direction == 1) {
-			cpu++;
-			if (cpu >= NR_CPUS)
-				cpu = 0;
-		} else {
-			cpu--;
-			if (cpu == -1)
-				cpu = NR_CPUS-1;
-		}
-	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
-			(search_idle && !IDLE_ENOUGH(cpu, now)));
-
-	return cpu;
-}
-
-static inline void balance_irq(int cpu, int irq)
-{
-	unsigned long now = jiffies;
-	cpumask_t allowed_mask;
-	unsigned int new_cpu;
-
-	if (irqbalance_disabled)
-		return;
-
-	cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
-	new_cpu = move(cpu, allowed_mask, now, 1);
-	if (cpu != new_cpu)
-		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
-}
-
-static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
-{
-	int i, j;
-
-	for_each_online_cpu(i) {
-		for (j = 0; j < NR_IRQS; j++) {
-			if (!irq_desc[j].action)
-				continue;
-			/* Is it a significant load ?  */
-			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
-						useful_load_threshold)
-				continue;
-			balance_irq(i, j);
-		}
-	}
-	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
-	return;
-}
-
-static void do_irq_balance(void)
-{
-	int i, j;
-	unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
-	unsigned long move_this_load = 0;
-	int max_loaded = 0, min_loaded = 0;
-	int load;
-	unsigned long useful_load_threshold = balanced_irq_interval + 10;
-	int selected_irq;
-	int tmp_loaded, first_attempt = 1;
-	unsigned long tmp_cpu_irq;
-	unsigned long imbalance = 0;
-	cpumask_t allowed_mask, target_cpu_mask, tmp;
-
-	for_each_possible_cpu(i) {
-		int package_index;
-		CPU_IRQ(i) = 0;
-		if (!cpu_online(i))
-			continue;
-		package_index = CPU_TO_PACKAGEINDEX(i);
-		for (j = 0; j < NR_IRQS; j++) {
-			unsigned long value_now, delta;
-			/* Is this an active IRQ or balancing disabled ? */
-			if (!irq_desc[j].action || irq_balancing_disabled(j))
-				continue;
-			if (package_index == i)
-				IRQ_DELTA(package_index, j) = 0;
-			/* Determine the total count per processor per IRQ */
-			value_now = (unsigned long) kstat_cpu(i).irqs[j];
-
-			/* Determine the activity per processor per IRQ */
-			delta = value_now - LAST_CPU_IRQ(i, j);
-
-			/* Update last_cpu_irq[][] for the next time */
-			LAST_CPU_IRQ(i, j) = value_now;
-
-			/* Ignore IRQs whose rate is less than the clock */
-			if (delta < useful_load_threshold)
-				continue;
-			/* update the load for the processor or package total */
-			IRQ_DELTA(package_index, j) += delta;
-
-			/* Keep track of the higher numbered sibling as well */
-			if (i != package_index)
-				CPU_IRQ(i) += delta;
-			/*
-			 * We have sibling A and sibling B in the package
-			 *
-			 * cpu_irq[A] = load for cpu A + load for cpu B
-			 * cpu_irq[B] = load for cpu B
-			 */
-			CPU_IRQ(package_index) += delta;
-		}
-	}
-	/* Find the least loaded processor package */
-	for_each_online_cpu(i) {
-		if (i != CPU_TO_PACKAGEINDEX(i))
-			continue;
-		if (min_cpu_irq > CPU_IRQ(i)) {
-			min_cpu_irq = CPU_IRQ(i);
-			min_loaded = i;
-		}
-	}
-	max_cpu_irq = ULONG_MAX;
-
-tryanothercpu:
-	/*
-	 * Look for heaviest loaded processor.
-	 * We may come back to get the next heaviest loaded processor.
-	 * Skip processors with trivial loads.
-	 */
-	tmp_cpu_irq = 0;
-	tmp_loaded = -1;
-	for_each_online_cpu(i) {
-		if (i != CPU_TO_PACKAGEINDEX(i))
-			continue;
-		if (max_cpu_irq <= CPU_IRQ(i))
-			continue;
-		if (tmp_cpu_irq < CPU_IRQ(i)) {
-			tmp_cpu_irq = CPU_IRQ(i);
-			tmp_loaded = i;
-		}
-	}
-
-	if (tmp_loaded == -1) {
-	 /*
-	  * In the case of small number of heavy interrupt sources,
-	  * loading some of the cpus too much. We use Ingo's original
-	  * approach to rotate them around.
-	  */
-		if (!first_attempt && imbalance >= useful_load_threshold) {
-			rotate_irqs_among_cpus(useful_load_threshold);
-			return;
-		}
-		goto not_worth_the_effort;
-	}
-
-	first_attempt = 0;		/* heaviest search */
-	max_cpu_irq = tmp_cpu_irq;	/* load */
-	max_loaded = tmp_loaded;	/* processor */
-	imbalance = (max_cpu_irq - min_cpu_irq) / 2;
-
-	/*
-	 * if imbalance is less than approx 10% of max load, then
-	 * observe diminishing returns action. - quit
-	 */
-	if (imbalance < (max_cpu_irq >> 3))
-		goto not_worth_the_effort;
-
-tryanotherirq:
-	/* if we select an IRQ to move that can't go where we want, then
-	 * see if there is another one to try.
-	 */
-	move_this_load = 0;
-	selected_irq = -1;
-	for (j = 0; j < NR_IRQS; j++) {
-		/* Is this an active IRQ? */
-		if (!irq_desc[j].action)
-			continue;
-		if (imbalance <= IRQ_DELTA(max_loaded, j))
-			continue;
-		/* Try to find the IRQ that is closest to the imbalance
-		 * without going over.
-		 */
-		if (move_this_load < IRQ_DELTA(max_loaded, j)) {
-			move_this_load = IRQ_DELTA(max_loaded, j);
-			selected_irq = j;
-		}
-	}
-	if (selected_irq == -1)
-		goto tryanothercpu;
-
-	imbalance = move_this_load;
-
-	/* For physical_balance case, we accumulated both load
-	 * values in the one of the siblings cpu_irq[],
-	 * to use the same code for physical and logical processors
-	 * as much as possible.
-	 *
-	 * NOTE: the cpu_irq[] array holds the sum of the load for
-	 * sibling A and sibling B in the slot for the lowest numbered
-	 * sibling (A), _AND_ the load for sibling B in the slot for
-	 * the higher numbered sibling.
-	 *
-	 * We seek the least loaded sibling by making the comparison
-	 * (A+B)/2 vs B
-	 */
-	load = CPU_IRQ(min_loaded) >> 1;
-	for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
-		if (load > CPU_IRQ(j)) {
-			/* This won't change cpu_sibling_map[min_loaded] */
-			load = CPU_IRQ(j);
-			min_loaded = j;
-		}
-	}
-
-	cpus_and(allowed_mask,
-		cpu_online_map,
-		balance_irq_affinity[selected_irq]);
-	target_cpu_mask = cpumask_of_cpu(min_loaded);
-	cpus_and(tmp, target_cpu_mask, allowed_mask);
-
-	if (!cpus_empty(tmp)) {
-		/* mark for change destination */
-		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
-
-		/* Since we made a change, come back sooner to
-		 * check for more variation.
-		 */
-		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
-		return;
-	}
-	goto tryanotherirq;
-
-not_worth_the_effort:
-	/*
-	 * if we did not find an IRQ to move, then adjust the time interval
-	 * upward
-	 */
-	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
-		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
-	return;
-}
-
-static int balanced_irq(void *unused)
-{
-	int i;
-	unsigned long prev_balance_time = jiffies;
-	long time_remaining = balanced_irq_interval;
-
-	/* push everything to CPU 0 to give us a starting point.  */
-	for (i = 0 ; i < NR_IRQS ; i++) {
-		irq_desc[i].pending_mask = cpumask_of_cpu(0);
-		set_pending_irq(i, cpumask_of_cpu(0));
-	}
-
-	set_freezable();
-	for ( ; ; ) {
-		time_remaining = schedule_timeout_interruptible(time_remaining);
-		try_to_freeze();
-		if (time_after(jiffies,
-				prev_balance_time+balanced_irq_interval)) {
-			preempt_disable();
-			do_irq_balance();
-			prev_balance_time = jiffies;
-			time_remaining = balanced_irq_interval;
-			preempt_enable();
-		}
-	}
-	return 0;
-}
-
-static int __init balanced_irq_init(void)
-{
-	int i;
-	struct cpuinfo_x86 *c;
-	cpumask_t tmp;
-
-	cpus_shift_right(tmp, cpu_online_map, 2);
-	c = &boot_cpu_data;
-	/* When not overridden by the command line, ask the subarchitecture. */
-	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
-		irqbalance_disabled = NO_BALANCE_IRQ;
-	if (irqbalance_disabled)
-		return 0;
-
-	 /* disable irqbalance completely if there is only one processor online */
-	if (num_online_cpus() < 2) {
-		irqbalance_disabled = 1;
-		return 0;
-	}
-	/*
-	 * Enable physical balance only if more than 1 physical processor
-	 * is present
-	 */
-	if (smp_num_siblings > 1 && !cpus_empty(tmp))
-		physical_balance = 1;
-
-	for_each_online_cpu(i) {
-		irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-		irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
-			printk(KERN_ERR "balanced_irq_init: out of memory");
-			goto failed;
-		}
-	}
-
-	printk(KERN_INFO "Starting balanced_irq\n");
-	if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
-		return 0;
-	printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
-failed:
-	for_each_possible_cpu(i) {
-		kfree(irq_cpu_data[i].irq_delta);
-		irq_cpu_data[i].irq_delta = NULL;
-		kfree(irq_cpu_data[i].last_irq);
-		irq_cpu_data[i].last_irq = NULL;
-	}
-	return 0;
-}
-
-int __devinit irqbalance_disable(char *str)
-{
-	irqbalance_disabled = 1;
-	return 1;
-}
-
-__setup("noirqbalance", irqbalance_disable);
-
-late_initcall(balanced_irq_init);
-#endif /* CONFIG_IRQBALANCE */
-#endif /* CONFIG_SMP */
-
-#ifndef CONFIG_SMP
-void send_IPI_self(int vector)
-{
-	unsigned int cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
-	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write(APIC_ICR, cfg);
-}
-#endif /* !CONFIG_SMP */
-
-
-/*
- * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
- * specific CPU-side IRQs.
- */
-
-#define MAX_PIRQS 8
-static int pirq_entries [MAX_PIRQS];
-static int pirqs_enabled;
-int skip_ioapic_setup;
-
-static int __init ioapic_pirq_setup(char *str)
-{
-	int i, max;
-	int ints[MAX_PIRQS+1];
-
-	get_options(str, ARRAY_SIZE(ints), ints);
-
-	for (i = 0; i < MAX_PIRQS; i++)
-		pirq_entries[i] = -1;
-
-	pirqs_enabled = 1;
-	apic_printk(APIC_VERBOSE, KERN_INFO
-			"PIRQ redirection, working around broken MP-BIOS.\n");
-	max = MAX_PIRQS;
-	if (ints[0] < MAX_PIRQS)
-		max = ints[0];
-
-	for (i = 0; i < max; i++) {
-		apic_printk(APIC_VERBOSE, KERN_DEBUG
-				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-		/*
-		 * PIRQs are mapped upside down, usually.
-		 */
-		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-	}
-	return 1;
-}
-
-__setup("pirq=", ioapic_pirq_setup);
-
-/*
- * Find the IRQ entry number of a certain pin.
- */
-static int find_irq_entry(int apic, int pin, int type)
-{
-	int i;
-
-	for (i = 0; i < mp_irq_entries; i++)
-		if (mp_irqs[i].mp_irqtype == type &&
-		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
-		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
-		    mp_irqs[i].mp_dstirq == pin)
-			return i;
-
-	return -1;
-}
-
-/*
- * Find the pin to which IRQ[irq] (ISA) is connected
- */
-static int __init find_isa_irq_pin(int irq, int type)
-{
-	int i;
-
-	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
-
-		if (test_bit(lbus, mp_bus_not_pci) &&
-		    (mp_irqs[i].mp_irqtype == type) &&
-		    (mp_irqs[i].mp_srcbusirq == irq))
-
-			return mp_irqs[i].mp_dstirq;
-	}
-	return -1;
-}
-
-static int __init find_isa_irq_apic(int irq, int type)
-{
-	int i;
-
-	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
-
-		if (test_bit(lbus, mp_bus_not_pci) &&
-		    (mp_irqs[i].mp_irqtype == type) &&
-		    (mp_irqs[i].mp_srcbusirq == irq))
-			break;
-	}
-	if (i < mp_irq_entries) {
-		int apic;
-		for (apic = 0; apic < nr_ioapics; apic++) {
-			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
-				return apic;
-		}
-	}
-
-	return -1;
-}
-
-/*
- * Find a specific PCI IRQ entry.
- * Not an __init, possibly needed by modules
- */
-static int pin_2_irq(int idx, int apic, int pin);
-
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-{
-	int apic, i, best_guess = -1;
-
-	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
-		"slot:%d, pin:%d.\n", bus, slot, pin);
-	if (test_bit(bus, mp_bus_not_pci)) {
-		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-		return -1;
-	}
-	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
-
-		for (apic = 0; apic < nr_ioapics; apic++)
-			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
-			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
-				break;
-
-		if (!test_bit(lbus, mp_bus_not_pci) &&
-		    !mp_irqs[i].mp_irqtype &&
-		    (bus == lbus) &&
-		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
-			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);
-
-			if (!(apic || IO_APIC_IRQ(irq)))
-				continue;
-
-			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
-				return irq;
-			/*
-			 * Use the first all-but-pin matching entry as a
-			 * best-guess fuzzy result for broken mptables.
-			 */
-			if (best_guess < 0)
-				best_guess = irq;
-		}
-	}
-	return best_guess;
-}
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-
-/*
- * This function currently is only a helper for the i386 smp boot process where
- * we need to reprogram the ioredtbls to cater for the cpus which have come online,
- * so the mask in all cases should simply be TARGET_CPUS
- */
-#ifdef CONFIG_SMP
-void __init setup_ioapic_dest(void)
-{
-	int pin, ioapic, irq, irq_entry;
-
-	if (skip_ioapic_setup == 1)
-		return;
-
-	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-			if (irq_entry == -1)
-				continue;
-			irq = pin_2_irq(irq_entry, ioapic, pin);
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
-		}
-
-	}
-}
-#endif
-
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-/*
- * EISA Edge/Level control register, ELCR
- */
-static int EISA_ELCR(unsigned int irq)
-{
-	if (irq < 16) {
-		unsigned int port = 0x4d0 + (irq >> 3);
-		return (inb(port) >> (irq & 7)) & 1;
-	}
-	apic_printk(APIC_VERBOSE, KERN_INFO
-			"Broken MPtable reports ISA irq %d\n", irq);
-	return 0;
-}
-#endif
-
-/* ISA interrupts are always polarity zero edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_ISA_trigger(idx)	(0)
-#define default_ISA_polarity(idx)	(0)
-
-/* EISA interrupts are always polarity zero and can be edge or level
- * trigger depending on the ELCR value.  If an interrupt is listed as
- * EISA conforming in the MP table, that means its trigger type must
- * be read in from the ELCR */
-
-#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
-#define default_EISA_polarity(idx)	default_ISA_polarity(idx)
-
-/* PCI interrupts are always polarity one level triggered,
- * when listed as conforming in the MP table. */
-
-#define default_PCI_trigger(idx)	(1)
-#define default_PCI_polarity(idx)	(1)
-
-/* MCA interrupts are always polarity zero level triggered,
- * when listed as conforming in the MP table. */
-
-#define default_MCA_trigger(idx)	(1)
-#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
-
-static int MPBIOS_polarity(int idx)
-{
-	int bus = mp_irqs[idx].mp_srcbus;
-	int polarity;
-
-	/*
-	 * Determine IRQ line polarity (high active or low active):
-	 */
-	switch (mp_irqs[idx].mp_irqflag & 3) {
-	case 0: /* conforms, ie. bus-type dependent polarity */
-	{
-		polarity = test_bit(bus, mp_bus_not_pci)?
-			default_ISA_polarity(idx):
-			default_PCI_polarity(idx);
-		break;
-	}
-	case 1: /* high active */
-	{
-		polarity = 0;
-		break;
-	}
-	case 2: /* reserved */
-	{
-		printk(KERN_WARNING "broken BIOS!!\n");
-		polarity = 1;
-		break;
-	}
-	case 3: /* low active */
-	{
-		polarity = 1;
-		break;
-	}
-	default: /* invalid */
-	{
-		printk(KERN_WARNING "broken BIOS!!\n");
-		polarity = 1;
-		break;
-	}
-	}
-	return polarity;
-}
-
-static int MPBIOS_trigger(int idx)
-{
-	int bus = mp_irqs[idx].mp_srcbus;
-	int trigger;
-
-	/*
-	 * Determine IRQ trigger mode (edge or level sensitive):
-	 */
-	switch ((mp_irqs[idx].mp_irqflag>>2) & 3) {
-	case 0: /* conforms, ie. bus-type dependent */
-	{
-		trigger = test_bit(bus, mp_bus_not_pci)?
-				default_ISA_trigger(idx):
-				default_PCI_trigger(idx);
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-		switch (mp_bus_id_to_type[bus]) {
-		case MP_BUS_ISA: /* ISA pin */
-		{
-			/* set before the switch */
-			break;
-		}
-		case MP_BUS_EISA: /* EISA pin */
-		{
-			trigger = default_EISA_trigger(idx);
-			break;
-		}
-		case MP_BUS_PCI: /* PCI pin */
-		{
-			/* set before the switch */
-			break;
-		}
-		case MP_BUS_MCA: /* MCA pin */
-		{
-			trigger = default_MCA_trigger(idx);
-			break;
-		}
-		default:
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			trigger = 1;
-			break;
-		}
-	}
-#endif
-		break;
-	}
-	case 1: /* edge */
-	{
-		trigger = 0;
-		break;
-	}
-	case 2: /* reserved */
-	{
-		printk(KERN_WARNING "broken BIOS!!\n");
-		trigger = 1;
-		break;
-	}
-	case 3: /* level */
-	{
-		trigger = 1;
-		break;
-	}
-	default: /* invalid */
-	{
-		printk(KERN_WARNING "broken BIOS!!\n");
-		trigger = 0;
-		break;
-	}
-	}
-	return trigger;
-}
-
-static inline int irq_polarity(int idx)
-{
-	return MPBIOS_polarity(idx);
-}
-
-static inline int irq_trigger(int idx)
-{
-	return MPBIOS_trigger(idx);
-}
-
-static int pin_2_irq(int idx, int apic, int pin)
-{
-	int irq, i;
-	int bus = mp_irqs[idx].mp_srcbus;
-
-	/*
-	 * Debugging check, we are in big trouble if this message pops up!
-	 */
-	if (mp_irqs[idx].mp_dstirq != pin)
-		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-
-	if (test_bit(bus, mp_bus_not_pci))
-		irq = mp_irqs[idx].mp_srcbusirq;
-	else {
-		/*
-		 * PCI IRQs are mapped in order
-		 */
-		i = irq = 0;
-		while (i < apic)
-			irq += nr_ioapic_registers[i++];
-		irq += pin;
-
-		/*
-		 * For MPS mode, so far only needed by ES7000 platform
-		 */
-		if (ioapic_renumber_irq)
-			irq = ioapic_renumber_irq(apic, irq);
-	}
-
-	/*
-	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
-	 */
-	if ((pin >= 16) && (pin <= 23)) {
-		if (pirq_entries[pin-16] != -1) {
-			if (!pirq_entries[pin-16]) {
-				apic_printk(APIC_VERBOSE, KERN_DEBUG
-						"disabling PIRQ%d\n", pin-16);
-			} else {
-				irq = pirq_entries[pin-16];
-				apic_printk(APIC_VERBOSE, KERN_DEBUG
-						"using PIRQ%d -> IRQ %d\n",
-						pin-16, irq);
-			}
-		}
-	}
-	return irq;
-}
-
-static inline int IO_APIC_irq_trigger(int irq)
-{
-	int apic, idx, pin;
-
-	for (apic = 0; apic < nr_ioapics; apic++) {
-		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-			idx = find_irq_entry(apic, pin, mp_INT);
-			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
-				return irq_trigger(idx);
-		}
-	}
-	/*
-	 * nonexistent IRQs are edge default
-	 */
-	return 0;
-}
-
-/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
-
-static int __assign_irq_vector(int irq)
-{
-	static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
-	int vector, offset;
-
-	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
-
-	if (irq_vector[irq] > 0)
-		return irq_vector[irq];
-
-	vector = current_vector;
-	offset = current_offset;
-next:
-	vector += 8;
-	if (vector >= first_system_vector) {
-		offset = (offset + 1) % 8;
-		vector = FIRST_DEVICE_VECTOR + offset;
-	}
-	if (vector == current_vector)
-		return -ENOSPC;
-	if (test_and_set_bit(vector, used_vectors))
-		goto next;
-
-	current_vector = vector;
-	current_offset = offset;
-	irq_vector[irq] = vector;
-
-	return vector;
-}
-
-static int assign_irq_vector(int irq)
-{
-	unsigned long flags;
-	int vector;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	vector = __assign_irq_vector(irq);
-	spin_unlock_irqrestore(&vector_lock, flags);
-
-	return vector;
-}
-
-static struct irq_chip ioapic_chip;
-
-#define IOAPIC_AUTO	-1
-#define IOAPIC_EDGE	0
-#define IOAPIC_LEVEL	1
-
-static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-{
-	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-	    trigger == IOAPIC_LEVEL) {
-		irq_desc[irq].status |= IRQ_LEVEL;
-		set_irq_chip_and_handler_name(irq, &ioapic_chip,
-					 handle_fasteoi_irq, "fasteoi");
-	} else {
-		irq_desc[irq].status &= ~IRQ_LEVEL;
-		set_irq_chip_and_handler_name(irq, &ioapic_chip,
-					 handle_edge_irq, "edge");
-	}
-	set_intr_gate(vector, interrupt[irq]);
-}
-
-static void __init setup_IO_APIC_irqs(void)
-{
-	struct IO_APIC_route_entry entry;
-	int apic, pin, idx, irq, first_notcon = 1, vector;
-
-	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-
-	for (apic = 0; apic < nr_ioapics; apic++) {
-	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-
-		/*
-		 * add it to the IO-APIC irq-routing table:
-		 */
-		memset(&entry, 0, sizeof(entry));
-
-		entry.delivery_mode = INT_DELIVERY_MODE;
-		entry.dest_mode = INT_DEST_MODE;
-		entry.mask = 0;				/* enable IRQ */
-		entry.dest.logical.logical_dest =
-					cpu_mask_to_apicid(TARGET_CPUS);
-
-		idx = find_irq_entry(apic, pin, mp_INT);
-		if (idx == -1) {
-			if (first_notcon) {
-				apic_printk(APIC_VERBOSE, KERN_DEBUG
-						" IO-APIC (apicid-pin) %d-%d",
-						mp_ioapics[apic].mp_apicid,
-						pin);
-				first_notcon = 0;
-			} else
-				apic_printk(APIC_VERBOSE, ", %d-%d",
-					mp_ioapics[apic].mp_apicid, pin);
-			continue;
-		}
-
-		if (!first_notcon) {
-			apic_printk(APIC_VERBOSE, " not connected.\n");
-			first_notcon = 1;
-		}
-
-		entry.trigger = irq_trigger(idx);
-		entry.polarity = irq_polarity(idx);
-
-		if (irq_trigger(idx)) {
-			entry.trigger = 1;
-			entry.mask = 1;
-		}
-
-		irq = pin_2_irq(idx, apic, pin);
-		/*
-		 * skip adding the timer int on secondary nodes, which causes
-		 * a small but painful rift in the time-space continuum
-		 */
-		if (multi_timer_check(apic, irq))
-			continue;
-		else
-			add_pin_to_irq(irq, apic, pin);
-
-		if (!apic && !IO_APIC_IRQ(irq))
-			continue;
-
-		if (IO_APIC_IRQ(irq)) {
-			vector = assign_irq_vector(irq);
-			entry.vector = vector;
-			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-
-			if (!apic && (irq < 16))
-				disable_8259A_irq(irq);
-		}
-		ioapic_write_entry(apic, pin, entry);
-	}
-	}
-
-	if (!first_notcon)
-		apic_printk(APIC_VERBOSE, " not connected.\n");
-}
-
-/*
- * Set up the timer pin, possibly with the 8259A-master behind.
- */
-static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
-					int vector)
-{
-	struct IO_APIC_route_entry entry;
-
-	memset(&entry, 0, sizeof(entry));
-
-	/*
-	 * We use logical delivery to get the timer IRQ
-	 * to the first CPU.
-	 */
-	entry.dest_mode = INT_DEST_MODE;
-	entry.mask = 1;					/* mask IRQ now */
-	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-	entry.delivery_mode = INT_DELIVERY_MODE;
-	entry.polarity = 0;
-	entry.trigger = 0;
-	entry.vector = vector;
-
-	/*
-	 * The timer IRQ doesn't have to know that behind the
-	 * scene we may have a 8259A-master in AEOI mode ...
-	 * scenes we may have an 8259A-master in AEOI mode ...
-	ioapic_register_intr(0, vector, IOAPIC_EDGE);
-
-	/*
-	 * Add it to the IO-APIC irq-routing table:
-	 */
-	ioapic_write_entry(apic, pin, entry);
-}
-
-
-__apicdebuginit(void) print_IO_APIC(void)
-{
-	int apic, i;
-	union IO_APIC_reg_00 reg_00;
-	union IO_APIC_reg_01 reg_01;
-	union IO_APIC_reg_02 reg_02;
-	union IO_APIC_reg_03 reg_03;
-	unsigned long flags;
-
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
-	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-	for (i = 0; i < nr_ioapics; i++)
-		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
-
-	/*
-	 * We are a bit conservative about what we expect.  We have to
-	 * know about every hardware change ASAP.
-	 */
-	printk(KERN_INFO "testing the IO APIC.......................\n");
-
-	for (apic = 0; apic < nr_ioapics; apic++) {
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	reg_00.raw = io_apic_read(apic, 0);
-	reg_01.raw = io_apic_read(apic, 1);
-	if (reg_01.bits.version >= 0x10)
-		reg_02.raw = io_apic_read(apic, 2);
-	if (reg_01.bits.version >= 0x20)
-		reg_03.raw = io_apic_read(apic, 3);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-
-	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
-	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
-	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
-	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
-
-	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
-	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
-
-	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
-	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
-
-	/*
-	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
-	 * but the value of reg_02 is read as the previous read register
-	 * value, so ignore it if reg_02 == reg_01.
-	 */
-	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
-		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
-	}
-
-	/*
-	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
-	 * or reg_03, but the value of reg_0[23] is read as the previous read
-	 * register value, so ignore it if reg_03 == reg_0[12].
-	 */
-	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
-	    reg_03.raw != reg_01.raw) {
-		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
-		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
-	}
-
-	printk(KERN_DEBUG ".... IRQ redirection table:\n");
-
-	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-			  " Stat Dest Deli Vect:   \n");
-
-	for (i = 0; i <= reg_01.bits.entries; i++) {
-		struct IO_APIC_route_entry entry;
-
-		entry = ioapic_read_entry(apic, i);
-
-		printk(KERN_DEBUG " %02x %03X %02X  ",
-			i,
-			entry.dest.logical.logical_dest,
-			entry.dest.physical.physical_dest
-		);
-
-		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
-			entry.mask,
-			entry.trigger,
-			entry.irr,
-			entry.polarity,
-			entry.delivery_status,
-			entry.dest_mode,
-			entry.delivery_mode,
-			entry.vector
-		);
-	}
-	}
-	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-	for (i = 0; i < NR_IRQS; i++) {
-		struct irq_pin_list *entry = irq_2_pin + i;
-		if (entry->pin < 0)
-			continue;
-		printk(KERN_DEBUG "IRQ%d ", i);
-		for (;;) {
-			printk("-> %d:%d", entry->apic, entry->pin);
-			if (!entry->next)
-				break;
-			entry = irq_2_pin + entry->next;
-		}
-		printk("\n");
-	}
-
-	printk(KERN_INFO ".................................... done.\n");
-
-	return;
-}
-
-__apicdebuginit(void) print_APIC_bitfield(int base)
-{
-	unsigned int v;
-	int i, j;
-
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
-	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-	for (i = 0; i < 8; i++) {
-		v = apic_read(base + i*0x10);
-		for (j = 0; j < 32; j++) {
-			if (v & (1<<j))
-				printk("1");
-			else
-				printk("0");
-		}
-		printk("\n");
-	}
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
-	unsigned int v, ver, maxlvt;
-	u64 icr;
-
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
-	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-		smp_processor_id(), hard_smp_processor_id());
-	v = apic_read(APIC_ID);
-	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v,
-			GET_APIC_ID(v));
-	v = apic_read(APIC_LVR);
-	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-	ver = GET_APIC_VERSION(v);
-	maxlvt = lapic_get_maxlvt();
-
-	v = apic_read(APIC_TASKPRI);
-	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
-	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
-		v = apic_read(APIC_ARBPRI);
-		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-			v & APIC_ARBPRI_MASK);
-		v = apic_read(APIC_PROCPRI);
-		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-	}
-
-	v = apic_read(APIC_EOI);
-	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-	v = apic_read(APIC_RRR);
-	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-	v = apic_read(APIC_LDR);
-	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-	v = apic_read(APIC_DFR);
-	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-	v = apic_read(APIC_SPIV);
-	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
-	printk(KERN_DEBUG "... APIC ISR field:\n");
-	print_APIC_bitfield(APIC_ISR);
-	printk(KERN_DEBUG "... APIC TMR field:\n");
-	print_APIC_bitfield(APIC_TMR);
-	printk(KERN_DEBUG "... APIC IRR field:\n");
-	print_APIC_bitfield(APIC_IRR);
-
-	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-		v = apic_read(APIC_ESR);
-		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-	}
-
-	icr = apic_icr_read();
-	printk(KERN_DEBUG "... APIC ICR: %08x\n", icr);
-	printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32);
-
-	v = apic_read(APIC_LVTT);
-	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
-	if (maxlvt > 3) {                       /* PC is LVT#4. */
-		v = apic_read(APIC_LVTPC);
-		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-	}
-	v = apic_read(APIC_LVT0);
-	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-	v = apic_read(APIC_LVT1);
-	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
-	if (maxlvt > 2) {			/* ERR is LVT#3. */
-		v = apic_read(APIC_LVTERR);
-		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_TMICT);
-	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-	v = apic_read(APIC_TMCCT);
-	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-	v = apic_read(APIC_TDCR);
-	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-	printk("\n");
-}
-
-__apicdebuginit(void) print_all_local_APICs(void)
-{
-	on_each_cpu(print_local_APIC, NULL, 1);
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
-	unsigned int v;
-	unsigned long flags;
-
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
-	printk(KERN_DEBUG "\nprinting PIC contents\n");
-
-	spin_lock_irqsave(&i8259A_lock, flags);
-
-	v = inb(0xa1) << 8 | inb(0x21);
-	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
-
-	v = inb(0xa0) << 8 | inb(0x20);
-	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
-
-	outb(0x0b, 0xa0);
-	outb(0x0b, 0x20);
-	v = inb(0xa0) << 8 | inb(0x20);
-	outb(0x0a, 0xa0);
-	outb(0x0a, 0x20);
-
-	spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
-
-	v = inb(0x4d1) << 8 | inb(0x4d0);
-	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-__apicdebuginit(int) print_all_ICs(void)
-{
-	print_PIC();
-	print_all_local_APICs();
-	print_IO_APIC();
-
-	return 0;
-}
-
-fs_initcall(print_all_ICs);
-
-
-static void __init enable_IO_APIC(void)
-{
-	union IO_APIC_reg_01 reg_01;
-	int i8259_apic, i8259_pin;
-	int i, apic;
-	unsigned long flags;
-
-	for (i = 0; i < PIN_MAP_SIZE; i++) {
-		irq_2_pin[i].pin = -1;
-		irq_2_pin[i].next = 0;
-	}
-	if (!pirqs_enabled)
-		for (i = 0; i < MAX_PIRQS; i++)
-			pirq_entries[i] = -1;
-
-	/*
-	 * The number of IO-APIC IRQ registers (== #pins):
-	 */
-	for (apic = 0; apic < nr_ioapics; apic++) {
-		spin_lock_irqsave(&ioapic_lock, flags);
-		reg_01.raw = io_apic_read(apic, 1);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
-		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-	}
-	for (apic = 0; apic < nr_ioapics; apic++) {
-		int pin;
-		/* See if any of the pins is in ExtINT mode */
-		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-			struct IO_APIC_route_entry entry;
-			entry = ioapic_read_entry(apic, pin);
-
-
-			/* If the interrupt line is enabled and in ExtInt mode
-			 * I have found the pin where the i8259 is connected.
-			 */
-			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-				ioapic_i8259.apic = apic;
-				ioapic_i8259.pin  = pin;
-				goto found_i8259;
-			}
-		}
-	}
- found_i8259:
-	/* Look to see if the MP table has reported the ExtINT */
-	/* If we could not find the appropriate pin by looking at the ioapic,
-	 * the i8259 probably is not connected to the ioapic, but give the
-	 * mptable a chance anyway.
-	 */
-	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
-	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-	/* Trust the MP table if nothing is setup in the hardware */
-	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-		ioapic_i8259.pin  = i8259_pin;
-		ioapic_i8259.apic = i8259_apic;
-	}
-	/* Complain if the MP table and the hardware disagree */
-	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-	{
-		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-	}
-
-	/*
-	 * Do not trust the IO-APIC being empty at bootup
-	 */
-	clear_IO_APIC();
-}
-
-/*
- * Not an __init, needed by the reboot code
- */
-void disable_IO_APIC(void)
-{
-	/*
-	 * Clear the IO-APIC before rebooting:
-	 */
-	clear_IO_APIC();
-
-	/*
-	 * If the i8259 is routed through an IOAPIC
-	 * Put that IOAPIC in virtual wire mode
-	 * so legacy interrupts can be delivered.
-	 */
-	if (ioapic_i8259.pin != -1) {
-		struct IO_APIC_route_entry entry;
-
-		memset(&entry, 0, sizeof(entry));
-		entry.mask            = 0; /* Enabled */
-		entry.trigger         = 0; /* Edge */
-		entry.irr             = 0;
-		entry.polarity        = 0; /* High */
-		entry.delivery_status = 0;
-		entry.dest_mode       = 0; /* Physical */
-		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
-		entry.vector          = 0;
-		entry.dest.physical.physical_dest = read_apic_id();
-
-		/*
-		 * Add it to the IO-APIC irq-routing table:
-		 */
-		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
-	}
-	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-}
-
-/*
- * function to set the IO-APIC physical IDs based on the
- * values stored in the MPC table.
- *
- * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
- */
-
-static void __init setup_ioapic_ids_from_mpc(void)
-{
-	union IO_APIC_reg_00 reg_00;
-	physid_mask_t phys_id_present_map;
-	int apic;
-	int i;
-	unsigned char old_id;
-	unsigned long flags;
-
-	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
-		return;
-
-	/*
-	 * Don't check I/O APIC IDs for xAPIC systems.  They have
-	 * no meaning without the serial APIC bus.
-	 */
-	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-		return;
-	/*
-	 * This is broken; anything with a real cpu count has to
-	 * circumvent this idiocy regardless.
-	 */
-	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
-
-	/*
-	 * Set the IOAPIC ID to the value stored in the MPC table.
-	 */
-	for (apic = 0; apic < nr_ioapics; apic++) {
-
-		/* Read the register 0 value */
-		spin_lock_irqsave(&ioapic_lock, flags);
-		reg_00.raw = io_apic_read(apic, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
-
-		old_id = mp_ioapics[apic].mp_apicid;
-
-		if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
-			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-				apic, mp_ioapics[apic].mp_apicid);
-			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-				reg_00.bits.ID);
-			mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
-		}
-
-		/*
-		 * Sanity check, is the ID really free? Every APIC in a
-		 * system must have a unique ID or we get lots of nice
-		 * 'stuck on smp_invalidate_needed IPI wait' messages.
-		 */
-		if (check_apicid_used(phys_id_present_map,
-					mp_ioapics[apic].mp_apicid)) {
-			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-				apic, mp_ioapics[apic].mp_apicid);
-			for (i = 0; i < get_physical_broadcast(); i++)
-				if (!physid_isset(i, phys_id_present_map))
-					break;
-			if (i >= get_physical_broadcast())
-				panic("Max APIC ID exceeded!\n");
-			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-				i);
-			physid_set(i, phys_id_present_map);
-			mp_ioapics[apic].mp_apicid = i;
-		} else {
-			physid_mask_t tmp;
-			tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
-			apic_printk(APIC_VERBOSE, "Setting %d in the "
-					"phys_id_present_map\n",
-					mp_ioapics[apic].mp_apicid);
-			physids_or(phys_id_present_map, phys_id_present_map, tmp);
-		}
-
-
-		/*
-		 * We need to adjust the IRQ routing table
-		 * if the ID changed.
-		 */
-		if (old_id != mp_ioapics[apic].mp_apicid)
-			for (i = 0; i < mp_irq_entries; i++)
-				if (mp_irqs[i].mp_dstapic == old_id)
-					mp_irqs[i].mp_dstapic
-						= mp_ioapics[apic].mp_apicid;
-
-		/*
-		 * Read the right value from the MPC table and
-		 * write it into the ID register.
-		 */
-		apic_printk(APIC_VERBOSE, KERN_INFO
-			"...changing IO-APIC physical APIC ID to %d ...",
-			mp_ioapics[apic].mp_apicid);
-
-		reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
-		spin_lock_irqsave(&ioapic_lock, flags);
-		io_apic_write(apic, 0, reg_00.raw);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
-
-		/*
-		 * Sanity check
-		 */
-		spin_lock_irqsave(&ioapic_lock, flags);
-		reg_00.raw = io_apic_read(apic, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
-		if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
-			printk("could not set ID!\n");
-		else
-			apic_printk(APIC_VERBOSE, " ok.\n");
-	}
-}
-
-int no_timer_check __initdata;
-
-static int __init notimercheck(char *s)
-{
-	no_timer_check = 1;
-	return 1;
-}
-__setup("no_timer_check", notimercheck);
-
-/*
- * There is a nasty bug in some older SMP boards: their mptable lies
- * about the timer IRQ. We do the following to work around the situation:
- *
- *	- timer IRQ defaults to IO-APIC IRQ
- *	- if this function detects that timer IRQs are defunct, then we fall
- *	  back to ISA timer IRQs
- */
-static int __init timer_irq_works(void)
-{
-	unsigned long t1 = jiffies;
-	unsigned long flags;
-
-	if (no_timer_check)
-		return 1;
-
-	local_save_flags(flags);
-	local_irq_enable();
-	/* Let ten ticks pass... */
-	mdelay((10 * 1000) / HZ);
-	local_irq_restore(flags);
-
-	/*
-	 * Expect a few ticks at least, to be sure some possible
-	 * glue logic does not lock up after one or two first
-	 * ticks in a non-ExtINT mode.  Also the local APIC
-	 * might have cached one ExtINT interrupt.  Finally, at
-	 * least one tick may be lost due to delays.
-	 */
-	if (time_after(jiffies, t1 + 4))
-		return 1;
-
-	return 0;
-}
-
-/*
- * In the SMP+IOAPIC case it might happen that there are an unspecified
- * number of pending IRQ events unhandled. These cases are very rare,
- * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
- * better to do it this way as thus we do not have to be aware of
- * 'pending' interrupts in the IRQ path, except at this point.
- */
-/*
- * Edge triggered needs to resend any interrupt
- * that was delayed but this is now handled in the device
- * independent code.
- */
-
-/*
- * Startup quirk:
- *
- * Starting up an edge-triggered IO-APIC interrupt is
- * nasty - we need to make sure that we get the edge.
- * If it is already asserted for some reason, we need to
- * return 1 to indicate that it was pending.
- *
- * This is not complete - we should be able to fake
- * an edge even if it isn't on the 8259A...
- *
- * (We do this for level-triggered IRQs too - it cannot hurt.)
- */
-static unsigned int startup_ioapic_irq(unsigned int irq)
-{
-	int was_pending = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	if (irq < 16) {
-		disable_8259A_irq(irq);
-		if (i8259A_irq_pending(irq))
-			was_pending = 1;
-	}
-	__unmask_IO_APIC_irq(irq);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-
-	return was_pending;
-}
-
-static void ack_ioapic_irq(unsigned int irq)
-{
-	move_native_irq(irq);
-	ack_APIC_irq();
-}
-
-static void ack_ioapic_quirk_irq(unsigned int irq)
-{
-	unsigned long v;
-	int i;
-
-	move_native_irq(irq);
-/*
- * It appears there is an erratum which affects at least version 0x11
- * of I/O APIC (that's the 82093AA and cores integrated into various
- * chipsets).  Under certain conditions a level-triggered interrupt is
- * erroneously delivered as edge-triggered one but the respective IRR
- * bit gets set nevertheless.  As a result the I/O unit expects an EOI
- * message but it will never arrive and further interrupts are blocked
- * from the source.  The exact reason is so far unknown, but the
- * phenomenon was observed when two consecutive interrupt requests
- * from a given source get delivered to the same CPU and the source is
- * temporarily disabled in between.
- *
- * A workaround is to simulate an EOI message manually.  We achieve it
- * by setting the trigger mode to edge and then to level when the edge
- * trigger mode gets detected in the TMR of a local APIC for a
- * level-triggered interrupt.  We mask the source for the time of the
- * operation to prevent an edge-triggered interrupt escaping meanwhile.
- * The idea is from Manfred Spraul.  --macro
- */
-	i = irq_vector[irq];
-
-	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-
-	ack_APIC_irq();
-
-	if (!(v & (1 << (i & 0x1f)))) {
-		atomic_inc(&irq_mis_count);
-		spin_lock(&ioapic_lock);
-		__mask_and_edge_IO_APIC_irq(irq);
-		__unmask_and_level_IO_APIC_irq(irq);
-		spin_unlock(&ioapic_lock);
-	}
-}
-
-static int ioapic_retrigger_irq(unsigned int irq)
-{
-	send_IPI_self(irq_vector[irq]);
-
-	return 1;
-}
-
-static struct irq_chip ioapic_chip __read_mostly = {
-	.name 		= "IO-APIC",
-	.startup 	= startup_ioapic_irq,
-	.mask	 	= mask_IO_APIC_irq,
-	.unmask	 	= unmask_IO_APIC_irq,
-	.ack 		= ack_ioapic_irq,
-	.eoi 		= ack_ioapic_quirk_irq,
-#ifdef CONFIG_SMP
-	.set_affinity 	= set_ioapic_affinity_irq,
-#endif
-	.retrigger	= ioapic_retrigger_irq,
-};
-
-
-static inline void init_IO_APIC_traps(void)
-{
-	int irq;
-
-	/*
-	 * NOTE! The local APIC isn't very good at handling
-	 * multiple interrupts at the same interrupt level.
-	 * As the interrupt level is determined by taking the
-	 * vector number and shifting that right by 4, we
-	 * want to spread these out a bit so that they don't
-	 * all fall in the same interrupt level.
-	 *
-	 * Also, we've got to be careful not to trash gate
-	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-	 */
-	for (irq = 0; irq < NR_IRQS ; irq++) {
-		if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
-			/*
-			 * Hmm.. We don't have an entry for this,
-			 * so default to an old-fashioned 8259
-			 * interrupt if we can..
-			 */
-			if (irq < 16)
-				make_8259A_irq(irq);
-			else
-				/* Strange. Oh, well.. */
-				irq_desc[irq].chip = &no_irq_chip;
-		}
-	}
-}
-
-/*
- * The local APIC irq-chip implementation:
- */
-
-static void ack_lapic_irq(unsigned int irq)
-{
-	ack_APIC_irq();
-}
-
-static void mask_lapic_irq(unsigned int irq)
-{
-	unsigned long v;
-
-	v = apic_read(APIC_LVT0);
-	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-}
-
-static void unmask_lapic_irq(unsigned int irq)
-{
-	unsigned long v;
-
-	v = apic_read(APIC_LVT0);
-	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
-}
-
-static struct irq_chip lapic_chip __read_mostly = {
-	.name		= "local-APIC",
-	.mask		= mask_lapic_irq,
-	.unmask		= unmask_lapic_irq,
-	.ack		= ack_lapic_irq,
-};
-
-static void lapic_register_intr(int irq, int vector)
-{
-	irq_desc[irq].status &= ~IRQ_LEVEL;
-	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
-				      "edge");
-	set_intr_gate(vector, interrupt[irq]);
-}
-
-static void __init setup_nmi(void)
-{
-	/*
-	 * Dirty trick to enable the NMI watchdog ...
-	 * We put the 8259A master into AEOI mode and
-	 * unmask on all local APICs LVT0 as NMI.
-	 *
-	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-	 * is from Maciej W. Rozycki - so we do not have to EOI from
-	 * the NMI handler or the timer interrupt.
-	 */
-	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-
-	enable_NMI_through_LVT0();
-
-	apic_printk(APIC_VERBOSE, " done.\n");
-}
-
-/*
- * This looks a bit hackish but it's about the only way of sending
- * a few INTA cycles to 8259As and any associated glue logic.  ICR does
- * not support the ExtINT mode, unfortunately.  We need to send these
- * cycles as some i82489DX-based boards have glue logic that keeps the
- * 8259A interrupt line asserted until INTA.  --macro
- */
-static inline void __init unlock_ExtINT_logic(void)
-{
-	int apic, pin, i;
-	struct IO_APIC_route_entry entry0, entry1;
-	unsigned char save_control, save_freq_select;
-
-	pin  = find_isa_irq_pin(8, mp_INT);
-	if (pin == -1) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-	apic = find_isa_irq_apic(8, mp_INT);
-	if (apic == -1) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	entry0 = ioapic_read_entry(apic, pin);
-	clear_IO_APIC_pin(apic, pin);
-
-	memset(&entry1, 0, sizeof(entry1));
-
-	entry1.dest_mode = 0;			/* physical delivery */
-	entry1.mask = 0;			/* unmask IRQ now */
-	entry1.dest.physical.physical_dest = hard_smp_processor_id();
-	entry1.delivery_mode = dest_ExtINT;
-	entry1.polarity = entry0.polarity;
-	entry1.trigger = 0;
-	entry1.vector = 0;
-
-	ioapic_write_entry(apic, pin, entry1);
-
-	save_control = CMOS_READ(RTC_CONTROL);
-	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-		   RTC_FREQ_SELECT);
-	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-
-	i = 100;
-	while (i-- > 0) {
-		mdelay(10);
-		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-			i -= 10;
-	}
-
-	CMOS_WRITE(save_control, RTC_CONTROL);
-	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-	clear_IO_APIC_pin(apic, pin);
-
-	ioapic_write_entry(apic, pin, entry0);
-}
-
-/*
- * This code may look a bit paranoid, but it's supposed to cooperate with
- * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
- * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
- * fanatically on his truly buggy board.
- */
-static inline void __init check_timer(void)
-{
-	int apic1, pin1, apic2, pin2;
-	int no_pin1 = 0;
-	int vector;
-	unsigned int ver;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	ver = apic_read(APIC_LVR);
-	ver = GET_APIC_VERSION(ver);
-
-	/*
-	 * get/set the timer IRQ vector:
-	 */
-	disable_8259A_irq(0);
-	vector = assign_irq_vector(0);
-	set_intr_gate(vector, interrupt[0]);
-
-	/*
-	 * As IRQ0 is to be enabled in the 8259A, the virtual
-	 * wire has to be disabled in the local APIC.  Also
-	 * timer interrupts need to be acknowledged manually in
-	 * the 8259A for the i82489DX when using the NMI
-	 * watchdog as that APIC treats NMIs as level-triggered.
-	 * The AEOI mode will finish them in the 8259A
-	 * automatically.
-	 */
-	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-	init_8259A(1);
-	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
-
-	pin1  = find_isa_irq_pin(0, mp_INT);
-	apic1 = find_isa_irq_apic(0, mp_INT);
-	pin2  = ioapic_i8259.pin;
-	apic2 = ioapic_i8259.apic;
-
-	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
-		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
-		    vector, apic1, pin1, apic2, pin2);
-
-	/*
-	 * Some BIOS writers are clueless and report the ExtINTA
-	 * I/O APIC input from the cascaded 8259A as the timer
-	 * interrupt input.  So just in case, if only one pin
-	 * was found above, try it both directly and through the
-	 * 8259A.
-	 */
-	if (pin1 == -1) {
-		pin1 = pin2;
-		apic1 = apic2;
-		no_pin1 = 1;
-	} else if (pin2 == -1) {
-		pin2 = pin1;
-		apic2 = apic1;
-	}
-
-	if (pin1 != -1) {
-		/*
-		 * Ok, does IRQ0 through the IOAPIC work?
-		 */
-		if (no_pin1) {
-			add_pin_to_irq(0, apic1, pin1);
-			setup_timer_IRQ0_pin(apic1, pin1, vector);
-		}
-		unmask_IO_APIC_irq(0);
-		if (timer_irq_works()) {
-			if (nmi_watchdog == NMI_IO_APIC) {
-				setup_nmi();
-				enable_8259A_irq(0);
-			}
-			if (disable_timer_pin_1 > 0)
-				clear_IO_APIC_pin(0, pin1);
-			goto out;
-		}
-		clear_IO_APIC_pin(apic1, pin1);
-		if (!no_pin1)
-			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
-				    "8254 timer not connected to IO-APIC\n");
-
-		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
-			    "(IRQ0) through the 8259A ...\n");
-		apic_printk(APIC_QUIET, KERN_INFO
-			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
-		/*
-		 * legacy devices should be connected to IO APIC #0
-		 */
-		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
-		setup_timer_IRQ0_pin(apic2, pin2, vector);
-		unmask_IO_APIC_irq(0);
-		enable_8259A_irq(0);
-		if (timer_irq_works()) {
-			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
-			timer_through_8259 = 1;
-			if (nmi_watchdog == NMI_IO_APIC) {
-				disable_8259A_irq(0);
-				setup_nmi();
-				enable_8259A_irq(0);
-			}
-			goto out;
-		}
-		/*
-		 * Cleanup, just in case ...
-		 */
-		disable_8259A_irq(0);
-		clear_IO_APIC_pin(apic2, pin2);
-		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
-	}
-
-	if (nmi_watchdog == NMI_IO_APIC) {
-		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
-			    "through the IO-APIC - disabling NMI Watchdog!\n");
-		nmi_watchdog = NMI_NONE;
-	}
-	timer_ack = 0;
-
-	apic_printk(APIC_QUIET, KERN_INFO
-		    "...trying to set up timer as Virtual Wire IRQ...\n");
-
-	lapic_register_intr(0, vector);
-	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
-	enable_8259A_irq(0);
-
-	if (timer_irq_works()) {
-		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
-		goto out;
-	}
-	disable_8259A_irq(0);
-	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
-
-	apic_printk(APIC_QUIET, KERN_INFO
-		    "...trying to set up timer as ExtINT IRQ...\n");
-
-	init_8259A(0);
-	make_8259A_irq(0);
-	apic_write(APIC_LVT0, APIC_DM_EXTINT);
-
-	unlock_ExtINT_logic();
-
-	if (timer_irq_works()) {
-		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
-		goto out;
-	}
-	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
-	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
-		"report.  Then try booting with the 'noapic' option.\n");
-out:
-	local_irq_restore(flags);
-}
-
-/*
- * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
- * to devices.  However there may be an I/O APIC pin available for
- * this interrupt regardless.  The pin may be left unconnected, but
- * typically it will be reused as an ExtINT cascade interrupt for
- * the master 8259A.  In the MPS case such a pin will normally be
- * reported as an ExtINT interrupt in the MP table.  With ACPI
- * there is no provision for ExtINT interrupts, and in the absence
- * of an override it would be treated as an ordinary ISA I/O APIC
- * interrupt, that is edge-triggered and unmasked by default.  We
- * used to do this, but it caused problems on some systems because
- * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
- * the same ExtINT cascade interrupt to drive the local APIC of the
- * bootstrap processor.  Therefore we refrain from routing IRQ2 to
- * the I/O APIC in all cases now.  No actual device should request
- * it anyway.  --macro
- */
-#define PIC_IRQS	(1 << PIC_CASCADE_IR)
-
-void __init setup_IO_APIC(void)
-{
-	int i;
-
-	/* Reserve all the system vectors. */
-	for (i = first_system_vector; i < NR_VECTORS; i++)
-		set_bit(i, used_vectors);
-
-	enable_IO_APIC();
-
-	io_apic_irqs = ~PIC_IRQS;
-
-	printk("ENABLING IO-APIC IRQs\n");
-
-	/*
-	 * Set up IO-APIC IRQ routing.
-	 */
-	if (!acpi_ioapic)
-		setup_ioapic_ids_from_mpc();
-	sync_Arb_IDs();
-	setup_IO_APIC_irqs();
-	init_IO_APIC_traps();
-	check_timer();
-}
-
-/*
- *	Called after all the initialization is done. If we didn't find any
- *	APIC bugs then we can allow the modify fast path
- */
-
-static int __init io_apic_bug_finalize(void)
-{
-	if (sis_apic_bug == -1)
-		sis_apic_bug = 0;
-	return 0;
-}
-
-late_initcall(io_apic_bug_finalize);
-
-struct sysfs_ioapic_data {
-	struct sys_device dev;
-	struct IO_APIC_route_entry entry[0];
-};
-static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
-
-static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-{
-	struct IO_APIC_route_entry *entry;
-	struct sysfs_ioapic_data *data;
-	int i;
-
-	data = container_of(dev, struct sysfs_ioapic_data, dev);
-	entry = data->entry;
-	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
-		entry[i] = ioapic_read_entry(dev->id, i);
-
-	return 0;
-}
-
-static int ioapic_resume(struct sys_device *dev)
-{
-	struct IO_APIC_route_entry *entry;
-	struct sysfs_ioapic_data *data;
-	unsigned long flags;
-	union IO_APIC_reg_00 reg_00;
-	int i;
-
-	data = container_of(dev, struct sysfs_ioapic_data, dev);
-	entry = data->entry;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	reg_00.raw = io_apic_read(dev->id, 0);
-	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
-		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
-		io_apic_write(dev->id, 0, reg_00.raw);
-	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
-		ioapic_write_entry(dev->id, i, entry[i]);
-
-	return 0;
-}
-
-static struct sysdev_class ioapic_sysdev_class = {
-	.name = "ioapic",
-	.suspend = ioapic_suspend,
-	.resume = ioapic_resume,
-};
-
-static int __init ioapic_init_sysfs(void)
-{
-	struct sys_device *dev;
-	int i, size, error = 0;
-
-	error = sysdev_class_register(&ioapic_sysdev_class);
-	if (error)
-		return error;
-
-	for (i = 0; i < nr_ioapics; i++) {
-		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-			* sizeof(struct IO_APIC_route_entry);
-		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
-		if (!mp_ioapic_data[i]) {
-			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-			continue;
-		}
-		dev = &mp_ioapic_data[i]->dev;
-		dev->id = i;
-		dev->cls = &ioapic_sysdev_class;
-		error = sysdev_register(dev);
-		if (error) {
-			kfree(mp_ioapic_data[i]);
-			mp_ioapic_data[i] = NULL;
-			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-			continue;
-		}
-	}
-
-	return 0;
-}
-
-device_initcall(ioapic_init_sysfs);
-
-/*
- * Dynamic irq allocate and deallocation
- */
-int create_irq(void)
-{
-	/* Allocate an unused irq */
-	int irq, new, vector = 0;
-	unsigned long flags;
-
-	irq = -ENOSPC;
-	spin_lock_irqsave(&vector_lock, flags);
-	for (new = (NR_IRQS - 1); new >= 0; new--) {
-		if (platform_legacy_irq(new))
-			continue;
-		if (irq_vector[new] != 0)
-			continue;
-		vector = __assign_irq_vector(new);
-		if (likely(vector > 0))
-			irq = new;
-		break;
-	}
-	spin_unlock_irqrestore(&vector_lock, flags);
-
-	if (irq >= 0) {
-		set_intr_gate(vector, interrupt[irq]);
-		dynamic_irq_init(irq);
-	}
-	return irq;
-}
-
-void destroy_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	dynamic_irq_cleanup(irq);
-
-	spin_lock_irqsave(&vector_lock, flags);
-	clear_bit(irq_vector[irq], used_vectors);
-	irq_vector[irq] = 0;
-	spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-/*
- * MSI message composition
- */
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
-{
-	int vector;
-	unsigned dest;
-
-	vector = assign_irq_vector(irq);
-	if (vector >= 0) {
-		dest = cpu_mask_to_apicid(TARGET_CPUS);
-
-		msg->address_hi = MSI_ADDR_BASE_HI;
-		msg->address_lo =
-			MSI_ADDR_BASE_LO |
-			((INT_DEST_MODE == 0) ?
-				MSI_ADDR_DEST_MODE_PHYSICAL :
-				MSI_ADDR_DEST_MODE_LOGICAL) |
-			((INT_DELIVERY_MODE != dest_LowestPrio) ?
-				MSI_ADDR_REDIRECTION_CPU:
-				MSI_ADDR_REDIRECTION_LOWPRI) |
-			MSI_ADDR_DEST_ID(dest);
-
-		msg->data =
-			MSI_DATA_TRIGGER_EDGE |
-			MSI_DATA_LEVEL_ASSERT |
-			((INT_DELIVERY_MODE != dest_LowestPrio) ?
-				MSI_DATA_DELIVERY_FIXED :
-				MSI_DATA_DELIVERY_LOWPRI) |
-			MSI_DATA_VECTOR(vector);
-	}
-	return vector;
-}
-
-#ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
-{
-	struct msi_msg msg;
-	unsigned int dest;
-	cpumask_t tmp;
-	int vector;
-
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		tmp = TARGET_CPUS;
-
-	vector = assign_irq_vector(irq);
-	if (vector < 0)
-		return;
-
-	dest = cpu_mask_to_apicid(mask);
-
-	read_msi_msg(irq, &msg);
-
-	msg.data &= ~MSI_DATA_VECTOR_MASK;
-	msg.data |= MSI_DATA_VECTOR(vector);
-	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = mask;
-}
-#endif /* CONFIG_SMP */
-
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
-	.name		= "PCI-MSI",
-	.unmask		= unmask_msi_irq,
-	.mask		= mask_msi_irq,
-	.ack		= ack_ioapic_irq,
-#ifdef CONFIG_SMP
-	.set_affinity	= set_msi_irq_affinity,
-#endif
-	.retrigger	= ioapic_retrigger_irq,
-};
-
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
-{
-	struct msi_msg msg;
-	int irq, ret;
-	irq = create_irq();
-	if (irq < 0)
-		return irq;
-
-	ret = msi_compose_msg(dev, irq, &msg);
-	if (ret < 0) {
-		destroy_irq(irq);
-		return ret;
-	}
-
-	set_irq_msi(irq, desc);
-	write_msi_msg(irq, &msg);
-
-	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
-				      "edge");
-
-	return 0;
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
-	destroy_irq(irq);
-}
-
-#endif /* CONFIG_PCI_MSI */
-
-/*
- * Hypertransport interrupt support
- */
-#ifdef CONFIG_HT_IRQ
-
-#ifdef CONFIG_SMP
-
-static void target_ht_irq(unsigned int irq, unsigned int dest)
-{
-	struct ht_irq_msg msg;
-	fetch_ht_irq_msg(irq, &msg);
-
-	msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
-	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
-	msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
-	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
-	write_ht_irq_msg(irq, &msg);
-}
-
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
-{
-	unsigned int dest;
-	cpumask_t tmp;
-
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		tmp = TARGET_CPUS;
-
-	cpus_and(mask, tmp, CPU_MASK_ALL);
-
-	dest = cpu_mask_to_apicid(mask);
-
-	target_ht_irq(irq, dest);
-	irq_desc[irq].affinity = mask;
-}
-#endif
-
-static struct irq_chip ht_irq_chip = {
-	.name		= "PCI-HT",
-	.mask		= mask_ht_irq,
-	.unmask		= unmask_ht_irq,
-	.ack		= ack_ioapic_irq,
-#ifdef CONFIG_SMP
-	.set_affinity	= set_ht_irq_affinity,
-#endif
-	.retrigger	= ioapic_retrigger_irq,
-};
-
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-{
-	int vector;
-
-	vector = assign_irq_vector(irq);
-	if (vector >= 0) {
-		struct ht_irq_msg msg;
-		unsigned dest;
-		cpumask_t tmp;
-
-		cpus_clear(tmp);
-		cpu_set(vector >> 8, tmp);
-		dest = cpu_mask_to_apicid(tmp);
-
-		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-
-		msg.address_lo =
-			HT_IRQ_LOW_BASE |
-			HT_IRQ_LOW_DEST_ID(dest) |
-			HT_IRQ_LOW_VECTOR(vector) |
-			((INT_DEST_MODE == 0) ?
-				HT_IRQ_LOW_DM_PHYSICAL :
-				HT_IRQ_LOW_DM_LOGICAL) |
-			HT_IRQ_LOW_RQEOI_EDGE |
-			((INT_DELIVERY_MODE != dest_LowestPrio) ?
-				HT_IRQ_LOW_MT_FIXED :
-				HT_IRQ_LOW_MT_ARBITRATED) |
-			HT_IRQ_LOW_IRQ_MASKED;
-
-		write_ht_irq_msg(irq, &msg);
-
-		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
-					      handle_edge_irq, "edge");
-	}
-	return vector;
-}
-#endif /* CONFIG_HT_IRQ */
-
-/* --------------------------------------------------------------------------
-			ACPI-based IOAPIC Configuration
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI
-
-int __init io_apic_get_unique_id(int ioapic, int apic_id)
-{
-	union IO_APIC_reg_00 reg_00;
-	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
-	physid_mask_t tmp;
-	unsigned long flags;
-	int i = 0;
-
-	/*
-	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
-	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
-	 * support up to 16 on one shared APIC bus.
-	 *
-	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
-	 *      advantage of new APIC bus architecture.
-	 */
-
-	if (physids_empty(apic_id_map))
-		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	reg_00.raw = io_apic_read(ioapic, 0);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-
-	if (apic_id >= get_physical_broadcast()) {
-		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
-			"%d\n", ioapic, apic_id, reg_00.bits.ID);
-		apic_id = reg_00.bits.ID;
-	}
-
-	/*
-	 * Every APIC in a system must have a unique ID or we get lots of nice
-	 * 'stuck on smp_invalidate_needed IPI wait' messages.
-	 */
-	if (check_apicid_used(apic_id_map, apic_id)) {
-
-		for (i = 0; i < get_physical_broadcast(); i++) {
-			if (!check_apicid_used(apic_id_map, i))
-				break;
-		}
-
-		if (i == get_physical_broadcast())
-			panic("Max apic_id exceeded!\n");
-
-		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
-			"trying %d\n", ioapic, apic_id, i);
-
-		apic_id = i;
-	}
-
-	tmp = apicid_to_cpu_present(apic_id);
-	physids_or(apic_id_map, apic_id_map, tmp);
-
-	if (reg_00.bits.ID != apic_id) {
-		reg_00.bits.ID = apic_id;
-
-		spin_lock_irqsave(&ioapic_lock, flags);
-		io_apic_write(ioapic, 0, reg_00.raw);
-		reg_00.raw = io_apic_read(ioapic, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
-
-		/* Sanity check */
-		if (reg_00.bits.ID != apic_id) {
-			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
-			return -1;
-		}
-	}
-
-	apic_printk(APIC_VERBOSE, KERN_INFO
-			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-
-	return apic_id;
-}
-
-
-int __init io_apic_get_version(int ioapic)
-{
-	union IO_APIC_reg_01	reg_01;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-
-	return reg_01.bits.version;
-}
-
-
-int __init io_apic_get_redir_entries(int ioapic)
-{
-	union IO_APIC_reg_01	reg_01;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-
-	return reg_01.bits.entries;
-}
-
-
-int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
-{
-	struct IO_APIC_route_entry entry;
-
-	if (!IO_APIC_IRQ(irq)) {
-		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-			ioapic);
-		return -EINVAL;
-	}
-
-	/*
-	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-	 * Note that we mask (disable) IRQs now -- these get enabled when the
-	 * corresponding device driver registers for this IRQ.
-	 */
-
-	memset(&entry, 0, sizeof(entry));
-
-	entry.delivery_mode = INT_DELIVERY_MODE;
-	entry.dest_mode = INT_DEST_MODE;
-	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-	entry.trigger = edge_level;
-	entry.polarity = active_high_low;
-	entry.mask  = 1;
-
-	/*
-	 * IRQs < 16 are already in the irq_2_pin[] map
-	 */
-	if (irq >= 16)
-		add_pin_to_irq(irq, ioapic, pin);
-
-	entry.vector = assign_irq_vector(irq);
-
-	apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
-		"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
-		mp_ioapics[ioapic].mp_apicid, pin, entry.vector, irq,
-		edge_level, active_high_low);
-
-	ioapic_register_intr(irq, entry.vector, edge_level);
-
-	if (!ioapic && (irq < 16))
-		disable_8259A_irq(irq);
-
-	ioapic_write_entry(ioapic, pin, entry);
-
-	return 0;
-}
-
-int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
-{
-	int i;
-
-	if (skip_ioapic_setup)
-		return -1;
-
-	for (i = 0; i < mp_irq_entries; i++)
-		if (mp_irqs[i].mp_irqtype == mp_INT &&
-		    mp_irqs[i].mp_srcbusirq == bus_irq)
-			break;
-	if (i >= mp_irq_entries)
-		return -1;
-
-	*trigger = irq_trigger(i);
-	*polarity = irq_polarity(i);
-	return 0;
-}
-
-#endif /* CONFIG_ACPI */
-
-static int __init parse_disable_timer_pin_1(char *arg)
-{
-	disable_timer_pin_1 = 1;
-	return 0;
-}
-early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
-
-static int __init parse_enable_timer_pin_1(char *arg)
-{
-	disable_timer_pin_1 = -1;
-	return 0;
-}
-early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
-
-static int __init parse_noapic(char *arg)
-{
-	/* disable IO-APIC */
-	disable_ioapic_setup();
-	return 0;
-}
-early_param("noapic", parse_noapic);
-
-void __init ioapic_init_mappings(void)
-{
-	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-	int i;
-
-	for (i = 0; i < nr_ioapics; i++) {
-		if (smp_found_config) {
-			ioapic_phys = mp_ioapics[i].mp_apicaddr;
-			if (!ioapic_phys) {
-				printk(KERN_ERR
-				       "WARNING: bogus zero IO-APIC "
-				       "address found in MPTABLE, "
-				       "disabling IO/APIC support!\n");
-				smp_found_config = 0;
-				skip_ioapic_setup = 1;
-				goto fake_ioapic_page;
-			}
-		} else {
-fake_ioapic_page:
-			ioapic_phys = (unsigned long)
-				      alloc_bootmem_pages(PAGE_SIZE);
-			ioapic_phys = __pa(ioapic_phys);
-		}
-		set_fixmap_nocache(idx, ioapic_phys);
-		printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
-		       __fix_to_virt(idx), ioapic_phys);
-		idx++;
-	}
-}
-
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
new file mode 100644
index 0000000..d1d4dc5
--- /dev/null
+++ b/arch/x86/kernel/irq.c
@@ -0,0 +1,189 @@
+/*
+ * Common interrupt code for 32 and 64 bit
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/smp.h>
+
+atomic_t irq_err_count;
+
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 * But only ack when the APIC is enabled -AK
+	 */
+	if (cpu_has_apic)
+		ack_APIC_irq();
+#endif
+}
+
+#ifdef CONFIG_X86_32
+# define irq_stats(x)		(&per_cpu(irq_stat, x))
+#else
+# define irq_stats(x)		cpu_pda(x)
+#endif
+/*
+ * /proc/interrupts printing:
+ */
+static int show_other_interrupts(struct seq_file *p)
+{
+	int j;
+
+	seq_printf(p, "NMI: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
+	seq_printf(p, "  Non-maskable interrupts\n");
+#ifdef CONFIG_X86_LOCAL_APIC
+	seq_printf(p, "LOC: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
+	seq_printf(p, "  Local timer interrupts\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "RES: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, "  Rescheduling interrupts\n");
+	seq_printf(p, "CAL: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+	seq_printf(p, "  Function call interrupts\n");
+	seq_printf(p, "TLB: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  TLB shootdowns\n");
+#endif
+#ifdef CONFIG_X86_MCE
+	seq_printf(p, "TRM: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
+	seq_printf(p, "  Thermal event interrupts\n");
+# ifdef CONFIG_X86_64
+	seq_printf(p, "THR: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
+	seq_printf(p, "  Threshold APIC interrupts\n");
+# endif
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	seq_printf(p, "SPU: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
+	seq_printf(p, "  Spurious interrupts\n");
+#endif
+	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#if defined(CONFIG_X86_IO_APIC)
+	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+	return 0;
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+	unsigned long flags, any_count = 0;
+	int i = *(loff_t *) v, j;
+	struct irqaction *action;
+	struct irq_desc *desc;
+
+	if (i > nr_irqs)
+		return 0;
+
+	if (i == nr_irqs)
+		return show_other_interrupts(p);
+
+	/* print header */
+	if (i == 0) {
+		seq_printf(p, "           ");
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%-8d", j);
+		seq_putc(p, '\n');
+	}
+
+	desc = irq_to_desc(i);
+	spin_lock_irqsave(&desc->lock, flags);
+#ifndef CONFIG_SMP
+	any_count = kstat_irqs(i);
+#else
+	for_each_online_cpu(j)
+		any_count |= kstat_irqs_cpu(i, j);
+#endif
+	action = desc->action;
+	if (!action && !any_count)
+		goto out;
+
+	seq_printf(p, "%3d: ", i);
+#ifndef CONFIG_SMP
+	seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#endif
+	seq_printf(p, " %8s", desc->chip->name);
+	seq_printf(p, "-%-8s", desc->name);
+
+	if (action) {
+		seq_printf(p, "  %s", action->name);
+		while ((action = action->next) != NULL)
+			seq_printf(p, ", %s", action->name);
+	}
+
+	seq_putc(p, '\n');
+out:
+	spin_unlock_irqrestore(&desc->lock, flags);
+	return 0;
+}
+
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = irq_stats(cpu)->__nmi_count;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	sum += irq_stats(cpu)->apic_timer_irqs;
+#endif
+#ifdef CONFIG_SMP
+	sum += irq_stats(cpu)->irq_resched_count;
+	sum += irq_stats(cpu)->irq_call_count;
+	sum += irq_stats(cpu)->irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+	sum += irq_stats(cpu)->irq_thermal_count;
+# ifdef CONFIG_X86_64
+	sum += irq_stats(cpu)->irq_threshold_count;
+#endif
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	sum += irq_stats(cpu)->irq_spurious_count;
+#endif
+	return sum;
+}
+
+u64 arch_irq_stat(void)
+{
+	u64 sum = atomic_read(&irq_err_count);
+
+#ifdef CONFIG_X86_IO_APIC
+	sum += atomic_read(&irq_mis_count);
+#endif
+	return sum;
+}
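
A minimal, self-contained sketch (not part of the patch; all example_ names are made up) of the idea behind the irq_stats() wrapper above: hide where the per-CPU counters actually live behind one accessor, so the same /proc accounting code builds whether the counters sit in per_cpu(irq_stat, cpu) on 32-bit or in the PDA on 64-bit.

struct example_irq_cpustat {
	unsigned int __nmi_count;
	unsigned int apic_timer_irqs;
	unsigned int irq_resched_count;
};

/* stand-in for per_cpu(irq_stat, cpu) on 32-bit or cpu_pda(cpu) on 64-bit */
static struct example_irq_cpustat example_stat[8];	/* 8 = illustrative NR_CPUS */

#define example_irq_stats(cpu)	(&example_stat[cpu])

static unsigned long long example_arch_irq_stat_cpu(unsigned int cpu)
{
	unsigned long long sum = example_irq_stats(cpu)->__nmi_count;

	sum += example_irq_stats(cpu)->apic_timer_irqs;
	sum += example_irq_stats(cpu)->irq_resched_count;
	return sum;
}
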
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index b71e02d..a513826 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -25,29 +25,6 @@
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	/*
-	 * Currently unexpected vectors happen only on SMP and APIC.
-	 * We _must_ ack these because every local APIC has only N
-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-	 * holds up an irq slot - in excessive cases (when multiple
-	 * unexpected vectors occur) that might lock up the APIC
-	 * completely.
-	 * But only ack when the APIC is enabled -AK
-	 */
-	if (cpu_has_apic)
-		ack_APIC_irq();
-#endif
-}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 /* Debugging check for stack overflow: is there less than 1KB free? */
 static int check_stack_overflow(void)
@@ -223,20 +200,25 @@
 {
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int overflow, irq = ~regs->orig_ax;
-	struct irq_desc *desc = irq_desc + irq;
+	int overflow;
+	unsigned vector = ~regs->orig_ax;
+	struct irq_desc *desc;
+	unsigned irq;
 
-	if (unlikely((unsigned)irq >= NR_IRQS)) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__func__, irq);
-		BUG();
-	}
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
+	irq = __get_cpu_var(vector_irq)[vector];
 
 	overflow = check_stack_overflow();
 
+	desc = irq_to_desc(irq);
+	if (unlikely(!desc)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
+					__func__, irq, vector, smp_processor_id());
+		BUG();
+	}
+
 	if (!execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
@@ -248,146 +230,6 @@
 	return 1;
 }
 
-/*
- * Interrupt statistics:
- */
-
-atomic_t irq_err_count;
-
-/*
- * /proc/interrupts printing:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		unsigned any_count = 0;
-
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-#ifndef CONFIG_SMP
-		any_count = kstat_irqs(i);
-#else
-		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
-#endif
-		action = irq_desc[i].action;
-		if (!action && !any_count)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
-
-		if (action) {
-			seq_printf(p, "  %s", action->name);
-			while ((action = action->next) != NULL)
-				seq_printf(p, ", %s", action->name);
-		}
-
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", nmi_count(j));
-		seq_printf(p, "  Non-maskable interrupts\n");
-#ifdef CONFIG_X86_LOCAL_APIC
-		seq_printf(p, "LOC: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).apic_timer_irqs);
-		seq_printf(p, "  Local timer interrupts\n");
-#endif
-#ifdef CONFIG_SMP
-		seq_printf(p, "RES: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_resched_count);
-		seq_printf(p, "  Rescheduling interrupts\n");
-		seq_printf(p, "CAL: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_call_count);
-		seq_printf(p, "  Function call interrupts\n");
-		seq_printf(p, "TLB: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_tlb_count);
-		seq_printf(p, "  TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-		seq_printf(p, "TRM: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_thermal_count);
-		seq_printf(p, "  Thermal event interrupts\n");
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-		seq_printf(p, "SPU: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_spurious_count);
-		seq_printf(p, "  Spurious interrupts\n");
-#endif
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86_IO_APIC)
-		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-#endif
-	}
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = nmi_count(cpu);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
-#endif
-#ifdef CONFIG_SMP
-	sum += per_cpu(irq_stat, cpu).irq_resched_count;
-	sum += per_cpu(irq_stat, cpu).irq_call_count;
-	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
-#endif
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	u64 sum = atomic_read(&irq_err_count);
-
-#ifdef CONFIG_X86_IO_APIC
-	sum += atomic_read(&irq_mis_count);
-#endif
-	return sum;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <mach_apic.h>
 
@@ -395,20 +237,22 @@
 {
 	unsigned int irq;
 	static int warned;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
+	for_each_irq_desc(irq, desc) {
 		cpumask_t mask;
+
 		if (irq == 2)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
-		else if (irq_desc[irq].action && !(warned++))
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
+		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
 
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index f065fe9..60eb84e 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,28 +18,6 @@
 #include <asm/idle.h>
 #include <asm/smp.h>
 
-atomic_t irq_err_count;
-
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
-	/*
-	 * Currently unexpected vectors happen only on SMP and APIC.
-	 * We _must_ ack these because every local APIC has only N
-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-	 * holds up an irq slot - in excessive cases (when multiple
-	 * unexpected vectors occur) that might lock up the APIC
-	 * completely.
-	 * But don't ack when the APIC is disabled. -AK
-	 */
-	if (!disable_apic)
-		ack_APIC_irq();
-}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 /*
  * Probabilistic stack overflow check:
@@ -65,122 +43,6 @@
 #endif
 
 /*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		unsigned any_count = 0;
-
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-#ifndef CONFIG_SMP
-		any_count = kstat_irqs(i);
-#else
-		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
-#endif
-		action = irq_desc[i].action;
-		if (!action && !any_count)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
-
-		if (action) {
-			seq_printf(p, "  %s", action->name);
-			while ((action = action->next) != NULL)
-				seq_printf(p, ", %s", action->name);
-		}
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-		seq_printf(p, "  Non-maskable interrupts\n");
-		seq_printf(p, "LOC: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-		seq_printf(p, "  Local timer interrupts\n");
-#ifdef CONFIG_SMP
-		seq_printf(p, "RES: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
-		seq_printf(p, "  Rescheduling interrupts\n");
-		seq_printf(p, "CAL: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
-		seq_printf(p, "  Function call interrupts\n");
-		seq_printf(p, "TLB: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
-		seq_printf(p, "  TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-		seq_printf(p, "TRM: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
-		seq_printf(p, "  Thermal event interrupts\n");
-		seq_printf(p, "THR: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
-		seq_printf(p, "  Threshold APIC interrupts\n");
-#endif
-		seq_printf(p, "SPU: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
-		seq_printf(p, "  Spurious interrupts\n");
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-	}
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = cpu_pda(cpu)->__nmi_count;
-
-	sum += cpu_pda(cpu)->apic_timer_irqs;
-#ifdef CONFIG_SMP
-	sum += cpu_pda(cpu)->irq_resched_count;
-	sum += cpu_pda(cpu)->irq_call_count;
-	sum += cpu_pda(cpu)->irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += cpu_pda(cpu)->irq_thermal_count;
-	sum += cpu_pda(cpu)->irq_threshold_count;
-#endif
-	sum += cpu_pda(cpu)->irq_spurious_count;
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	return atomic_read(&irq_err_count);
-}
-
-/*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
  * handlers).
@@ -188,6 +50,7 @@
 asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 
 	/* high bit used in ret_from_ code  */
 	unsigned vector = ~regs->orig_ax;
@@ -201,8 +64,9 @@
 	stack_overflow_check(regs);
 #endif
 
-	if (likely(irq < NR_IRQS))
-		generic_handle_irq(irq);
+	desc = irq_to_desc(irq);
+	if (likely(desc))
+		generic_handle_irq_desc(irq, desc);
 	else {
 		if (!disable_apic)
 			ack_APIC_irq();
@@ -223,8 +87,9 @@
 {
 	unsigned int irq;
 	static int warned;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
+	for_each_irq_desc(irq, desc) {
 		cpumask_t mask;
 		int break_affinity = 0;
 		int set_affinity = 1;
@@ -233,32 +98,32 @@
 			continue;
 
		/* interrupts are disabled at this point */
-		spin_lock(&irq_desc[irq].lock);
+		spin_lock(&desc->lock);
 
 		if (!irq_has_action(irq) ||
-		    cpus_equal(irq_desc[irq].affinity, map)) {
-			spin_unlock(&irq_desc[irq].lock);
+		    cpus_equal(desc->affinity, map)) {
+			spin_unlock(&desc->lock);
 			continue;
 		}
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (cpus_empty(mask)) {
 			break_affinity = 1;
 			mask = map;
 		}
 
-		if (irq_desc[irq].chip->mask)
-			irq_desc[irq].chip->mask(irq);
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
 
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
-		if (irq_desc[irq].chip->unmask)
-			irq_desc[irq].chip->unmask(irq);
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
 
-		spin_unlock(&irq_desc[irq].lock);
+		spin_unlock(&desc->lock);
 
 		if (break_affinity && set_affinity)
 			printk("Broke affinity for irq %i\n", irq);
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 9200a1e..845aa98 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -69,6 +69,13 @@
 	 * 16 old-style INTA-cycle interrupts:
 	 */
 	for (i = 0; i < 16; i++) {
+		/* first use of this irq_desc: initialize it */
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
 		set_irq_chip_and_handler_name(i, &i8259A_chip,
 					      handle_level_irq, "XT");
 	}
@@ -83,6 +90,27 @@
 	.name = "cascade",
 };
 
+DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+	[0 ... IRQ0_VECTOR - 1] = -1,
+	[IRQ0_VECTOR] = 0,
+	[IRQ1_VECTOR] = 1,
+	[IRQ2_VECTOR] = 2,
+	[IRQ3_VECTOR] = 3,
+	[IRQ4_VECTOR] = 4,
+	[IRQ5_VECTOR] = 5,
+	[IRQ6_VECTOR] = 6,
+	[IRQ7_VECTOR] = 7,
+	[IRQ8_VECTOR] = 8,
+	[IRQ9_VECTOR] = 9,
+	[IRQ10_VECTOR] = 10,
+	[IRQ11_VECTOR] = 11,
+	[IRQ12_VECTOR] = 12,
+	[IRQ13_VECTOR] = 13,
+	[IRQ14_VECTOR] = 14,
+	[IRQ15_VECTOR] = 15,
+	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+};
+
 /* Overridden in paravirt.c */
 void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
 
@@ -98,22 +126,14 @@
 	 * us. (some of these will be overridden and become
 	 * 'special' SMP interrupts)
 	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (i >= NR_IRQS)
-			break;
+	for (i =  FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* SYSCALL_VECTOR was reserved in trap_init. */
-		if (!test_bit(vector, used_vectors))
-			set_intr_gate(vector, interrupt[i]);
+		if (i != SYSCALL_VECTOR)
+			set_intr_gate(i, interrupt[i]);
 	}
 
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
-	/*
-	 * IRQ0 must be given a fixed assignment and initialized,
-	 * because it's used before the IO-APIC is set up.
-	 */
-	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
 	/*
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
@@ -128,6 +148,9 @@
 
 	/* IPI for single call function */
 	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
+
+	/* Low priority IPI to cleanup after moving an irq */
+	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
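
For reference, a short sketch (not from the patch; the example_ name is made up) of how the statically initialized per-CPU vector_irq[] table above is consumed. The low-level entry stub hands do_IRQ() a vector number, the table translates it back to a Linux irq, and slots that were never assigned keep the -1 initializer, which is how spurious vectors are recognized.

static int example_vector_to_irq(unsigned int vector)
{
	int irq = __get_cpu_var(vector_irq)[vector];

	if (irq == -1)
		return -1;		/* vector not bound to any irq on this CPU */
	return irq;
}
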
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 5b5be9d..ff02353 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -142,23 +142,19 @@
 	init_bsp_APIC();
 	init_8259A(0);
 
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
+	for (i = 0; i < 16; i++) {
+		/* first use of this irq_desc: initialize it */
+		struct irq_desc *desc = irq_to_desc(i);
 
-		if (i < 16) {
-			/*
-			 * 16 old-style INTA-cycle interrupts:
-			 */
-			set_irq_chip_and_handler_name(i, &i8259A_chip,
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
+		/*
+		 * 16 old-style INTA-cycle interrupts:
+		 */
+		set_irq_chip_and_handler_name(i, &i8259A_chip,
 						      handle_level_irq, "XT");
-		} else {
-			/*
-			 * 'high' PCI IRQs filled in on demand
-			 */
-			irq_desc[i].chip = &no_irq_chip;
-		}
 	}
 }
 
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1926248..1972266 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,8 +9,6 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
-static int forbid_dac __read_mostly;
-
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
@@ -293,17 +291,3 @@
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
-
-#ifdef CONFIG_PCI
-/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
-
-static __devinit void via_no_dac(struct pci_dev *dev)
-{
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-		printk(KERN_INFO "PCI: VIA PCI bridge detected."
-				 "Disabling DAC.\n");
-		forbid_dac = 1;
-	}
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
-#endif
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index f6a11b9..67465ed 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -35,9 +35,6 @@
 	if (!(word & (1 << 13))) {
 		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
 			"disabling irq balancing and affinity\n");
-#ifdef CONFIG_IRQBALANCE
-		irqbalance_disable("");
-#endif
 		noirqdebug_setup("");
 #ifdef CONFIG_PROC_FS
 		no_irq_affinity = 1;
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 0a23b57..dd6f2b7 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -52,7 +52,7 @@
 
 	cmos_minutes = CMOS_READ(RTC_MINUTES);
 	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BCD_TO_BIN(cmos_minutes);
+		cmos_minutes = bcd2bin(cmos_minutes);
 
 	/*
 	 * since we're only adjusting minutes and seconds,
@@ -69,8 +69,8 @@
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
 		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			BIN_TO_BCD(real_seconds);
-			BIN_TO_BCD(real_minutes);
+			real_seconds = bin2bcd(real_seconds);
+			real_minutes = bin2bcd(real_minutes);
 		}
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
@@ -124,16 +124,16 @@
 	WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
 
 	if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
-		BCD_TO_BIN(sec);
-		BCD_TO_BIN(min);
-		BCD_TO_BIN(hour);
-		BCD_TO_BIN(day);
-		BCD_TO_BIN(mon);
-		BCD_TO_BIN(year);
+		sec = bcd2bin(sec);
+		min = bcd2bin(min);
+		hour = bcd2bin(hour);
+		day = bcd2bin(day);
+		mon = bcd2bin(mon);
+		year = bcd2bin(year);
 	}
 
 	if (century) {
-		BCD_TO_BIN(century);
+		century = bcd2bin(century);
 		year += century * 100;
 		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
 	} else
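
A self-contained illustration (not part of the patch) of what the bcd2bin()/bin2bcd() helpers used above compute; the kernel's own one-liners live in include/linux/bcd.h. CMOS/RTC registers store one decimal digit per nibble, so the value 42 is kept as 0x42.

static unsigned int example_bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;		/* 0x42 -> 42 */
}

static unsigned char example_bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);		/* 42 -> 0x42 */
}
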
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2255782..0fa6790 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -561,7 +561,13 @@
 
 }
 
-#ifdef CONFIG_PROC_VMCORE
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
+
+#ifdef CONFIG_CRASH_DUMP
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel. This option will be passed
  * by kexec loader to the capture kernel.
@@ -1067,6 +1073,7 @@
 #endif
 
 	prefill_possible_map();
+
 #ifdef CONFIG_X86_64
 	init_cpu_to_node();
 #endif
@@ -1074,6 +1081,9 @@
 	init_apic_mappings();
 	ioapic_init_mappings();
 
+	/* need to wait until the io_apic is mapped */
+	nr_irqs = probe_nr_irqs();
+
 	kvm_guest_init();
 
 	e820_reserve_resources();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0e67f72..410c88f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -140,25 +140,30 @@
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size = PERCPU_ENOUGH_ROOM;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
+	unsigned long align = 1;
 
 	/* Setup cpu_pda map */
 	setup_cpu_pda_map();
 
 	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
+	old_size = PERCPU_ENOUGH_ROOM;
+	align = max_t(unsigned long, PAGE_SIZE, align);
+	size = roundup(old_size, align);
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 			  size);
 
 	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
+		ptr = __alloc_bootmem(size, align,
+				 __pa(MAX_DMA_ADDRESS));
 #else
 		int node = early_cpu_to_node(cpu);
 		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = alloc_bootmem_pages(size);
+			ptr = __alloc_bootmem(size, align,
+					 __pa(MAX_DMA_ADDRESS));
 			printk(KERN_INFO
 			       "cpu %d has no node %d or node-local memory\n",
 				cpu, node);
@@ -167,7 +172,8 @@
 					 cpu, __pa(ptr));
 		}
 		else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
+							__pa(MAX_DMA_ADDRESS));
 			if (ptr)
 				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
 					 cpu, node, __pa(ptr));
@@ -175,7 +181,6 @@
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7ed9e07..7ece815 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -543,10 +543,10 @@
 	int timeout;
 	u32 status;
 
-	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
+	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
 
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
-		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
+		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
 
 		/*
 		 * Wait for idle.
@@ -874,7 +874,7 @@
 	start_ip = setup_trampoline();
 
 	/* So we see what's up   */
-	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
+	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
 			  cpu, apicid, start_ip);
 
 	/*
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
new file mode 100644
index 0000000..aeef529
--- /dev/null
+++ b/arch/x86/kernel/uv_irq.c
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/apic.h>
+#include <asm/uv/uv_irq.h>
+
+static void uv_noop(unsigned int irq)
+{
+}
+
+static unsigned int uv_noop_ret(unsigned int irq)
+{
+	return 0;
+}
+
+static void uv_ack_apic(unsigned int irq)
+{
+	ack_APIC_irq();
+}
+
+struct irq_chip uv_irq_chip = {
+	.name		= "UV-CORE",
+	.startup	= uv_noop_ret,
+	.shutdown	= uv_noop,
+	.enable		= uv_noop,
+	.disable	= uv_noop,
+	.ack		= uv_noop,
+	.mask		= uv_noop,
+	.unmask		= uv_noop,
+	.eoi		= uv_ack_apic,
+	.end		= uv_noop,
+};
+
+/*
+ * Set up a mapping of an available irq and vector, and enable the specified
+ * MMR that defines the MSI that is to be sent to the specified CPU when an
+ * interrupt is raised.
+ */
+int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
+		 unsigned long mmr_offset)
+{
+	int irq;
+	int ret;
+
+	irq = create_irq();
+	if (irq <= 0)
+		return -EBUSY;
+
+	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
+	if (ret != irq)
+		destroy_irq(irq);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(uv_setup_irq);
+
+/*
+ * Tear down a mapping of an irq and vector, and disable the specified MMR that
+ * defined the MSI that was to be sent to the specified CPU when an interrupt
+ * was raised.
+ *
+ * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
+ */
+void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
+{
+	arch_disable_uv_irq(mmr_blade, mmr_offset);
+	destroy_irq(irq);
+}
+EXPORT_SYMBOL_GPL(uv_teardown_irq);
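
A hypothetical caller (not part of the patch; the handler, blade number and MMR offset below are placeholders) showing how the two exported helpers above are meant to be paired: uv_setup_irq() returns the allocated irq on success, the driver wires it up with request_irq(), and teardown happens in the reverse order with the same blade/offset arguments.

static irqreturn_t example_uv_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_uv_irq = -1;

static int example_request_uv_irq(int cpu, int mmr_blade, unsigned long mmr_offset)
{
	int irq = uv_setup_irq("example-uv", cpu, mmr_blade, mmr_offset);

	if (irq < 0)
		return irq;		/* create_irq() or MMR programming failed */

	example_uv_irq = irq;
	return request_irq(irq, example_uv_handler, 0, "example-uv", NULL);
}

static void example_release_uv_irq(int mmr_blade, unsigned long mmr_offset)
{
	free_irq(example_uv_irq, NULL);
	uv_teardown_irq(example_uv_irq, mmr_blade, mmr_offset);
}
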
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
new file mode 100644
index 0000000..67f9b9d
--- /dev/null
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -0,0 +1,72 @@
+/*
+ * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Russ Anderson
+ */
+
+#include <linux/sysdev.h>
+#include <asm/uv/bios.h>
+
+struct kobject *sgi_uv_kobj;
+
+static ssize_t partition_id_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
+}
+
+static ssize_t coherence_id_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
+}
+
+static struct kobj_attribute partition_id_attr =
+	__ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
+
+static struct kobj_attribute coherence_id_attr =
+	__ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
+
+
+static int __init sgi_uv_sysfs_init(void)
+{
+	unsigned long ret;
+
+	if (!sgi_uv_kobj)
+		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
+	if (!sgi_uv_kobj) {
+		printk(KERN_WARNING "kobject_create_and_add sgi_uv failed \n");
+		return -EINVAL;
+	}
+
+	ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
+	if (ret) {
+		printk(KERN_WARNING "sysfs_create_file partition_id failed \n");
+		return ret;
+	}
+
+	ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
+	if (ret) {
+		printk(KERN_WARNING "sysfs_create_file coherence_id failed \n");
+		return ret;
+	}
+
+	return 0;
+}
+
+device_initcall(sgi_uv_sysfs_init);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 61a97e6..0c9667f 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -484,10 +484,11 @@
 static unsigned int startup_cobalt_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	spin_lock_irqsave(&cobalt_lock, flags);
-	if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
-		irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
+	if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
+		desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
 	enable_cobalt_irq(irq);
 	spin_unlock_irqrestore(&cobalt_lock, flags);
 	return 0;
@@ -506,9 +507,10 @@
 static void end_cobalt_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	spin_lock_irqsave(&cobalt_lock, flags);
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+	if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
 		enable_cobalt_irq(irq);
 	spin_unlock_irqrestore(&cobalt_lock, flags);
 }
@@ -626,12 +628,12 @@
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
-	desc = irq_desc + realirq;
+	desc = irq_to_desc(realirq);
 
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_cpu(smp_processor_id()).irqs[realirq]++;
+	kstat_incr_irqs_this_cpu(realirq, desc);
 
 	if (likely(desc->action != NULL))
 		handle_IRQ_event(realirq, desc->action);
@@ -662,27 +664,29 @@
 	int i;
 
 	for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = 0;
+		desc->depth = 1;
 
 		if (i == 0) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_IDE0) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_IDE1) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_8259) {
-			irq_desc[i].chip = &piix4_master_irq_type;
+			desc->chip = &piix4_master_irq_type;
 		}
 		else if (i < CO_IRQ_APIC0) {
-			irq_desc[i].chip = &piix4_virtual_irq_type;
+			desc->chip = &piix4_virtual_irq_type;
 		}
 		else if (IS_CO_APIC(i)) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 	}
 
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 6953859..254ee07 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -235,11 +235,14 @@
 
 void __init vmi_time_init(void)
 {
+	unsigned int cpu;
	/* Disable PIT: BIOSes start PIT CH0 with 18.2hz periodic. */
 	outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
 
 	vmi_time_init_clockevent();
 	setup_irq(0, &vmi_clock_action);
+	for_each_possible_cpu(cpu)
+		per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 65f0b8a..48ee4f9 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -582,7 +582,7 @@
 	for (i = 0; i < LGUEST_IRQS; i++) {
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (vector != SYSCALL_VECTOR) {
-			set_intr_gate(vector, interrupt[i]);
+			set_intr_gate(vector, interrupt[vector]);
 			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
 						      handle_level_irq,
 						      "level");
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index df37fc9..3c3b471 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -41,6 +41,10 @@
 	 { }
 };
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return cpumask_of_cpu(cpu);
+}
 
 static int probe_bigsmp(void)
 {
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 6513d41..28459ca 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -75,4 +75,18 @@
 }
 #endif
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 8cf5839..71a309b 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -38,4 +38,18 @@
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 6ad6b67..6272b5e 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -23,4 +23,18 @@
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 199a5f4..0f6e8a6 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1483,7 +1483,7 @@
  * the interrupt off to another CPU */
 static void before_handle_vic_irq(unsigned int irq)
 {
-	irq_desc_t *desc = irq_desc + irq;
+	irq_desc_t *desc = irq_to_desc(irq);
 	__u8 cpu = smp_processor_id();
 
 	_raw_spin_lock(&vic_irq_lock);
@@ -1518,7 +1518,7 @@
 /* Finish the VIC interrupt: basically mask */
 static void after_handle_vic_irq(unsigned int irq)
 {
-	irq_desc_t *desc = irq_desc + irq;
+	irq_desc_t *desc = irq_to_desc(irq);
 
 	_raw_spin_lock(&vic_irq_lock);
 	{
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 635b50e..2c4baa8 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -56,13 +56,6 @@
 static DEFINE_PER_CPU(struct trap_reason, pf_reason);
 static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);
 
-#if 0 /* XXX: no way gather this info anymore */
-/* Access to this is not per-cpu. */
-static DEFINE_PER_CPU(atomic_t, dropped);
-#endif
-
-static struct dentry *marker_file;
-
 static DEFINE_MUTEX(mmiotrace_mutex);
 static DEFINE_SPINLOCK(trace_lock);
 static atomic_t mmiotrace_enabled;
@@ -75,7 +68,7 @@
  *   and trace_lock.
  * - Routines depending on is_enabled() must take trace_lock.
  * - trace_list users must hold trace_lock.
- * - is_enabled() guarantees that mmio_trace_record is allowed.
+ * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
  * - pre/post callbacks assume the effect of is_enabled() being true.
  */
 
@@ -97,44 +90,6 @@
 	return atomic_read(&mmiotrace_enabled);
 }
 
-#if 0 /* XXX: needs rewrite */
-/*
- * Write callback for the debugfs entry:
- * Read a marker and write it to the mmio trace log
- */
-static ssize_t write_marker(struct file *file, const char __user *buffer,
-						size_t count, loff_t *ppos)
-{
-	char *event = NULL;
-	struct mm_io_header *headp;
-	ssize_t len = (count > 65535) ? 65535 : count;
-
-	event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
-	if (!event)
-		return -ENOMEM;
-
-	headp = (struct mm_io_header *)event;
-	headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
-	headp->data_len = len;
-
-	if (copy_from_user(event + sizeof(*headp), buffer, len)) {
-		kfree(event);
-		return -EFAULT;
-	}
-
-	spin_lock_irq(&trace_lock);
-#if 0 /* XXX: convert this to use tracing */
-	if (is_enabled())
-		relay_write(chan, event, sizeof(*headp) + len);
-	else
-#endif
-		len = -EINVAL;
-	spin_unlock_irq(&trace_lock);
-	kfree(event);
-	return len;
-}
-#endif
-
 static void print_pte(unsigned long address)
 {
 	unsigned int level;
@@ -307,8 +262,10 @@
 	map.map_id = trace->id;
 
 	spin_lock_irq(&trace_lock);
-	if (!is_enabled())
+	if (!is_enabled()) {
+		kfree(trace);
 		goto not_enabled;
+	}
 
 	mmio_trace_mapping(&map);
 	list_add_tail(&trace->list, &trace_list);
@@ -377,6 +334,23 @@
 		iounmap_trace_core(addr);
 }
 
+int mmiotrace_printk(const char *fmt, ...)
+{
+	int ret = 0;
+	va_list args;
+	unsigned long flags;
+	va_start(args, fmt);
+
+	spin_lock_irqsave(&trace_lock, flags);
+	if (is_enabled())
+		ret = mmio_trace_printk(fmt, args);
+	spin_unlock_irqrestore(&trace_lock, flags);
+
+	va_end(args);
+	return ret;
+}
+EXPORT_SYMBOL(mmiotrace_printk);
+
 static void clear_trace_list(void)
 {
 	struct remap_trace *trace;
@@ -462,26 +436,12 @@
 }
 #endif
 
-#if 0 /* XXX: out of order */
-static struct file_operations fops_marker = {
-	.owner =	THIS_MODULE,
-	.write =	write_marker
-};
-#endif
-
 void enable_mmiotrace(void)
 {
 	mutex_lock(&mmiotrace_mutex);
 	if (is_enabled())
 		goto out;
 
-#if 0 /* XXX: tracing does not support text entries */
-	marker_file = debugfs_create_file("marker", 0660, dir, NULL,
-								&fops_marker);
-	if (!marker_file)
-		pr_err(NAME "marker file creation failed.\n");
-#endif
-
 	if (nommiotrace)
 		pr_info(NAME "MMIO tracing disabled.\n");
 	enter_uniprocessor();
@@ -506,11 +466,6 @@
 
 	clear_trace_list(); /* guarantees: no more kmmio callbacks */
 	leave_uniprocessor();
-	if (marker_file) {
-		debugfs_remove(marker_file);
-		marker_file = NULL;
-	}
-
 	pr_info(NAME "disabled.\n");
 out:
 	mutex_unlock(&mmiotrace_mutex);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a9ec89c..407d878 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -792,6 +792,8 @@
 	/* Must avoid aliasing mappings in the highmem code */
 	kmap_flush_unused();
 
+	vm_unmap_aliases();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index efa1911..df3d5c8 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -79,25 +79,34 @@
 static unsigned int mw64[] = { 0x89, 0x8B };
 #endif /* not __i386__ */
 
-static int skip_prefix(unsigned char *addr, int *shorted, int *enlarged,
-								int *rexr)
+struct prefix_bits {
+	unsigned shorted:1;
+	unsigned enlarged:1;
+	unsigned rexr:1;
+	unsigned rex:1;
+};
+
+static int skip_prefix(unsigned char *addr, struct prefix_bits *prf)
 {
 	int i;
 	unsigned char *p = addr;
-	*shorted = 0;
-	*enlarged = 0;
-	*rexr = 0;
+	prf->shorted = 0;
+	prf->enlarged = 0;
+	prf->rexr = 0;
+	prf->rex = 0;
 
 restart:
 	for (i = 0; i < ARRAY_SIZE(prefix_codes); i++) {
 		if (*p == prefix_codes[i]) {
 			if (*p == 0x66)
-				*shorted = 1;
+				prf->shorted = 1;
 #ifdef __amd64__
 			if ((*p & 0xf8) == 0x48)
-				*enlarged = 1;
+				prf->enlarged = 1;
 			if ((*p & 0xf4) == 0x44)
-				*rexr = 1;
+				prf->rexr = 1;
+			if ((*p & 0xf0) == 0x40)
+				prf->rex = 1;
 #endif
 			p++;
 			goto restart;
@@ -135,12 +144,12 @@
 {
 	unsigned int opcode;
 	unsigned char *p;
-	int shorted, enlarged, rexr;
+	struct prefix_bits prf;
 	int i;
 	enum reason_type rv = OTHERS;
 
 	p = (unsigned char *)ins_addr;
-	p += skip_prefix(p, &shorted, &enlarged, &rexr);
+	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 
 	CHECK_OP_TYPE(opcode, reg_rop, REG_READ);
@@ -156,10 +165,11 @@
 {
 	unsigned int opcode;
 	unsigned char *p;
-	int i, shorted, enlarged, rexr;
+	struct prefix_bits prf;
+	int i;
 
 	p = (unsigned char *)ins_addr;
-	p += skip_prefix(p, &shorted, &enlarged, &rexr);
+	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 
 	for (i = 0; i < ARRAY_SIZE(rw8); i++)
@@ -168,7 +178,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(rw32); i++)
 		if (rw32[i] == opcode)
-			return (shorted ? 2 : (enlarged ? 8 : 4));
+			return prf.shorted ? 2 : (prf.enlarged ? 8 : 4);
 
 	printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode);
 	return 0;
@@ -178,10 +188,11 @@
 {
 	unsigned int opcode;
 	unsigned char *p;
-	int i, shorted, enlarged, rexr;
+	struct prefix_bits prf;
+	int i;
 
 	p = (unsigned char *)ins_addr;
-	p += skip_prefix(p, &shorted, &enlarged, &rexr);
+	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 
 	for (i = 0; i < ARRAY_SIZE(mw8); i++)
@@ -194,11 +205,11 @@
 
 	for (i = 0; i < ARRAY_SIZE(mw32); i++)
 		if (mw32[i] == opcode)
-			return shorted ? 2 : 4;
+			return prf.shorted ? 2 : 4;
 
 	for (i = 0; i < ARRAY_SIZE(mw64); i++)
 		if (mw64[i] == opcode)
-			return shorted ? 2 : (enlarged ? 8 : 4);
+			return prf.shorted ? 2 : (prf.enlarged ? 8 : 4);
 
 	printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode);
 	return 0;
@@ -238,7 +249,7 @@
 #endif
 };
 
-static unsigned char *get_reg_w8(int no, struct pt_regs *regs)
+static unsigned char *get_reg_w8(int no, int rex, struct pt_regs *regs)
 {
 	unsigned char *rv = NULL;
 
@@ -255,18 +266,6 @@
 	case arg_DL:
 		rv = (unsigned char *)&regs->dx;
 		break;
-	case arg_AH:
-		rv = 1 + (unsigned char *)&regs->ax;
-		break;
-	case arg_BH:
-		rv = 1 + (unsigned char *)&regs->bx;
-		break;
-	case arg_CH:
-		rv = 1 + (unsigned char *)&regs->cx;
-		break;
-	case arg_DH:
-		rv = 1 + (unsigned char *)&regs->dx;
-		break;
 #ifdef __amd64__
 	case arg_R8:
 		rv = (unsigned char *)&regs->r8;
@@ -294,9 +293,55 @@
 		break;
 #endif
 	default:
-		printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no);
 		break;
 	}
+
+	if (rv)
+		return rv;
+
+	if (rex) {
+		/*
+		 * If REX prefix exists, access low bytes of SI etc.
+		 * instead of AH etc.
+		 */
+		switch (no) {
+		case arg_SI:
+			rv = (unsigned char *)&regs->si;
+			break;
+		case arg_DI:
+			rv = (unsigned char *)&regs->di;
+			break;
+		case arg_BP:
+			rv = (unsigned char *)&regs->bp;
+			break;
+		case arg_SP:
+			rv = (unsigned char *)&regs->sp;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (no) {
+		case arg_AH:
+			rv = 1 + (unsigned char *)&regs->ax;
+			break;
+		case arg_BH:
+			rv = 1 + (unsigned char *)&regs->bx;
+			break;
+		case arg_CH:
+			rv = 1 + (unsigned char *)&regs->cx;
+			break;
+		case arg_DH:
+			rv = 1 + (unsigned char *)&regs->dx;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!rv)
+		printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no);
+
 	return rv;
 }
 
@@ -368,11 +413,12 @@
 	unsigned char mod_rm;
 	int reg;
 	unsigned char *p;
-	int i, shorted, enlarged, rexr;
+	struct prefix_bits prf;
+	int i;
 	unsigned long rv;
 
 	p = (unsigned char *)ins_addr;
-	p += skip_prefix(p, &shorted, &enlarged, &rexr);
+	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 	for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
 		if (reg_rop[i] == opcode) {
@@ -392,10 +438,10 @@
 
 do_work:
 	mod_rm = *p;
-	reg = ((mod_rm >> 3) & 0x7) | (rexr << 3);
+	reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3);
 	switch (get_ins_reg_width(ins_addr)) {
 	case 1:
-		return *get_reg_w8(reg, regs);
+		return *get_reg_w8(reg, prf.rex, regs);
 
 	case 2:
 		return *(unsigned short *)get_reg_w32(reg, regs);
@@ -422,11 +468,12 @@
 	unsigned char mod_rm;
 	unsigned char mod;
 	unsigned char *p;
-	int i, shorted, enlarged, rexr;
+	struct prefix_bits prf;
+	int i;
 	unsigned long rv;
 
 	p = (unsigned char *)ins_addr;
-	p += skip_prefix(p, &shorted, &enlarged, &rexr);
+	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 	for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
 		if (imm_wop[i] == opcode) {
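
For readers less familiar with x86-64 instruction encoding, a self-contained sketch (not from the patch) of the prefix tests performed by skip_prefix() above. A REX prefix is any byte 0x40-0x4f, laid out as 0100WRXB: REX.W widens the operand to 64 bits (the 'enlarged' flag) and REX.R supplies bit 3 of the ModRM reg field (the 'rexr' flag). The mere presence of a REX byte also changes which 8-bit registers are encodable (SPL/BPL/SIL/DIL instead of AH/CH/DH/BH), which is why the reworked get_reg_w8() takes the new 'rex' flag.

struct example_rex {
	unsigned int w:1, r:1, x:1, b:1;
};

static int example_decode_rex(unsigned char byte, struct example_rex *rex)
{
	if ((byte & 0xf0) != 0x40)
		return 0;			/* not a REX prefix */

	rex->w = (byte >> 3) & 1;		/* 64-bit operand size */
	rex->r = (byte >> 2) & 1;		/* extends ModRM.reg; cf. (*p & 0xf4) == 0x44 */
	rex->x = (byte >> 1) & 1;		/* extends SIB.index */
	rex->b = byte & 1;			/* extends ModRM.rm / SIB.base */
	return 1;
}
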
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index d877c5b..ab50a8d 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -3,6 +3,7 @@
  */
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/mmiotrace.h>
 
 #define MODULE_NAME "testmmiotrace"
 
@@ -13,6 +14,7 @@
 static void do_write_test(void __iomem *p)
 {
 	unsigned int i;
+	mmiotrace_printk("Write test.\n");
 	for (i = 0; i < 256; i++)
 		iowrite8(i, p + i);
 	for (i = 1024; i < (5 * 1024); i += 2)
@@ -24,6 +26,7 @@
 static void do_read_test(void __iomem *p)
 {
 	unsigned int i;
+	mmiotrace_printk("Read test.\n");
 	for (i = 0; i < 256; i++)
 		ioread8(p + i);
 	for (i = 1024; i < (5 * 1024); i += 2)
@@ -39,6 +42,7 @@
 		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
 		return;
 	}
+	mmiotrace_printk("ioremap returned %p.\n", p);
 	do_write_test(p);
 	do_read_test(p);
 	iounmap(p);
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index e2095cb..04df67f 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -52,8 +52,7 @@
 	unsigned long ret;
 } __attribute__((packed));
 
-static struct frame_head *
-dump_user_backtrace(struct frame_head * head)
+static struct frame_head *dump_user_backtrace(struct frame_head *head)
 {
 	struct frame_head bufhead[2];
 
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 57f6c90..022cd41 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -28,85 +28,9 @@
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
-static int nmi_start(void);
-static void nmi_stop(void);
-static void nmi_cpu_start(void *dummy);
-static void nmi_cpu_stop(void *dummy);
-
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
 
-#ifdef CONFIG_SMP
-static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
-				 void *data)
-{
-	int cpu = (unsigned long)data;
-	switch (action) {
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
-		break;
-	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block oprofile_cpu_nb = {
-	.notifier_call = oprofile_cpu_notifier
-};
-#endif
-
-#ifdef CONFIG_PM
-
-static int nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-	/* Only one CPU left, just stop that one */
-	if (nmi_enabled == 1)
-		nmi_cpu_stop(NULL);
-	return 0;
-}
-
-static int nmi_resume(struct sys_device *dev)
-{
-	if (nmi_enabled == 1)
-		nmi_cpu_start(NULL);
-	return 0;
-}
-
-static struct sysdev_class oprofile_sysclass = {
-	.name		= "oprofile",
-	.resume		= nmi_resume,
-	.suspend	= nmi_suspend,
-};
-
-static struct sys_device device_oprofile = {
-	.id	= 0,
-	.cls	= &oprofile_sysclass,
-};
-
-static int __init init_sysfs(void)
-{
-	int error;
-
-	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
-	return error;
-}
-
-static void exit_sysfs(void)
-{
-	sysdev_unregister(&device_oprofile);
-	sysdev_class_unregister(&oprofile_sysclass);
-}
-
-#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int profile_exceptions_notify(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -361,6 +285,77 @@
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
+				 void *data)
+{
+	int cpu = (unsigned long)data;
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block oprofile_cpu_nb = {
+	.notifier_call = oprofile_cpu_notifier
+};
+#endif
+
+#ifdef CONFIG_PM
+
+static int nmi_suspend(struct sys_device *dev, pm_message_t state)
+{
+	/* Only one CPU left, just stop that one */
+	if (nmi_enabled == 1)
+		nmi_cpu_stop(NULL);
+	return 0;
+}
+
+static int nmi_resume(struct sys_device *dev)
+{
+	if (nmi_enabled == 1)
+		nmi_cpu_start(NULL);
+	return 0;
+}
+
+static struct sysdev_class oprofile_sysclass = {
+	.name		= "oprofile",
+	.resume		= nmi_resume,
+	.suspend	= nmi_suspend,
+};
+
+static struct sys_device device_oprofile = {
+	.id	= 0,
+	.cls	= &oprofile_sysclass,
+};
+
+static int __init init_sysfs(void)
+{
+	int error;
+
+	error = sysdev_class_register(&oprofile_sysclass);
+	if (!error)
+		error = sysdev_register(&device_oprofile);
+	return error;
+}
+
+static void exit_sysfs(void)
+{
+	sysdev_unregister(&device_oprofile);
+	sysdev_class_unregister(&oprofile_sysclass);
+}
+
+#else
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
+#endif /* CONFIG_PM */
+
 static int p4force;
 module_param(p4force, int, 0);
 
@@ -420,9 +415,6 @@
 	case 15: case 23:
 		*cpu_type = "i386/core_2";
 		break;
-	case 26:
-		*cpu_type = "i386/core_2";
-		break;
 	default:
 		/* Unknown */
 		return 0;
@@ -432,6 +424,16 @@
 	return 1;
 }
 
+static int __init arch_perfmon_init(char **cpu_type)
+{
+	if (!cpu_has_arch_perfmon)
+		return 0;
+	*cpu_type = "i386/arch_perfmon";
+	model = &op_arch_perfmon_spec;
+	arch_perfmon_setup_counters();
+	return 1;
+}
+
 /* in order to get sysfs right */
 static int using_nmi;
 
@@ -439,7 +441,7 @@
 {
 	__u8 vendor = boot_cpu_data.x86_vendor;
 	__u8 family = boot_cpu_data.x86;
-	char *cpu_type;
+	char *cpu_type = NULL;
 	int ret = 0;
 
 	if (!cpu_has_apic)
@@ -477,19 +479,20 @@
 		switch (family) {
 			/* Pentium IV */
 		case 0xf:
-			if (!p4_init(&cpu_type))
-				return -ENODEV;
+			p4_init(&cpu_type);
 			break;
 
 			/* A P6-class processor */
 		case 6:
-			if (!ppro_init(&cpu_type))
-				return -ENODEV;
+			ppro_init(&cpu_type);
 			break;
 
 		default:
-			return -ENODEV;
+			break;
 		}
+
+		if (!cpu_type && !arch_perfmon_init(&cpu_type))
+			return -ENODEV;
 		break;
 
 	default:
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 2880b15..91b6a11 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -6,22 +6,22 @@
  *
  * @author John Levon
  */
- 
+
 #ifndef OP_COUNTER_H
 #define OP_COUNTER_H
- 
+
 #define OP_MAX_COUNTER 8
- 
+
 /* Per-perfctr configuration as set via
  * oprofilefs.
  */
 struct op_counter_config {
-        unsigned long count;
-        unsigned long enabled;
-        unsigned long event;
-        unsigned long kernel;
-        unsigned long user;
-        unsigned long unit_mask;
+	unsigned long count;
+	unsigned long enabled;
+	unsigned long event;
+	unsigned long kernel;
+	unsigned long user;
+	unsigned long unit_mask;
 };
 
 extern struct op_counter_config counter_config[];
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index d9faf60..5095137 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -67,8 +67,9 @@
 
 /* The function interface needs to be fixed, something like add
    data. Should then be added to linux/oprofile.h. */
-extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
-				    unsigned int * const ibs_sample, u8 code);
+extern void
+oprofile_add_ibs_sample(struct pt_regs *const regs,
+			unsigned int *const ibs_sample, int ibs_code);
 
 struct ibs_fetch_sample {
 	/* MSRC001_1031 IBS Fetch Linear Address Register */
@@ -309,12 +310,15 @@
 #ifdef CONFIG_OPROFILE_IBS
 	if (ibs_allowed && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
-		high = IBS_FETCH_HIGH_ENABLE;
+		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
+			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
 	if (ibs_allowed && ibs_config.op_enabled) {
-		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
+		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
+			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
+			+ IBS_OP_LOW_ENABLE;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
 	}
@@ -468,11 +472,10 @@
 		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
 }
 
-static int (*create_arch_files)(struct super_block * sb, struct dentry * root);
+static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
 
-static int setup_ibs_files(struct super_block * sb, struct dentry * root)
+static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 {
-	char buf[12];
 	struct dentry *dir;
 	int ret = 0;
 
@@ -494,22 +497,22 @@
 	ibs_config.max_cnt_op = 250000;
 	ibs_config.op_enabled = 0;
 	ibs_config.dispatched_ops = 1;
-	snprintf(buf,  sizeof(buf), "ibs_fetch");
-	dir = oprofilefs_mkdir(sb, root, buf);
+
+	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+	oprofilefs_create_ulong(sb, dir, "enable",
+				&ibs_config.fetch_enabled);
+	oprofilefs_create_ulong(sb, dir, "max_count",
+				&ibs_config.max_cnt_fetch);
 	oprofilefs_create_ulong(sb, dir, "rand_enable",
 				&ibs_config.rand_en);
+
+	dir = oprofilefs_mkdir(sb, root, "ibs_op");
 	oprofilefs_create_ulong(sb, dir, "enable",
-		&ibs_config.fetch_enabled);
+				&ibs_config.op_enabled);
 	oprofilefs_create_ulong(sb, dir, "max_count",
-		&ibs_config.max_cnt_fetch);
-	snprintf(buf,  sizeof(buf), "ibs_uops");
-	dir = oprofilefs_mkdir(sb, root, buf);
-	oprofilefs_create_ulong(sb, dir, "enable",
-		&ibs_config.op_enabled);
-	oprofilefs_create_ulong(sb, dir, "max_count",
-		&ibs_config.max_cnt_op);
+				&ibs_config.max_cnt_op);
 	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
-		&ibs_config.dispatched_ops);
+				&ibs_config.dispatched_ops);
 
 	return 0;
 }
@@ -530,14 +533,14 @@
 #endif
 
 struct op_x86_model_spec const op_amd_spec = {
-	.init = op_amd_init,
-	.exit = op_amd_exit,
-	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
-	.fill_in_addresses = &op_amd_fill_in_addresses,
-	.setup_ctrs = &op_amd_setup_ctrs,
-	.check_ctrs = &op_amd_check_ctrs,
-	.start = &op_amd_start,
-	.stop = &op_amd_stop,
-	.shutdown = &op_amd_shutdown
+	.init			= op_amd_init,
+	.exit			= op_amd_exit,
+	.num_counters		= NUM_COUNTERS,
+	.num_controls		= NUM_CONTROLS,
+	.fill_in_addresses	= &op_amd_fill_in_addresses,
+	.setup_ctrs		= &op_amd_setup_ctrs,
+	.check_ctrs		= &op_amd_check_ctrs,
+	.start			= &op_amd_start,
+	.stop			= &op_amd_stop,
+	.shutdown		= &op_amd_shutdown
 };
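
The IBS hunks above pack configuration bits into the two 32-bit halves that wrmsr() takes: rand_en lands in bit 57 of IBSFETCHCTL, i.e. bit 25 of the high word, and dispatched_ops in bit 19 of the IBSOPCTL low word. A minimal user-space sketch of that packing; the two enable constants are assumed values (bit 48 and bit 17 of the respective MSRs), not taken from this hunk:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the driver's enable-bit constants. */
#define IBS_FETCH_HIGH_ENABLE  (1u << 16)   /* MSR bit 48 in the high word */
#define IBS_OP_LOW_ENABLE      (1u << 17)   /* MSR bit 17 in the low word */

int main(void)
{
	unsigned long max_cnt_fetch = 0x10000, rand_en = 1;
	unsigned long max_cnt_op = 250000, dispatched_ops = 1;
	uint32_t low, high;

	/* wrmsr takes the 64-bit value as two 32-bit halves, so MSR bit 57
	 * becomes bit 57 - 32 = 25 of the high word. */
	low  = (max_cnt_fetch >> 4) & 0xFFFF;
	high = ((rand_en & 0x1) << 25) | IBS_FETCH_HIGH_ENABLE;
	printf("IBSFETCHCTL = %#018llx\n",
	       ((unsigned long long)high << 32) | low);

	/* IBSOPCTL keeps everything in the low word: count in bits 15:0,
	 * dispatched-ops select in bit 19, plus the enable bit. */
	low  = ((max_cnt_op >> 4) & 0xFFFF)
		| ((dispatched_ops & 0x1) << 19)
		| IBS_OP_LOW_ENABLE;
	high = 0;
	printf("IBSOPCTL    = %#018llx\n",
	       ((unsigned long long)high << 32) | low);
	return 0;
}
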
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 43ac5af..4c4a51c 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -698,24 +698,24 @@
 
 #ifdef CONFIG_SMP
 struct op_x86_model_spec const op_p4_ht2_spec = {
-	.num_counters = NUM_COUNTERS_HT2,
-	.num_controls = NUM_CONTROLS_HT2,
-	.fill_in_addresses = &p4_fill_in_addresses,
-	.setup_ctrs = &p4_setup_ctrs,
-	.check_ctrs = &p4_check_ctrs,
-	.start = &p4_start,
-	.stop = &p4_stop,
-	.shutdown = &p4_shutdown
+	.num_counters		= NUM_COUNTERS_HT2,
+	.num_controls		= NUM_CONTROLS_HT2,
+	.fill_in_addresses	= &p4_fill_in_addresses,
+	.setup_ctrs		= &p4_setup_ctrs,
+	.check_ctrs		= &p4_check_ctrs,
+	.start			= &p4_start,
+	.stop			= &p4_stop,
+	.shutdown		= &p4_shutdown
 };
 #endif
 
 struct op_x86_model_spec const op_p4_spec = {
-	.num_counters = NUM_COUNTERS_NON_HT,
-	.num_controls = NUM_CONTROLS_NON_HT,
-	.fill_in_addresses = &p4_fill_in_addresses,
-	.setup_ctrs = &p4_setup_ctrs,
-	.check_ctrs = &p4_check_ctrs,
-	.start = &p4_start,
-	.stop = &p4_stop,
-	.shutdown = &p4_shutdown
+	.num_counters		= NUM_COUNTERS_NON_HT,
+	.num_controls		= NUM_CONTROLS_NON_HT,
+	.fill_in_addresses	= &p4_fill_in_addresses,
+	.setup_ctrs		= &p4_setup_ctrs,
+	.check_ctrs		= &p4_check_ctrs,
+	.start			= &p4_start,
+	.stop			= &p4_stop,
+	.shutdown		= &p4_shutdown
 };
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index eff431f..0620d6d 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -1,32 +1,34 @@
 /*
  * @file op_model_ppro.h
- * pentium pro / P6 model-specific MSR operations
+ * Family 6 perfmon and architectural perfmon MSR operations
  *
  * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2008 Intel Corporation
  * @remark Read the file COPYING
  *
  * @author John Levon
  * @author Philippe Elie
  * @author Graydon Hoare
+ * @author Andi Kleen
  */
 
 #include <linux/oprofile.h>
+#include <linux/slab.h>
 #include <asm/ptrace.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS 2
-#define NUM_CONTROLS 2
+static int num_counters = 2;
+static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
 #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_32BIT_WRITE(l, msrs, c)	\
-	do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -40,20 +42,20 @@
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT(val, e) (val |= e)
 
-static unsigned long reset_value[NUM_COUNTERS];
+static u64 *reset_value;
 
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 		else
 			msrs->counters[i].addr = 0;
 	}
 
-	for (i = 0; i < NUM_CONTROLS; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
 			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 		else
@@ -67,8 +69,22 @@
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value) {
+		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
+					GFP_ATOMIC);
+		if (!reset_value)
+			return;
+	}
+
+	if (cpu_has_arch_perfmon) {
+		union cpuid10_eax eax;
+		eax.full = cpuid_eax(0xa);
+		if (counter_width < eax.split.bit_width)
+			counter_width = eax.split.bit_width;
+	}
+
 	/* clear all counters */
-	for (i = 0 ; i < NUM_CONTROLS; ++i) {
+	for (i = 0 ; i < num_counters; ++i) {
 		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -77,18 +93,18 @@
 	}
 
 	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
-		CTR_32BIT_WRITE(1, msrs, i);
+		wrmsrl(msrs->counters[i].addr, -1LL);
 	}
 
 	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
 
-			CTR_32BIT_WRITE(counter_config[i].count, msrs, i);
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 
 			CTRL_READ(low, high, msrs, i);
 			CTRL_CLEAR(low);
@@ -111,13 +127,13 @@
 	unsigned int low, high;
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS; ++i) {
+	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
 		CTR_READ(low, high, msrs, i);
 		if (CTR_OVERFLOWED(low)) {
 			oprofile_add_sample(regs, i);
-			CTR_32BIT_WRITE(reset_value[i], msrs, i);
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
 	}
 
@@ -141,7 +157,7 @@
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
 			CTRL_SET_ACTIVE(low);
@@ -156,7 +172,7 @@
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -169,24 +185,70 @@
 {
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+	for (i = 0 ; i < num_counters ; ++i) {
 		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 	}
-	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
+	for (i = 0 ; i < num_counters ; ++i) {
 		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
+	if (reset_value) {
+		kfree(reset_value);
+		reset_value = NULL;
+	}
 }
 
 
-struct op_x86_model_spec const op_ppro_spec = {
-	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
-	.fill_in_addresses = &ppro_fill_in_addresses,
-	.setup_ctrs = &ppro_setup_ctrs,
-	.check_ctrs = &ppro_check_ctrs,
-	.start = &ppro_start,
-	.stop = &ppro_stop,
-	.shutdown = &ppro_shutdown
+struct op_x86_model_spec op_ppro_spec = {
+	.num_counters		= 2,	/* can be overridden */
+	.num_controls		= 2,	/* ditto */
+	.fill_in_addresses	= &ppro_fill_in_addresses,
+	.setup_ctrs		= &ppro_setup_ctrs,
+	.check_ctrs		= &ppro_check_ctrs,
+	.start			= &ppro_start,
+	.stop			= &ppro_stop,
+	.shutdown		= &ppro_shutdown
+};
+
+/*
+ * Architectural performance monitoring.
+ *
+ * Newer Intel CPUs (Core1+) have support for architectural
+ * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
+ * The advantage of this is that it can be done without knowing about
+ * the specific CPU.
+ */
+
+void arch_perfmon_setup_counters(void)
+{
+	union cpuid10_eax eax;
+
+	eax.full = cpuid_eax(0xa);
+
+	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
+	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
+		current_cpu_data.x86_model == 15) {
+		eax.split.version_id = 2;
+		eax.split.num_counters = 2;
+		eax.split.bit_width = 40;
+	}
+
+	num_counters = eax.split.num_counters;
+
+	op_arch_perfmon_spec.num_counters = num_counters;
+	op_arch_perfmon_spec.num_controls = num_counters;
+	op_ppro_spec.num_counters = num_counters;
+	op_ppro_spec.num_controls = num_counters;
+}
+
+struct op_x86_model_spec op_arch_perfmon_spec = {
+	/* num_counters/num_controls filled in at runtime */
+	.fill_in_addresses	= &ppro_fill_in_addresses,
+	/* user space does the cpuid check for available events */
+	.setup_ctrs		= &ppro_setup_ctrs,
+	.check_ctrs		= &ppro_check_ctrs,
+	.start			= &ppro_start,
+	.stop			= &ppro_stop,
+	.shutdown		= &ppro_shutdown
 };
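
The union cpuid10_eax fields that arch_perfmon_setup_counters() relies on are the standard CPUID leaf 0xA EAX layout: version in bits 7:0, number of general-purpose counters in bits 15:8, counter bit width in bits 23:16. A minimal user-space sketch (GCC's <cpuid.h>, x86 only) that decodes the same fields:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xA not available");
		return 1;
	}

	unsigned int version      = eax & 0xff;
	unsigned int num_counters = (eax >> 8) & 0xff;
	unsigned int bit_width    = (eax >> 16) & 0xff;

	/* The driver additionally applies a BIOS workaround for family 6
	 * model 15 when version reads back as 0; omitted here. */
	printf("version %u, counters %u, width %u bits\n",
	       version, num_counters, bit_width);
	return 0;
}
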
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 05a0261..825e790 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -22,8 +22,8 @@
 };
 
 struct op_msrs {
-	struct op_msr * counters;
-	struct op_msr * controls;
+	struct op_msr *counters;
+	struct op_msr *controls;
 };
 
 struct pt_regs;
@@ -34,8 +34,8 @@
 struct op_x86_model_spec {
 	int (*init)(struct oprofile_operations *ops);
 	void (*exit)(void);
-	unsigned int const num_counters;
-	unsigned int const num_controls;
+	unsigned int num_counters;
+	unsigned int num_controls;
 	void (*fill_in_addresses)(struct op_msrs * const msrs);
 	void (*setup_ctrs)(struct op_msrs const * const msrs);
 	int (*check_ctrs)(struct pt_regs * const regs,
@@ -45,9 +45,12 @@
 	void (*shutdown)(struct op_msrs const * const msrs);
 };
 
-extern struct op_x86_model_spec const op_ppro_spec;
+extern struct op_x86_model_spec op_ppro_spec;
 extern struct op_x86_model_spec const op_p4_spec;
 extern struct op_x86_model_spec const op_p4_ht2_spec;
 extern struct op_x86_model_spec const op_amd_spec;
+extern struct op_x86_model_spec op_arch_perfmon_spec;
+
+extern void arch_perfmon_setup_counters(void);
 
 #endif /* OP_X86_MODEL_H */
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 006599d..bf69dbe 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -493,7 +493,7 @@
 	if (pirq <= 4)
 		irq = read_config_nybble(router, 0x56, pirq - 1);
 	dev_info(&dev->dev,
-		 "AMD756: dev [%04x/%04x], router PIRQ %d get IRQ %d\n",
+		 "AMD756: dev [%04x:%04x], router PIRQ %d get IRQ %d\n",
 		 dev->vendor, dev->device, pirq, irq);
 	return irq;
 }
@@ -501,7 +501,7 @@
 static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
 {
 	dev_info(&dev->dev,
-		 "AMD756: dev [%04x/%04x], router PIRQ %d set IRQ %d\n",
+		 "AMD756: dev [%04x:%04x], router PIRQ %d set IRQ %d\n",
 		 dev->vendor, dev->device, pirq, irq);
 	if (pirq <= 4)
 		write_config_nybble(router, 0x56, pirq - 1, irq);
@@ -590,13 +590,20 @@
 	case PCI_DEVICE_ID_INTEL_ICH10_1:
 	case PCI_DEVICE_ID_INTEL_ICH10_2:
 	case PCI_DEVICE_ID_INTEL_ICH10_3:
-	case PCI_DEVICE_ID_INTEL_PCH_0:
-	case PCI_DEVICE_ID_INTEL_PCH_1:
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
 		return 1;
 	}
+
+	if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) && 
+		(device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+		r->name = "PIIX/ICH";
+		r->get = pirq_piix_get;
+		r->set = pirq_piix_set;
+		return 1;
+	}
+
 	return 0;
 }
 
@@ -823,7 +830,7 @@
 	r->get = NULL;
 	r->set = NULL;
 
-	DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
+	DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n",
 	    rt->rtr_vendor, rt->rtr_device);
 
 	pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
@@ -843,7 +850,7 @@
 			h->probe(r, pirq_router_dev, pirq_router_dev->device))
 			break;
 	}
-	dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x/%04x]\n",
+	dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x:%04x]\n",
 		 pirq_router.name,
 		 pirq_router_dev->vendor, pirq_router_dev->device);
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0013a72..b61534c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -871,6 +871,7 @@
 			/* make sure there are no stray mappings of
 			   this page */
 			kmap_flush_unused();
+			vm_unmap_aliases();
 	}
 }
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 28b85ab..bb04260 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -21,7 +21,6 @@
 
 static void __init __xen_init_IRQ(void)
 {
-#ifdef CONFIG_X86_64
 	int i;
 
 	/* Create identity vector->irq map */
@@ -31,7 +30,6 @@
 		for_each_possible_cpu(cpu)
 			per_cpu(vector_irq, cpu)[i] = i;
 	}
-#endif	/* CONFIG_X86_64 */
 
 	xen_init_IRQ();
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ae173f6..d4d52f5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -846,6 +846,7 @@
 		/* re-enable interrupts for kmap_flush_unused */
 		xen_mc_issue(0);
 		kmap_flush_unused();
+		vm_unmap_aliases();
 		xen_mc_batch();
 	}
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index dd71e3a..5601506 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -241,7 +241,7 @@
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
 	raw_local_irq_restore(flags);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 02e417d..6c873dc 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -55,6 +55,7 @@
 	default 100
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
@@ -63,7 +64,12 @@
 	default XTENSA_VARIANT_FSF
 
 config XTENSA_VARIANT_FSF
-	bool "fsf"
+	bool "fsf - default (not generic) configuration"
+
+config XTENSA_VARIANT_DC232B
+	bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
+	help
+	This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).
 endchoice
 
 config MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 4bd1e14..015b6b2 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -14,6 +14,7 @@
 # (Use VAR=<xtensa_config> to use another default compiler.)
 
 variant-$(CONFIG_XTENSA_VARIANT_FSF)		:= fsf
+variant-$(CONFIG_XTENSA_VARIANT_DC232B)		:= dc232b
 variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM)	:= custom
 
 VARIANT = $(variant-y)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index c9ea73b..5fbcde5 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -48,7 +48,7 @@
 
 	if (irq >= NR_IRQS) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__FUNCTION__, irq);
+				__func__, irq);
 	}
 
 	irq_enter();
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index a2e2522..11a20ad 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -640,7 +640,7 @@
 	*lp = ((struct iss_net_private) {
 		.device_list		= LIST_HEAD_INIT(lp->device_list),
 		.opened_list		= LIST_HEAD_INIT(lp->opened_list),
-		.lock			= SPIN_LOCK_UNLOCKED,
+		.lock			= __SPIN_LOCK_UNLOCKED(lp.lock),
 		.dev			= dev,
 		.index			= index,
 		//.fd                   = -1,
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index e8362c1..dcbf1be 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -115,34 +115,32 @@
  *	(start) dependent operations on their target channel
  * @tx: transaction with dependencies
  */
-void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
+void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *next = tx->next;
+	struct dma_async_tx_descriptor *dep = tx->next;
+	struct dma_async_tx_descriptor *dep_next;
 	struct dma_chan *chan;
 
-	if (!next)
+	if (!dep)
 		return;
 
-	tx->next = NULL;
-	chan = next->chan;
+	chan = dep->chan;
 
 	/* keep submitting up until a channel switch is detected
 	 * in that case we will be called again as a result of
 	 * processing the interrupt from async_tx_channel_switch
 	 */
-	while (next && next->chan == chan) {
-		struct dma_async_tx_descriptor *_next;
+	for (; dep; dep = dep_next) {
+		spin_lock_bh(&dep->lock);
+		dep->parent = NULL;
+		dep_next = dep->next;
+		if (dep_next && dep_next->chan == chan)
+			dep->next = NULL; /* ->next will be submitted */
+		else
+			dep_next = NULL; /* submit current dep and terminate */
+		spin_unlock_bh(&dep->lock);
 
-		spin_lock_bh(&next->lock);
-		next->parent = NULL;
-		_next = next->next;
-		if (_next && _next->chan == chan)
-			next->next = NULL;
-		spin_unlock_bh(&next->lock);
-
-		next->tx_submit(next);
-		next = _next;
+		dep->tx_submit(dep);
 	}
 
 	chan->device->device_issue_pending(chan);
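
The reworked loop above submits each dependent descriptor while holding only that descriptor's lock and stops once the chain crosses to a different channel. A small user-space sketch of the same walk, using toy types rather than the dmaengine API:

#include <pthread.h>
#include <stdio.h>

/* Toy descriptor mirroring only the fields the loop touches. */
struct desc {
	struct desc *next;
	struct desc *parent;
	int chan;                 /* stand-in for dep->chan */
	pthread_mutex_t lock;
};

static void tx_submit(struct desc *d)
{
	printf("submit descriptor on chan %d\n", d->chan);
}

/* Same shape as the rewritten async_tx_run_dependencies(): walk the
 * dependency chain, submitting descriptors until the channel changes. */
static void run_dependencies(struct desc *tx)
{
	struct desc *dep = tx->next, *dep_next;
	int chan;

	if (!dep)
		return;
	chan = dep->chan;

	for (; dep; dep = dep_next) {
		pthread_mutex_lock(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL;      /* ->next will be submitted */
		else
			dep_next = NULL;       /* submit this one and stop */
		pthread_mutex_unlock(&dep->lock);

		tx_submit(dep);
	}
}

int main(void)
{
	struct desc d[4];

	for (int i = 0; i < 4; i++) {
		d[i].next = (i < 3) ? &d[i + 1] : NULL;
		d[i].parent = (i > 0) ? &d[i - 1] : NULL;
		d[i].chan = (i < 3) ? 1 : 2;   /* chain switches channel at the end */
		pthread_mutex_init(&d[i].lock, NULL);
	}
	run_dependencies(&d[0]);   /* submits d[1] and d[2]; d[3] is on another chan */
	return 0;
}
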
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d19b6f5..d38f43f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -78,6 +78,8 @@
 
 source "drivers/usb/Kconfig"
 
+source "drivers/uwb/Kconfig"
+
 source "drivers/mmc/Kconfig"
 
 source "drivers/memstick/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 46c8681..cadc64f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -100,3 +100,4 @@
 obj-$(CONFIG_VIRTIO)		+= virtio/
 obj-$(CONFIG_REGULATOR)		+= regulator/
 obj-$(CONFIG_STAGING)		+= staging/
+obj-$(CONFIG_UWB)		+= uwb/
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b1c723f..70f7f60 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -431,7 +431,7 @@
 }
 
 static struct device_attribute alarm_attr = {
-	.attr = {.name = "alarm", .mode = 0644, .owner = THIS_MODULE},
+	.attr = {.name = "alarm", .mode = 0644},
 	.show = acpi_battery_alarm_show,
 	.store = acpi_battery_alarm_store,
 };
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d5b4ef8..8d4a568 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -150,7 +150,7 @@
 	}
 
 	snprintf(name, sizeof(name), "%u", (u32)sun);
-	pci_slot = pci_create_slot(pci_bus, device, name);
+	pci_slot = pci_create_slot(pci_bus, device, name, NULL);
 	if (IS_ERR(pci_slot)) {
 		err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
 		kfree(slot);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 10a3651..7b011e7 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -463,7 +463,7 @@
 }
 
 static struct device_attribute alarm_attr = {
-	.attr = {.name = "alarm", .mode = 0644, .owner = THIS_MODULE},
+	.attr = {.name = "alarm", .mode = 0644},
 	.show = acpi_battery_alarm_show,
 	.store = acpi_battery_alarm_store,
 };
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index bf5b04d..631ee2e 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -120,13 +120,13 @@
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(sec);
-		BCD_TO_BIN(min);
-		BCD_TO_BIN(hr);
-		BCD_TO_BIN(day);
-		BCD_TO_BIN(mo);
-		BCD_TO_BIN(yr);
-		BCD_TO_BIN(cent);
+		sec = bcd2bin(sec);
+		min = bcd2bin(min);
+		hr = bcd2bin(hr);
+		day = bcd2bin(day);
+		mo = bcd2bin(mo);
+		yr = bcd2bin(yr);
+		cent = bcd2bin(cent);
 	}
 
 	/* we're trusting the FADT (see above) */
@@ -204,7 +204,7 @@
 {
 	u32 val = CMOS_READ(offset);
 	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BCD_TO_BIN(val);
+		val = bcd2bin(val);
 	return val;
 }
 
@@ -212,7 +212,7 @@
 static void cmos_bcd_write(u32 val, int offset, int rtc_control)
 {
 	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BIN_TO_BCD(val);
+		val = bin2bcd(val);
 	CMOS_WRITE(val, offset);
 }
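
Several hunks in this series (here and in the RTC drivers further down) replace the BCD_TO_BIN/BIN_TO_BCD macros, which modified their argument in place, with bcd2bin()/bin2bcd() functions that return the converted value. For reference, a stand-alone sketch of the conversions themselves, mirroring the helpers in include/linux/bcd.h:

#include <stdio.h>

static unsigned bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned val)
{
	return ((val / 10) << 4) + val % 10;
}

int main(void)
{
	/* An RTC storing "59 minutes" in BCD holds the byte 0x59. */
	printf("bcd2bin(0x59) = %u\n", bcd2bin(0x59));   /* 59 */
	printf("bin2bcd(59)   = 0x%02x\n", bin2bcd(59)); /* 0x59 */
	return 0;
}
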
 
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 91dec44..24e80fd 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -115,7 +115,6 @@
 	table_attr->attr.read = acpi_table_show;
 	table_attr->attr.attr.name = table_attr->name;
 	table_attr->attr.attr.mode = 0444;
-	table_attr->attr.attr.owner = THIS_MODULE;
 
 	return;
 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1ee9499..bbb3cae 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5373,6 +5373,8 @@
 
 #ifdef CONFIG_ATA_SFF
 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
+#else
+	INIT_DELAYED_WORK(&ap->port_task, NULL);
 #endif
 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a93247c..5d687d7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1206,7 +1206,10 @@
 
 	ata_eh_clear_action(link, dev, ehi, action);
 
-	if (!(ehc->i.flags & ATA_EHI_QUIET))
+	/* About to take EH action, set RECOVERED.  Ignore actions on
+	 * slave links as master will do them again.
+	 */
+	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
 		ap->pflags |= ATA_PFLAG_RECOVERED;
 
 	spin_unlock_irqrestore(ap->lock, flags);
@@ -2010,8 +2013,13 @@
 		struct ata_eh_context *mehc = &ap->link.eh_context;
 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
 
+		/* transfer control flags from master to slave */
+		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
+
+		/* perform autopsy on the slave link */
 		ata_eh_link_autopsy(ap->slave_link);
 
+		/* transfer actions from slave to master and clear slave */
 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
 		mehc->i.action		|= sehc->i.action;
 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
@@ -2447,14 +2455,14 @@
 		dev->pio_mode = XFER_PIO_0;
 		dev->flags &= ~ATA_DFLAG_SLEEPING;
 
-		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
-			continue;
-
-		/* apply class override */
-		if (lflags & ATA_LFLAG_ASSUME_ATA)
-			classes[dev->devno] = ATA_DEV_ATA;
-		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
-			classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
+		if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
+			/* apply class override */
+			if (lflags & ATA_LFLAG_ASSUME_ATA)
+				classes[dev->devno] = ATA_DEV_ATA;
+			else if (lflags & ATA_LFLAG_ASSUME_SEMB)
+				classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
+		} else
+			classes[dev->devno] = ATA_DEV_NONE;
 	}
 
 	/* record current link speed */
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2a4c516..4b47394 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2153,8 +2153,17 @@
  */
 void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
 {
-	if (qc->ap->ioaddr.bmdma_addr)
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (ap->ioaddr.bmdma_addr)
 		ata_bmdma_stop(qc);
+
+	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 1cfa745..5b72e73 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -70,6 +70,7 @@
 static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
 static void svia_noop_freeze(struct ata_port *ap);
 static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
 static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -103,21 +104,26 @@
 	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static struct ata_port_operations vt6420_sata_ops = {
+static struct ata_port_operations svia_base_ops = {
 	.inherits		= &ata_bmdma_port_ops,
+	.sff_tf_load		= svia_tf_load,
+};
+
+static struct ata_port_operations vt6420_sata_ops = {
+	.inherits		= &svia_base_ops,
 	.freeze			= svia_noop_freeze,
 	.prereset		= vt6420_prereset,
 };
 
 static struct ata_port_operations vt6421_pata_ops = {
-	.inherits		= &ata_bmdma_port_ops,
+	.inherits		= &svia_base_ops,
 	.cable_detect		= vt6421_pata_cable_detect,
 	.set_piomode		= vt6421_set_pio_mode,
 	.set_dmamode		= vt6421_set_dma_mode,
 };
 
 static struct ata_port_operations vt6421_sata_ops = {
-	.inherits		= &ata_bmdma_port_ops,
+	.inherits		= &svia_base_ops,
 	.scr_read		= svia_scr_read,
 	.scr_write		= svia_scr_write,
 };
@@ -168,6 +174,29 @@
 	return 0;
 }
 
+/**
+ *	svia_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	This works around an internal bug of VIA chipsets, which
+ *	reset the device register after the IEN bit in the ctl
+ *	register is changed.
+ */
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_taskfile ttf;
+
+	if (tf->ctl != ap->last_ctl)  {
+		ttf = *tf;
+		ttf.flags |= ATA_TFLAG_DEVICE;
+		tf = &ttf;
+	}
+	ata_sff_tf_load(ap, tf);
+}
+
 static void svia_noop_freeze(struct ata_port *ap)
 {
 	/* Some VIA controllers choke if ATA_NIEN is manipulated in
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index af0d175..5260e9e 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -21,6 +21,8 @@
 #include <linux/memory_hotplug.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/stat.h>
+
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
@@ -325,7 +327,7 @@
 
 	return count;
 }
-static CLASS_ATTR(probe, 0700, NULL, memory_probe_store);
+static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
 
 static int memory_probe_init(void)
 {
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 5116b78..f520709 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -13,6 +13,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/device.h>
+#include <linux/swap.h>
 
 static struct sysdev_class node_class = {
 	.name = "node",
@@ -61,34 +62,52 @@
 	si_meminfo_node(&i, nid);
 
 	n = sprintf(buf, "\n"
-		       "Node %d MemTotal:     %8lu kB\n"
-		       "Node %d MemFree:      %8lu kB\n"
-		       "Node %d MemUsed:      %8lu kB\n"
-		       "Node %d Active:       %8lu kB\n"
-		       "Node %d Inactive:     %8lu kB\n"
-#ifdef CONFIG_HIGHMEM
-		       "Node %d HighTotal:    %8lu kB\n"
-		       "Node %d HighFree:     %8lu kB\n"
-		       "Node %d LowTotal:     %8lu kB\n"
-		       "Node %d LowFree:      %8lu kB\n"
+		       "Node %d MemTotal:       %8lu kB\n"
+		       "Node %d MemFree:        %8lu kB\n"
+		       "Node %d MemUsed:        %8lu kB\n"
+		       "Node %d Active:         %8lu kB\n"
+		       "Node %d Inactive:       %8lu kB\n"
+		       "Node %d Active(anon):   %8lu kB\n"
+		       "Node %d Inactive(anon): %8lu kB\n"
+		       "Node %d Active(file):   %8lu kB\n"
+		       "Node %d Inactive(file): %8lu kB\n"
+#ifdef CONFIG_UNEVICTABLE_LRU
+		       "Node %d Unevictable:    %8lu kB\n"
+		       "Node %d Mlocked:        %8lu kB\n"
 #endif
-		       "Node %d Dirty:        %8lu kB\n"
-		       "Node %d Writeback:    %8lu kB\n"
-		       "Node %d FilePages:    %8lu kB\n"
-		       "Node %d Mapped:       %8lu kB\n"
-		       "Node %d AnonPages:    %8lu kB\n"
-		       "Node %d PageTables:   %8lu kB\n"
-		       "Node %d NFS_Unstable: %8lu kB\n"
-		       "Node %d Bounce:       %8lu kB\n"
-		       "Node %d WritebackTmp: %8lu kB\n"
-		       "Node %d Slab:         %8lu kB\n"
-		       "Node %d SReclaimable: %8lu kB\n"
-		       "Node %d SUnreclaim:   %8lu kB\n",
+#ifdef CONFIG_HIGHMEM
+		       "Node %d HighTotal:      %8lu kB\n"
+		       "Node %d HighFree:       %8lu kB\n"
+		       "Node %d LowTotal:       %8lu kB\n"
+		       "Node %d LowFree:        %8lu kB\n"
+#endif
+		       "Node %d Dirty:          %8lu kB\n"
+		       "Node %d Writeback:      %8lu kB\n"
+		       "Node %d FilePages:      %8lu kB\n"
+		       "Node %d Mapped:         %8lu kB\n"
+		       "Node %d AnonPages:      %8lu kB\n"
+		       "Node %d PageTables:     %8lu kB\n"
+		       "Node %d NFS_Unstable:   %8lu kB\n"
+		       "Node %d Bounce:         %8lu kB\n"
+		       "Node %d WritebackTmp:   %8lu kB\n"
+		       "Node %d Slab:           %8lu kB\n"
+		       "Node %d SReclaimable:   %8lu kB\n"
+		       "Node %d SUnreclaim:     %8lu kB\n",
 		       nid, K(i.totalram),
 		       nid, K(i.freeram),
 		       nid, K(i.totalram - i.freeram),
-		       nid, K(node_page_state(nid, NR_ACTIVE)),
-		       nid, K(node_page_state(nid, NR_INACTIVE)),
+		       nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
+				node_page_state(nid, NR_ACTIVE_FILE)),
+		       nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
+				node_page_state(nid, NR_INACTIVE_FILE)),
+		       nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
+		       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
+		       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
+		       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
+#ifdef CONFIG_UNEVICTABLE_LRU
+		       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
+		       nid, K(node_page_state(nid, NR_MLOCK)),
+#endif
 #ifdef CONFIG_HIGHMEM
 		       nid, K(i.totalhigh),
 		       nid, K(i.freehigh),
@@ -173,6 +192,8 @@
 		sysdev_create_file(&node->sysdev, &attr_meminfo);
 		sysdev_create_file(&node->sysdev, &attr_numastat);
 		sysdev_create_file(&node->sysdev, &attr_distance);
+
+		scan_unevictable_register_node(node);
 	}
 	return error;
 }
@@ -192,6 +213,8 @@
 	sysdev_remove_file(&node->sysdev, &attr_numastat);
 	sysdev_remove_file(&node->sysdev, &attr_distance);
 
+	scan_unevictable_unregister_node(node);
+
 	sysdev_unregister(&node->sysdev);
 }
 
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index b82654e..d876ad8 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -90,7 +90,7 @@
 static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
 static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
 static struct device_attribute dev_attr_firmware_version = {
-	.attr = { .name = "firmware-version", .mode = S_IRUGO, .owner = THIS_MODULE },
+	.attr = { .name = "firmware-version", .mode = S_IRUGO },
 	.show = aoedisk_show_fwver,
 };
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7b33512..9034ca5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -391,7 +391,7 @@
 }
 
 static struct device_attribute pid_attr = {
-	.attr = { .name = "pid", .mode = S_IRUGO, .owner = THIS_MODULE },
+	.attr = { .name = "pid", .mode = S_IRUGO},
 	.show = pid_show,
 };
 
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 3a281ef..f60e418 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -349,8 +349,6 @@
 
 	struct work_struct reset_work;
 	wait_queue_head_t reset_wait;
-
-	int sg_stat[6];
 };
 
 /*
@@ -685,7 +683,6 @@
 		goto drop;
 	}
 	urq->nsg = n_elem;
-	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
 
 	if (blk_pc_request(rq)) {
 		ub_cmd_build_packet(sc, lun, cmd, urq);
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 31dcd91..dc8d1a9 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@
 module_init(agp_ali_init);
 module_exit(agp_ali_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
 MODULE_LICENSE("GPL and additional rights");
 
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 2812ee2..52f4361 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -772,6 +772,6 @@
 module_exit(agp_amd64_cleanup);
 #endif
 
-MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
 module_param(agp_try_unsupported, bool, 0);
 MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index ae2791b..f1537ee 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -561,6 +561,6 @@
 module_init(agp_ati_init);
 module_exit(agp_ati_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
 MODULE_LICENSE("GPL and additional rights");
 
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 3a3cc03..8c617ad 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -349,7 +349,7 @@
 __setup("agp=", agp_setup);
 #endif
 
-MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
 MODULE_DESCRIPTION("AGP GART driver");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 1108665..9cf6e9b 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2390,5 +2390,5 @@
 module_init(agp_intel_init);
 module_exit(agp_intel_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 5bbed3d..16acee2 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
 /*
  * Nvidia AGPGART routines.
  * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@codemonkey.org.uk>
+ * to work in 2.5 by Dave Jones <davej@redhat.com>
  */
 
 #include <linux/module.h>
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index f2492ec..db60539 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -20,8 +20,8 @@
 #include <linux/agp_backend.h>
 #include <linux/log2.h>
 
-#include <asm-parisc/parisc-device.h>
-#include <asm-parisc/ropes.h>
+#include <asm/parisc-device.h>
+#include <asm/ropes.h>
 
 #include "agp.h"
 
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 9f4d49e..d3bd243 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@
 module_exit(agp_via_cleanup);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c
index 5329d48..0a826d7 100644
--- a/drivers/char/ds1286.c
+++ b/drivers/char/ds1286.c
@@ -210,8 +210,8 @@
 		if (sec != 0)
 			return -EINVAL;
 
-		min = BIN2BCD(min);
-		min = BIN2BCD(hrs);
+		min = bin2bcd(min);
+		min = bin2bcd(hrs);
 
 		spin_lock(&ds1286_lock);
 		rtc_write(hrs, RTC_HOURS_ALARM);
@@ -353,7 +353,7 @@
 
 	ds1286_get_time(&tm);
 	hundredth = rtc_read(RTC_HUNDREDTH_SECOND);
-	BCD_TO_BIN(hundredth);
+	hundredth = bcd2bin(hundredth);
 
 	p += sprintf(p,
 	             "rtc_time\t: %02d:%02d:%02d.%02d\n"
@@ -477,12 +477,12 @@
 	rtc_write(save_control, RTC_CMD);
 	spin_unlock_irqrestore(&ds1286_lock, flags);
 
-	BCD_TO_BIN(rtc_tm->tm_sec);
-	BCD_TO_BIN(rtc_tm->tm_min);
-	BCD_TO_BIN(rtc_tm->tm_hour);
-	BCD_TO_BIN(rtc_tm->tm_mday);
-	BCD_TO_BIN(rtc_tm->tm_mon);
-	BCD_TO_BIN(rtc_tm->tm_year);
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
 
 	/*
 	 * Account for differences between how the RTC uses the values
@@ -531,12 +531,12 @@
 	if (yrs >= 100)
 		yrs -= 100;
 
-	BIN_TO_BCD(sec);
-	BIN_TO_BCD(min);
-	BIN_TO_BCD(hrs);
-	BIN_TO_BCD(day);
-	BIN_TO_BCD(mon);
-	BIN_TO_BCD(yrs);
+	sec = bin2bcd(sec);
+	min = bin2bcd(min);
+	hrs = bin2bcd(hrs);
+	day = bin2bcd(day);
+	mon = bin2bcd(mon);
+	yrs = bin2bcd(yrs);
 
 	spin_lock_irqsave(&ds1286_lock, flags);
 	save_control = rtc_read(RTC_CMD);
@@ -572,8 +572,8 @@
 	cmd = rtc_read(RTC_CMD);
 	spin_unlock_irqrestore(&ds1286_lock, flags);
 
-	BCD_TO_BIN(alm_tm->tm_min);
-	BCD_TO_BIN(alm_tm->tm_hour);
+	alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
+	alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
 	alm_tm->tm_sec = 0;
 }
 
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index c5e67a6..170693c 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -131,12 +131,12 @@
 
 	local_irq_restore(flags);
 
-	BCD_TO_BIN(rtc_tm->tm_sec);
-	BCD_TO_BIN(rtc_tm->tm_min);
-	BCD_TO_BIN(rtc_tm->tm_hour);
-	BCD_TO_BIN(rtc_tm->tm_mday);
-	BCD_TO_BIN(rtc_tm->tm_mon);
-	BCD_TO_BIN(rtc_tm->tm_year);
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
 
 	/*
 	 * Account for differences between how the RTC uses the values
@@ -211,12 +211,12 @@
 			else
 				yrs -= 1900;	/* RTC (70, 71, ... 99) */
 
-			BIN_TO_BCD(sec);
-			BIN_TO_BCD(min);
-			BIN_TO_BCD(hrs);
-			BIN_TO_BCD(day);
-			BIN_TO_BCD(mon);
-			BIN_TO_BCD(yrs);
+			sec = bin2bcd(sec);
+			min = bin2bcd(min);
+			hrs = bin2bcd(hrs);
+			day = bin2bcd(day);
+			mon = bin2bcd(mon);
+			yrs = bin2bcd(yrs);
 
 			lock_kernel();
 			local_irq_save(flags);
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 4998b27..cf2461d 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -2477,7 +2477,11 @@
 	unsigned long flags;
 
 	if (msec == -1)
-		return -EOPNOTSUPP;
+		msec = 0xFFFF;
+	else if (msec > 0xFFFE)
+		msec = 0xFFFE;
+	else if (msec < 1)
+		msec = 1;
 
 	spin_lock_irqsave(&epca_lock, flags);
 	globalwinon(ch);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index f3cfb4c..408f5f9 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -219,7 +219,7 @@
 	for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ;
 		irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
 
-		if (irq >= NR_IRQS) {
+		if (irq >= nr_irqs) {
 			irq = HPET_MAX_IRQ;
 			break;
 		}
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index bf70450..5b819b1 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -161,7 +161,7 @@
 			}
 		} else {
 			r = cons_ops[index]->put_chars(vtermnos[index], c, i);
-			if (r < 0) {
+			if (r <= 0) {
 				/* throw away chars on error */
 				i = 0;
 			} else if (r > 0) {
@@ -374,6 +374,9 @@
 		if (hp->ops->notifier_del)
 			hp->ops->notifier_del(hp, hp->data);
 
+		/* cancel pending tty resize work */
+		cancel_work_sync(&hp->tty_resize);
+
 		/*
 		 * Chain calls chars_in_buffer() and returns immediately if
 		 * there is no buffered data otherwise sleeps on a wait queue
@@ -399,6 +402,9 @@
 	if (!hp)
 		return;
 
+	/* cancel pending tty resize work */
+	cancel_work_sync(&hp->tty_resize);
+
 	spin_lock_irqsave(&hp->lock, flags);
 
 	/*
@@ -418,8 +424,8 @@
 
 	spin_unlock_irqrestore(&hp->lock, flags);
 
-	if (hp->ops->notifier_del)
-			hp->ops->notifier_del(hp, hp->data);
+	if (hp->ops->notifier_hangup)
+			hp->ops->notifier_hangup(hp, hp->data);
 
 	while(temp_open_count) {
 		--temp_open_count;
@@ -431,7 +437,7 @@
  * Push buffered characters whether they were just recently buffered or waiting
  * on a blocked hypervisor.  Call this function with hp->lock held.
  */
-static void hvc_push(struct hvc_struct *hp)
+static int hvc_push(struct hvc_struct *hp)
 {
 	int n;
 
@@ -439,7 +445,7 @@
 	if (n <= 0) {
 		if (n == 0) {
 			hp->do_wakeup = 1;
-			return;
+			return 0;
 		}
 		/* throw away output on error; this happens when
 		   there is no session connected to the vterm. */
@@ -450,6 +456,8 @@
 		memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
 	else
 		hp->do_wakeup = 1;
+
+	return n;
 }
 
 static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -492,6 +500,39 @@
 	return written;
 }
 
+/**
+ * hvc_set_winsz() - Resize the hvc tty terminal window.
+ * @work:	work structure.
+ *
+ * The routine shall not be called within an atomic context because it
+ * might sleep.
+ *
+ * Locking:	hp->lock
+ */
+static void hvc_set_winsz(struct work_struct *work)
+{
+	struct hvc_struct *hp;
+	unsigned long hvc_flags;
+	struct tty_struct *tty;
+	struct winsize ws;
+
+	hp = container_of(work, struct hvc_struct, tty_resize);
+	if (!hp)
+		return;
+
+	spin_lock_irqsave(&hp->lock, hvc_flags);
+	if (!hp->tty) {
+		spin_unlock_irqrestore(&hp->lock, hvc_flags);
+		return;
+	}
+	ws  = hp->ws;
+	tty = tty_kref_get(hp->tty);
+	spin_unlock_irqrestore(&hp->lock, hvc_flags);
+
+	tty_do_resize(tty, tty, &ws);
+	tty_kref_put(tty);
+}
+
 /*
  * This is actually a contract between the driver and the tty layer outlining
  * how much write room the driver can guarantee will be sent OR BUFFERED.  This
@@ -538,16 +579,20 @@
 	char buf[N_INBUF] __ALIGNED__;
 	unsigned long flags;
 	int read_total = 0;
+	int written_total = 0;
 
 	spin_lock_irqsave(&hp->lock, flags);
 
 	/* Push pending writes */
 	if (hp->n_outbuf > 0)
-		hvc_push(hp);
+		written_total = hvc_push(hp);
 
 	/* Reschedule us if still some write pending */
-	if (hp->n_outbuf > 0)
+	if (hp->n_outbuf > 0) {
 		poll_mask |= HVC_POLL_WRITE;
+		/* If hvc_push() was not able to write, sleep a few msecs */
+		timeout = (written_total) ? 0 : MIN_TIMEOUT;
+	}
 
 	/* No tty attached, just skip */
 	tty = hp->tty;
@@ -632,6 +677,24 @@
 }
 EXPORT_SYMBOL_GPL(hvc_poll);
 
+/**
+ * hvc_resize() - Update terminal window size information.
+ * @hp:		HVC console pointer
+ * @ws:		Terminal window size structure
+ *
+ * Stores the specified window size information in the hvc structure of @hp.
+ * The function schedules the tty resize update.
+ *
+ * Locking:	Locking free; the function MUST be called holding hp->lock
+ */
+void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+{
+	if ((hp->ws.ws_row != ws.ws_row) || (hp->ws.ws_col != ws.ws_col)) {
+		hp->ws = ws;
+		schedule_work(&hp->tty_resize);
+	}
+}
+
 /*
  * This kthread is either polling or interrupt driven.  This is determined by
  * calling hvc_poll() who determines whether a console adapter support
@@ -659,10 +722,6 @@
 			poll_mask |= HVC_POLL_READ;
 		if (hvc_kicked)
 			continue;
-		if (poll_mask & HVC_POLL_WRITE) {
-			yield();
-			continue;
-		}
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!hvc_kicked) {
 			if (poll_mask == 0)
@@ -718,6 +777,7 @@
 
 	kref_init(&hp->kref);
 
+	INIT_WORK(&hp->tty_resize, hvc_set_winsz);
 	spin_lock_init(&hp->lock);
 	spin_lock(&hvc_structs_lock);
 
@@ -743,7 +803,7 @@
 }
 EXPORT_SYMBOL_GPL(hvc_alloc);
 
-int __devexit hvc_remove(struct hvc_struct *hp)
+int hvc_remove(struct hvc_struct *hp)
 {
 	unsigned long flags;
 	struct tty_struct *tty;
@@ -796,7 +856,7 @@
 	drv->minor_start = HVC_MINOR;
 	drv->type = TTY_DRIVER_TYPE_SYSTEM;
 	drv->init_termios = tty_std_termios;
-	drv->flags = TTY_DRIVER_REAL_RAW;
+	drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
 	tty_set_operations(drv, &hvc_ops);
 
 	/* Always start the kthread because there can be hotplug vty adapters
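
The resize path added to hvc_console.c above follows a common split: the caller records the new window size under hp->lock and schedules work, and the worker later snapshots the size under the same lock before doing the potentially sleeping resize with the lock dropped. A minimal user-space analogue of that pattern (pthreads, not the hvc API):

#include <pthread.h>
#include <stdio.h>

struct console {
	pthread_mutex_t lock;
	int rows, cols;          /* stands in for hp->ws */
	int resize_pending;      /* stands in for the queued work item */
};

/* Interrupt-side analogue of hvc_resize(): record and flag, never sleep. */
static void console_resize(struct console *c, int rows, int cols)
{
	pthread_mutex_lock(&c->lock);
	if (c->rows != rows || c->cols != cols) {
		c->rows = rows;
		c->cols = cols;
		c->resize_pending = 1;   /* schedule_work() in the driver */
	}
	pthread_mutex_unlock(&c->lock);
}

/* Worker analogue of hvc_set_winsz(): snapshot under the lock, apply outside. */
static void console_do_resize(struct console *c)
{
	int rows, cols;

	pthread_mutex_lock(&c->lock);
	if (!c->resize_pending) {
		pthread_mutex_unlock(&c->lock);
		return;
	}
	rows = c->rows;          /* snapshot, like ws = hp->ws */
	cols = c->cols;
	c->resize_pending = 0;
	pthread_mutex_unlock(&c->lock);

	/* The potentially sleeping part runs without the lock held. */
	printf("apply resize to %dx%d\n", cols, rows);
}

int main(void)
{
	struct console c = { .rows = 24, .cols = 80, .resize_pending = 0 };

	pthread_mutex_init(&c.lock, NULL);
	console_resize(&c, 50, 132);
	console_do_resize(&c);
	return 0;
}
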
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 9790201..8297dbc 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -27,6 +27,7 @@
 #ifndef HVC_CONSOLE_H
 #define HVC_CONSOLE_H
 #include <linux/kref.h>
+#include <linux/tty.h>
 
 /*
  * This is the max number of console adapters that can/will be found as
@@ -56,6 +57,8 @@
 	struct hv_ops *ops;
 	int irq_requested;
 	int data;
+	struct winsize ws;
+	struct work_struct tty_resize;
 	struct list_head next;
 	struct kref kref; /* ref count & hvc_struct lifetime */
 };
@@ -65,9 +68,10 @@
 	int (*get_chars)(uint32_t vtermno, char *buf, int count);
 	int (*put_chars)(uint32_t vtermno, const char *buf, int count);
 
-	/* Callbacks for notification. Called in open and close */
+	/* Callbacks for notification. Called in open, close and hangup */
 	int (*notifier_add)(struct hvc_struct *hp, int irq);
 	void (*notifier_del)(struct hvc_struct *hp, int irq);
+	void (*notifier_hangup)(struct hvc_struct *hp, int irq);
 };
 
 /* Register a vterm and a slot index for use as a console (console_init) */
@@ -77,15 +81,19 @@
 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
 				struct hv_ops *ops, int outbuf_size);
 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
-extern int __devexit hvc_remove(struct hvc_struct *hp);
+extern int hvc_remove(struct hvc_struct *hp);
 
 /* data available */
 int hvc_poll(struct hvc_struct *hp);
 void hvc_kick(void);
 
+/* Resize hvc tty terminal window */
+extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
+
 /* default notifier for irq based notification */
 extern int notifier_add_irq(struct hvc_struct *hp, int data);
 extern void notifier_del_irq(struct hvc_struct *hp, int data);
+extern void notifier_hangup_irq(struct hvc_struct *hp, int data);
 
 
 #if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c
index 73a59cd..d09e568 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/char/hvc_irq.c
@@ -42,3 +42,8 @@
 	free_irq(irq, hp);
 	hp->irq_requested = 0;
 }
+
+void notifier_hangup_irq(struct hvc_struct *hp, int irq)
+{
+	notifier_del_irq(hp, irq);
+}
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index b71c610..b74a2f8 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -202,6 +202,7 @@
 	.put_chars = put_chars,
 	.notifier_add = notifier_add_irq,
 	.notifier_del = notifier_del_irq,
+	.notifier_hangup = notifier_hangup_irq,
 };
 
 static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 93f3840..019e0b5 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -82,6 +82,7 @@
 	.put_chars = hvc_put_chars,
 	.notifier_add = notifier_add_irq,
 	.notifier_del = notifier_del_irq,
+	.notifier_hangup = notifier_hangup_irq,
 };
 
 static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 538ceea..eba999f 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -102,6 +102,7 @@
 	.put_chars = write_console,
 	.notifier_add = notifier_add_irq,
 	.notifier_del = notifier_del_irq,
+	.notifier_hangup = notifier_hangup_irq,
 };
 
 static int __init xen_init(void)
diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c
index ec9d044..2abd881 100644
--- a/drivers/char/ip27-rtc.c
+++ b/drivers/char/ip27-rtc.c
@@ -130,12 +130,12 @@
 		if (yrs >= 100)
 			yrs -= 100;
 
-		sec = BIN2BCD(sec);
-		min = BIN2BCD(min);
-		hrs = BIN2BCD(hrs);
-		day = BIN2BCD(day);
-		mon = BIN2BCD(mon);
-		yrs = BIN2BCD(yrs);
+		sec = bin2bcd(sec);
+		min = bin2bcd(min);
+		hrs = bin2bcd(hrs);
+		day = bin2bcd(day);
+		mon = bin2bcd(mon);
+		yrs = bin2bcd(yrs);
 
 		spin_lock_irq(&rtc_lock);
 		rtc->control |= M48T35_RTC_SET;
@@ -311,12 +311,12 @@
 	rtc->control &= ~M48T35_RTC_READ;
 	spin_unlock_irq(&rtc_lock);
 
-	rtc_tm->tm_sec = BCD2BIN(rtc_tm->tm_sec);
-	rtc_tm->tm_min = BCD2BIN(rtc_tm->tm_min);
-	rtc_tm->tm_hour = BCD2BIN(rtc_tm->tm_hour);
-	rtc_tm->tm_mday = BCD2BIN(rtc_tm->tm_mday);
-	rtc_tm->tm_mon = BCD2BIN(rtc_tm->tm_mon);
-	rtc_tm->tm_year = BCD2BIN(rtc_tm->tm_year);
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
 
 	/*
 	 * Account for differences between how the RTC uses the values
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index b930de5..3f7da8c 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -41,7 +41,8 @@
 #define SIO_BASE2       0x4E	/* alt command-reg to check */
 
 #define SIO_SID		0x20	/* SuperI/O ID Register */
-#define SIO_SID_VALUE	0xe9	/* Expected value in SuperI/O ID Register */
+#define SIO_SID_PC87365	0xe5	/* Expected value in ID Register for PC87365 */
+#define SIO_SID_PC87366	0xe9	/* Expected value in ID Register for PC87366 */
 
 #define SIO_CF1		0x21	/* chip config, bit0 is chip enable */
 
@@ -91,13 +92,17 @@
 
 static int pc8736x_superio_present(void)
 {
+	int id;
+
 	/* try the 2 possible values, read a hardware reg to verify */
 	superio_cmd = SIO_BASE1;
-	if (superio_inb(SIO_SID) == SIO_SID_VALUE)
+	id = superio_inb(SIO_SID);
+	if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
 		return superio_cmd;
 
 	superio_cmd = SIO_BASE2;
-	if (superio_inb(SIO_SID) == SIO_SID_VALUE)
+	id = superio_inb(SIO_SID);
+	if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
 		return superio_cmd;
 
 	return 0;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c8752ea..705a839 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -558,9 +558,26 @@
 	unsigned dont_count_entropy:1;
 };
 
-static struct timer_rand_state input_timer_state;
 static struct timer_rand_state *irq_timer_state[NR_IRQS];
 
+static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+	if (irq >= nr_irqs)
+		return NULL;
+
+	return irq_timer_state[irq];
+}
+
+static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+	if (irq >= nr_irqs)
+		return;
+
+	irq_timer_state[irq] = state;
+}
+
+static struct timer_rand_state input_timer_state;
+
 /*
  * This function adds entropy to the entropy "pool" by using timing
  * delays.  It uses the timer_rand_state structure to make an estimate
@@ -648,11 +665,15 @@
 
 void add_interrupt_randomness(int irq)
 {
-	if (irq >= NR_IRQS || irq_timer_state[irq] == NULL)
+	struct timer_rand_state *state;
+
+	state = get_timer_rand_state(irq);
+
+	if (state == NULL)
 		return;
 
 	DEBUG_ENT("irq event %d\n", irq);
-	add_timer_randomness(irq_timer_state[irq], 0x100 + irq);
+	add_timer_randomness(state, 0x100 + irq);
 }
 
 #ifdef CONFIG_BLOCK
@@ -912,7 +933,12 @@
 {
 	struct timer_rand_state *state;
 
-	if (irq >= NR_IRQS || irq_timer_state[irq])
+	if (irq >= nr_irqs)
+		return;
+
+	state = get_timer_rand_state(irq);
+
+	if (state)
 		return;
 
 	/*
@@ -921,7 +947,7 @@
 	 */
 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
 	if (state)
-		irq_timer_state[irq] = state;
+		set_timer_rand_state(irq, state);
 }
 
 #ifdef CONFIG_BLOCK
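
The random.c change routes every access to the per-IRQ table through get/set helpers that validate the index against the runtime nr_irqs limit, instead of open-coding the bound check at each call site. A tiny sketch of that accessor pattern (fixed-size table standing in for the kernel's):

#include <stdio.h>

#define MAX_IRQS 16          /* stand-in for the runtime nr_irqs limit */

struct timer_rand_state { int dummy; };

static struct timer_rand_state *irq_state[MAX_IRQS];

static struct timer_rand_state *get_state(unsigned int irq)
{
	if (irq >= MAX_IRQS)
		return NULL;
	return irq_state[irq];
}

static void set_state(unsigned int irq, struct timer_rand_state *state)
{
	if (irq >= MAX_IRQS)
		return;
	irq_state[irq] = state;
}

int main(void)
{
	static struct timer_rand_state s;

	set_state(3, &s);
	set_state(1000, &s);     /* silently ignored: out of range */
	printf("irq 3: %p, irq 1000: %p\n",
	       (void *)get_state(3), (void *)get_state(1000));
	return 0;
}
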
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 17683de..32dc897 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -518,17 +518,17 @@
 		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
 							RTC_ALWAYS_BCD) {
 			if (sec < 60)
-				BIN_TO_BCD(sec);
+				sec = bin2bcd(sec);
 			else
 				sec = 0xff;
 
 			if (min < 60)
-				BIN_TO_BCD(min);
+				min = bin2bcd(min);
 			else
 				min = 0xff;
 
 			if (hrs < 24)
-				BIN_TO_BCD(hrs);
+				hrs = bin2bcd(hrs);
 			else
 				hrs = 0xff;
 		}
@@ -614,12 +614,12 @@
 
 		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
 		    || RTC_ALWAYS_BCD) {
-			BIN_TO_BCD(sec);
-			BIN_TO_BCD(min);
-			BIN_TO_BCD(hrs);
-			BIN_TO_BCD(day);
-			BIN_TO_BCD(mon);
-			BIN_TO_BCD(yrs);
+			sec = bin2bcd(sec);
+			min = bin2bcd(min);
+			hrs = bin2bcd(hrs);
+			day = bin2bcd(day);
+			mon = bin2bcd(mon);
+			yrs = bin2bcd(yrs);
 		}
 
 		save_control = CMOS_READ(RTC_CONTROL);
@@ -1099,7 +1099,7 @@
 	spin_unlock_irq(&rtc_lock);
 
 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		BCD_TO_BIN(year);       /* This should never happen... */
+		year = bcd2bin(year);       /* This should never happen... */
 
 	if (year < 20) {
 		epoch = 2000;
@@ -1352,13 +1352,13 @@
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(rtc_tm->tm_sec);
-		BCD_TO_BIN(rtc_tm->tm_min);
-		BCD_TO_BIN(rtc_tm->tm_hour);
-		BCD_TO_BIN(rtc_tm->tm_mday);
-		BCD_TO_BIN(rtc_tm->tm_mon);
-		BCD_TO_BIN(rtc_tm->tm_year);
-		BCD_TO_BIN(rtc_tm->tm_wday);
+		rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+		rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+		rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+		rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+		rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+		rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+		rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
 	}
 
 #ifdef CONFIG_MACH_DECSTATION
@@ -1392,9 +1392,9 @@
 	spin_unlock_irq(&rtc_lock);
 
 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(alm_tm->tm_sec);
-		BCD_TO_BIN(alm_tm->tm_min);
-		BCD_TO_BIN(alm_tm->tm_hour);
+		alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
+		alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
+		alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
 	}
 }
 
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 5b8d7a1..ba4e862 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2504,7 +2504,7 @@
 		del_timer(&board->timer);
 		if (pdev) {
 #ifdef CONFIG_PCI
-			pci_iounmap(pdev, board->base2);
+			iounmap(board->base2);
 			pci_release_region(pdev, IS_CF_BOARD(board) ? 3 : 2);
 #endif
 		} else {
@@ -2677,7 +2677,7 @@
 	}
 	board->hw_base = pci_resource_start(pdev, reg);
 	board->base2 =
-	board->base = pci_iomap(pdev, reg, WINDOW_LEN(board));
+	board->base = ioremap_nocache(board->hw_base, WINDOW_LEN(board));
 	if (!board->base) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		goto err_reg;
@@ -2703,7 +2703,7 @@
 
 	return 0;
 err_unmap:
-	pci_iounmap(pdev, board->base2);
+	iounmap(board->base2);
 err_reg:
 	pci_release_region(pdev, reg);
 err_flag:
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index dce4cc0..ce0d9da 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -168,7 +168,7 @@
 static struct sysrq_key_op sysrq_show_timers_op = {
 	.handler	= sysrq_handle_show_timers,
 	.help_msg	= "show-all-timers(Q)",
-	.action_msg	= "Show Pending Timers",
+	.action_msg	= "Show clockevent devices & pending hrtimers (no others)",
 };
 
 static void sysrq_handle_mountro(int key, struct tty_struct *tty)
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index e70d13d..9c47dc4 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1157,7 +1157,7 @@
  * Once all references to platform device are down to 0,
  * release all allocated structures.
  */
-static void tpm_dev_release(struct device *dev)
+void tpm_dev_release(struct device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 553b0e9..c8f8024 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -90,7 +90,7 @@
 	spin_lock_irqsave(&port->lock, flags);
 	if (port->tty)
 		tty_kref_put(port->tty);
-	port->tty = tty;
+	port->tty = tty_kref_get(tty);
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 EXPORT_SYMBOL(tty_port_tty_set);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d0f4eb6..3fb0d2c 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -198,6 +198,7 @@
 	virtio_cons.put_chars = put_chars;
 	virtio_cons.notifier_add = notifier_add_vio;
 	virtio_cons.notifier_del = notifier_del_vio;
+	virtio_cons.notifier_hangup = notifier_del_vio;
 
 	/* The first argument of hvc_alloc() is the virtual console number, so
 	 * we use zero.  The second argument is the parameter for the
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index ffe9b4e..54c8372 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -641,7 +641,7 @@
 	}
 
 	irq = platform_get_irq(dev, 0);
-	if (irq < 0 || irq >= NR_IRQS)
+	if (irq < 0 || irq >= nr_irqs)
 		return -EBUSY;
 
 	return cascade_irq(irq, giu_get_irq);
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 71d2ac4..c201710 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -237,9 +237,12 @@
 
 	if (strict_strtoul(arg, 16, &base))
 		return -EINVAL;
-
+#ifdef CONFIG_X86_64
+	if (base > UINT_MAX)
+		return -ERANGE;
+#endif
 	printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
-	       (unsigned int)pmtmr_ioport, base);
+	       pmtmr_ioport, base);
 	pmtmr_ioport = base;
 
 	return 1;
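
The acpi_pm hunk adds an explicit range check because pmtmr_ioport is a 32-bit quantity while the parsed value lives in an unsigned long, which is 64 bits wide on x86_64. A hedged userspace sketch of the same parse-then-validate split, using strtoul in place of the kernel's strict_strtoul; parse_ioport() is a hypothetical name.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a hex I/O port override and reject values that cannot fit in
 * 32 bits.  Mirrors the shape of the hunk above; not the kernel code. */
static int parse_ioport(const char *arg, unsigned int *out)
{
	char *end;
	unsigned long base;

	errno = 0;
	base = strtoul(arg, &end, 16);
	if (errno || end == arg || *end != '\0')
		return -EINVAL;
	if (base > UINT_MAX)	/* only relevant where long is 64-bit */
		return -ERANGE;
	*out = (unsigned int)base;
	return 0;
}

int main(void)
{
	unsigned int port;

	if (parse_ioport("0x408", &port) == 0)
		printf("PMTMR IOPort override: 0x%04x\n", port);
	return 0;
}
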
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cd30390..904e575 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -48,13 +48,13 @@
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
 config FSL_DMA
-	bool "Freescale MPC85xx/MPC83xx DMA support"
-	depends on PPC
+	tristate "Freescale Elo and Elo Plus DMA support"
+	depends on FSL_SOC
 	select DMA_ENGINE
 	---help---
-	  Enable support for the Freescale DMA engine. Now, it support
-	  MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
-	  The MPC8349, MPC8360 is also supported.
+	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
+	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
+	  Elo Plus is the DMA controller on 85xx and 86xx parts.
 
 config MV_XOR
 	bool "Marvell XOR engine support"
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a08d197..d1e381e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -325,7 +325,12 @@
 	struct dmatest_thread	*thread;
 	unsigned int		i;
 
-	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
+	/* Have we already been told about this channel? */
+	list_for_each_entry(dtc, &dmatest_channels, node)
+		if (dtc->chan == chan)
+			return DMA_DUP;
+
+	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 	if (!dtc) {
 		pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
 		return DMA_NAK;
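
The dmatest change walks the existing channel list before allocating, so the same channel is never tracked twice, and switches the allocation to GFP_KERNEL since the context may sleep. Below is a minimal userspace sketch of the scan-before-insert idea on a singly linked list; the kernel list helpers and the DMA_DUP/DMA_NAK return codes are not reproduced, and track_channel() is a hypothetical name.

#include <stdio.h>
#include <stdlib.h>

struct chan_node {
	int chan_id;
	struct chan_node *next;
};

static struct chan_node *channels;	/* head of the tracked-channel list */

/* Return 1 if the channel was already tracked, 0 if newly added, -1 on OOM. */
static int track_channel(int chan_id)
{
	struct chan_node *n;

	for (n = channels; n; n = n->next)
		if (n->chan_id == chan_id)
			return 1;	/* already known: do nothing */

	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->chan_id = chan_id;
	n->next = channels;
	channels = n;
	return 0;
}

int main(void)
{
	/* prints: 0 0 1 -- the third call sees the duplicate */
	printf("%d %d %d\n", track_channel(3), track_channel(4), track_channel(3));
	return 0;
}
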
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index c0059ca..0b95dcc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -370,7 +370,10 @@
 					struct dma_client *client)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-	LIST_HEAD(tmp_list);
+
+	/* Has this channel already been allocated? */
+	if (fsl_chan->desc_pool)
+		return 1;
 
 	/* We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
@@ -410,6 +413,8 @@
 	}
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 	dma_pool_destroy(fsl_chan->desc_pool);
+
+	fsl_chan->desc_pool = NULL;
 }
 
 static struct dma_async_tx_descriptor *
@@ -786,159 +791,29 @@
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-static void fsl_dma_callback_test(void *param)
+static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+	struct device_node *node, u32 feature, const char *compatible)
 {
-	struct fsl_dma_chan *fsl_chan = param;
-	if (fsl_chan)
-		dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n");
-}
-
-static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
-{
-	struct dma_chan *chan;
-	int err = 0;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	u8 *src, *dest;
-	int i;
-	size_t test_size;
-	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;
-
-	test_size = 4096;
-
-	src = kmalloc(test_size * 2, GFP_KERNEL);
-	if (!src) {
-		dev_err(fsl_chan->dev,
-				"selftest: Cannot alloc memory for test!\n");
-		return -ENOMEM;
-	}
-
-	dest = src + test_size;
-
-	for (i = 0; i < test_size; i++)
-		src[i] = (u8) i;
-
-	chan = &fsl_chan->common;
-
-	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
-		dev_err(fsl_chan->dev,
-				"selftest: Cannot alloc resources for DMA\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* TX 1 */
-	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
-				 DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
-				  DMA_FROM_DEVICE);
-	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
-	async_tx_ack(tx1);
-
-	cookie = fsl_dma_tx_submit(tx1);
-	fsl_dma_memcpy_issue_pending(chan);
-	msleep(2);
-
-	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		dev_err(fsl_chan->dev, "selftest: Time out!\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* Test free and re-alloc channel resources */
-	fsl_dma_free_chan_resources(chan);
-
-	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
-		dev_err(fsl_chan->dev,
-				"selftest: Cannot alloc resources for DMA\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* Continue to test
-	 * TX 2
-	 */
-	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
-					test_size / 4, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
-					test_size / 4, DMA_FROM_DEVICE);
-	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
-	async_tx_ack(tx2);
-
-	/* TX 3 */
-	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
-					test_size / 4, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
-					test_size / 4, DMA_FROM_DEVICE);
-	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
-	async_tx_ack(tx3);
-
-	/* Interrupt tx test */
-	tx1 = fsl_dma_prep_interrupt(chan, 0);
-	async_tx_ack(tx1);
-	cookie = fsl_dma_tx_submit(tx1);
-
-	/* Test exchanging the prepared tx sort */
-	cookie = fsl_dma_tx_submit(tx3);
-	cookie = fsl_dma_tx_submit(tx2);
-
-	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
-	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
-		tx3->callback = fsl_dma_callback_test;
-		tx3->callback_param = fsl_chan;
-	}
-	fsl_dma_memcpy_issue_pending(chan);
-	msleep(2);
-
-	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		dev_err(fsl_chan->dev, "selftest: Time out!\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	err = memcmp(src, dest, test_size);
-	if (err) {
-		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
-				i++);
-		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
-				"error! src 0x%x, dest 0x%x\n",
-				i, (long)test_size, *(src + i), *(dest + i));
-	}
-
-free_resources:
-	fsl_dma_free_chan_resources(chan);
-out:
-	kfree(src);
-	return err;
-}
-
-static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
-			const struct of_device_id *match)
-{
-	struct fsl_dma_device *fdev;
 	struct fsl_dma_chan *new_fsl_chan;
 	int err;
 
-	fdev = dev_get_drvdata(dev->dev.parent);
-	BUG_ON(!fdev);
-
 	/* alloc channel */
 	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
 	if (!new_fsl_chan) {
-		dev_err(&dev->dev, "No free memory for allocating "
+		dev_err(fdev->dev, "No free memory for allocating "
 				"dma channels!\n");
 		return -ENOMEM;
 	}
 
 	/* get dma channel register base */
-	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
+	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
 	if (err) {
-		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
-				dev->node->full_name);
+		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
+				node->full_name);
 		goto err_no_reg;
 	}
 
-	new_fsl_chan->feature = *(u32 *)match->data;
+	new_fsl_chan->feature = feature;
 
 	if (!fdev->feature)
 		fdev->feature = new_fsl_chan->feature;
@@ -948,13 +823,13 @@
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &dev->dev;
+	new_fsl_chan->dev = &new_fsl_chan->common.dev;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
 	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
-		dev_err(&dev->dev, "There is no %d channel!\n",
+		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
 		goto err_no_chan;
@@ -988,29 +863,23 @@
 			&fdev->common.channels);
 	fdev->common.chancnt++;
 
-	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
+	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
 	if (new_fsl_chan->irq != NO_IRQ) {
 		err = request_irq(new_fsl_chan->irq,
 					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
 					"fsldma-channel", new_fsl_chan);
 		if (err) {
-			dev_err(&dev->dev, "DMA channel %s request_irq error "
-				"with return %d\n", dev->node->full_name, err);
+			dev_err(fdev->dev, "DMA channel %s request_irq error "
+				"with return %d\n", node->full_name, err);
 			goto err_no_irq;
 		}
 	}
 
-	err = fsl_dma_self_test(new_fsl_chan);
-	if (err)
-		goto err_self_test;
-
-	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
-				match->compatible, new_fsl_chan->irq);
+	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
+				compatible, new_fsl_chan->irq);
 
 	return 0;
 
-err_self_test:
-	free_irq(new_fsl_chan->irq, new_fsl_chan);
 err_no_irq:
 	list_del(&new_fsl_chan->common.device_node);
 err_no_chan:
@@ -1020,38 +889,20 @@
 	return err;
 }
 
-const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
-const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
-
-static struct of_device_id of_fsl_dma_chan_ids[] = {
-	{
-		.compatible = "fsl,eloplus-dma-channel",
-		.data = (void *)&mpc8540_dma_ip_feature,
-	},
-	{
-		.compatible = "fsl,elo-dma-channel",
-		.data = (void *)&mpc8349_dma_ip_feature,
-	},
-	{}
-};
-
-static struct of_platform_driver of_fsl_dma_chan_driver = {
-	.name = "of-fsl-dma-channel",
-	.match_table = of_fsl_dma_chan_ids,
-	.probe = of_fsl_dma_chan_probe,
-};
-
-static __init int of_fsl_dma_chan_init(void)
+static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 {
-	return of_register_platform_driver(&of_fsl_dma_chan_driver);
+	free_irq(fchan->irq, fchan);
+	list_del(&fchan->common.device_node);
+	iounmap(fchan->reg_base);
+	kfree(fchan);
 }
 
 static int __devinit of_fsl_dma_probe(struct of_device *dev,
 			const struct of_device_id *match)
 {
 	int err;
-	unsigned int irq;
 	struct fsl_dma_device *fdev;
+	struct device_node *child;
 
 	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
 	if (!fdev) {
@@ -1085,9 +936,9 @@
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.dev = &dev->dev;
 
-	irq = irq_of_parse_and_map(dev->node, 0);
-	if (irq != NO_IRQ) {
-		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
+	fdev->irq = irq_of_parse_and_map(dev->node, 0);
+	if (fdev->irq != NO_IRQ) {
+		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
 					"fsldma-device", fdev);
 		if (err) {
 			dev_err(&dev->dev, "DMA device request_irq error "
@@ -1097,7 +948,21 @@
 	}
 
 	dev_set_drvdata(&(dev->dev), fdev);
-	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);
+
+	/* We cannot use of_platform_bus_probe() because there is no
+	 * of_platform_bus_remove.  Instead, we manually instantiate every DMA
+	 * channel object.
+	 */
+	for_each_child_of_node(dev->node, child) {
+		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+			fsl_dma_chan_probe(fdev, child,
+				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
+				"fsl,eloplus-dma-channel");
+		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+			fsl_dma_chan_probe(fdev, child,
+				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
+				"fsl,elo-dma-channel");
+	}
 
 	dma_async_device_register(&fdev->common);
 	return 0;
@@ -1109,6 +974,30 @@
 	return err;
 }
 
+static int of_fsl_dma_remove(struct of_device *of_dev)
+{
+	struct fsl_dma_device *fdev;
+	unsigned int i;
+
+	fdev = dev_get_drvdata(&of_dev->dev);
+
+	dma_async_device_unregister(&fdev->common);
+
+	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+		if (fdev->chan[i])
+			fsl_dma_chan_remove(fdev->chan[i]);
+
+	if (fdev->irq != NO_IRQ)
+		free_irq(fdev->irq, fdev);
+
+	iounmap(fdev->reg_base);
+
+	kfree(fdev);
+	dev_set_drvdata(&of_dev->dev, NULL);
+
+	return 0;
+}
+
 static struct of_device_id of_fsl_dma_ids[] = {
 	{ .compatible = "fsl,eloplus-dma", },
 	{ .compatible = "fsl,elo-dma", },
@@ -1116,15 +1005,32 @@
 };
 
 static struct of_platform_driver of_fsl_dma_driver = {
-	.name = "of-fsl-dma",
+	.name = "fsl-elo-dma",
 	.match_table = of_fsl_dma_ids,
 	.probe = of_fsl_dma_probe,
+	.remove = of_fsl_dma_remove,
 };
 
 static __init int of_fsl_dma_init(void)
 {
-	return of_register_platform_driver(&of_fsl_dma_driver);
+	int ret;
+
+	pr_info("Freescale Elo / Elo Plus DMA driver\n");
+
+	ret = of_register_platform_driver(&of_fsl_dma_driver);
+	if (ret)
+		pr_err("fsldma: failed to register platform driver\n");
+
+	return ret;
 }
 
-subsys_initcall(of_fsl_dma_chan_init);
+static void __exit of_fsl_dma_exit(void)
+{
+	of_unregister_platform_driver(&of_fsl_dma_driver);
+}
+
 subsys_initcall(of_fsl_dma_init);
+module_exit(of_fsl_dma_exit);
+
+MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
+MODULE_LICENSE("GPL");
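
Two details of the fsldma rework are worth calling out: alloc_chan_resources now returns early if the descriptor pool already exists, and free_chan_resources resets the pointer to NULL, so the pair can be called repeatedly without leaking or double-allocating. A small sketch of that idempotent allocate/release pattern under assumed names:

#include <stdio.h>
#include <stdlib.h>

struct chan {
	void *desc_pool;	/* NULL means "not allocated" */
};

/* Allocate backing storage once; repeated calls are harmless. */
static int chan_alloc_resources(struct chan *c)
{
	if (c->desc_pool)
		return 1;		/* already allocated */
	c->desc_pool = malloc(4096);
	return c->desc_pool ? 1 : -1;
}

/* Release and reset the marker so a later alloc starts from scratch. */
static void chan_free_resources(struct chan *c)
{
	free(c->desc_pool);
	c->desc_pool = NULL;
}

int main(void)
{
	struct chan c = { NULL };

	chan_alloc_resources(&c);
	chan_alloc_resources(&c);	/* no double allocation */
	chan_free_resources(&c);
	chan_free_resources(&c);	/* free(NULL) is a no-op */
	printf("ok\n");
	return 0;
}
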
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 6faf07b..4f21a51 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -114,6 +114,7 @@
 	struct dma_device common;
 	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;		/* The same as DMA channels */
+	int irq;		/* Channel IRQ */
 };
 
 /* Define macros for fsl_dma_chan->feature property */
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index bc8c6e3..1ef68b3 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -971,11 +971,9 @@
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		return ioat1_dma_get_next_descriptor(ioat_chan);
-		break;
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
 		return ioat2_dma_get_next_descriptor(ioat_chan);
-		break;
 	}
 	return NULL;
 }
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 0e024fe..887072f 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -142,7 +142,7 @@
 		csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
 		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
 		csrow->mtype = MEM_XDR;
-		csrow->edac_mode = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+		csrow->edac_mode = EDAC_SECDED;
 		dev_dbg(mci->dev,
 			"Initialized on node %d, chanmask=0x%x,"
 			" first_page=0x%lx, nr_pages=0x%x\n",
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index deb154a..4353414 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -732,7 +732,6 @@
 
 	attr->attr.name = name;
 	attr->attr.mode = S_IRUSR;
-	attr->attr.owner = THIS_MODULE;
 
 	attr->hdr = hdr;
 	attr->show = show;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index dbd42d6..7f2ee27 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -127,6 +127,13 @@
 	  This driver provides an in-kernel interface to those GPIOs using
 	  platform-neutral GPIO calls.
 
+config GPIO_TWL4030
+	tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
+	depends on TWL4030_CORE
+	help
+	  Say yes here to access the GPIO signals of various multi-function
+	  power management chips from Texas Instruments.
+
 comment "PCI GPIO expanders:"
 
 config GPIO_BT8XX
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 01b4bbd..6aafdeb 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -9,4 +9,5 @@
 obj-$(CONFIG_GPIO_MCP23S08)	+= mcp23s08.o
 obj-$(CONFIG_GPIO_PCA953X)	+= pca953x.o
 obj-$(CONFIG_GPIO_PCF857X)	+= pcf857x.o
+obj-$(CONFIG_GPIO_TWL4030)	+= twl4030-gpio.o
 obj-$(CONFIG_GPIO_BT8XX)	+= bt8xxgpio.o
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9112830..faa1cc6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -248,7 +248,7 @@
 	if (!test_bit(FLAG_EXPORT, &desc->flags))
 		status = -EIO;
 	else
-		status = sprintf(buf, "%d\n", gpio_get_value_cansleep(gpio));
+		status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio));
 
 	mutex_unlock(&sysfs_lock);
 	return status;
@@ -1105,7 +1105,7 @@
 
 	might_sleep_if(extra_checks);
 	chip = gpio_to_chip(gpio);
-	return chip->get(chip, gpio - chip->base);
+	return chip->get ? chip->get(chip, gpio - chip->base) : 0;
 }
 EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
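
Both gpiolib hunks here are defensive one-liners: the !!value form collapses any non-zero driver return into 1 before it reaches sysfs, and the chip->get ? ... : 0 form tolerates output-only chips (such as the TWL4030 LED pins added later in this series) that leave the callback unset. A compact sketch of both idioms, with hypothetical names:

#include <stdio.h>

struct chip {
	/* Optional callback: output-only chips may leave this NULL. */
	int (*get)(int offset);
};

static int raw_get(int offset)
{
	return 0x40;	/* a driver may return any non-zero value for "high" */
}

static int chip_get_value(const struct chip *c, int offset)
{
	return c->get ? c->get(offset) : 0;
}

int main(void)
{
	struct chip with_get = { raw_get };
	struct chip output_only = { NULL };

	/* !! normalizes the raw value to 0 or 1 for user-visible output. */
	printf("%d\n", !!chip_get_value(&with_get, 5));		/* prints 1 */
	printf("%d\n", !!chip_get_value(&output_only, 5));	/* prints 0 */
	return 0;
}
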
 
@@ -1143,7 +1143,7 @@
 
 		if (!is_out) {
 			int		irq = gpio_to_irq(gpio);
-			struct irq_desc	*desc = irq_desc + irq;
+			struct irq_desc	*desc = irq_to_desc(irq);
 
 			/* This races with request_irq(), set_irq_type(),
 			 * and set_irq_wake() ... but those are "rare".
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
new file mode 100644
index 0000000..37d3eec
--- /dev/null
+++ b/drivers/gpio/twl4030-gpio.c
@@ -0,0 +1,521 @@
+/*
+ * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
+ *
+ * Copyright (C) 2006-2007 Texas Instruments, Inc.
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Code re-arranged and cleaned up by:
+ *	Syed Mohammed Khasim <x0khasim@ti.com>
+ *
+ * Initial Code:
+ *	Andy Lowe / Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * The GPIO "subchip" supports 18 GPIOs which can be configured as
+ * inputs or outputs, with pullups or pulldowns on each pin.  Each
+ * GPIO can trigger interrupts on either or both edges.
+ *
+ * GPIO interrupts can be fed to either of two IRQ lines; this is
+ * intended to support multiple hosts.
+ *
+ * There are also two LED pins used sometimes as output-only GPIOs.
+ */
+
+
+static struct gpio_chip twl_gpiochip;
+static int twl4030_gpio_irq_base;
+
+/* genirq interfaces are not available to modules */
+#ifdef MODULE
+#define is_module()	true
+#else
+#define is_module()	false
+#endif
+
+/* GPIO_CTRL Fields */
+#define MASK_GPIO_CTRL_GPIO0CD1		BIT(0)
+#define MASK_GPIO_CTRL_GPIO1CD2		BIT(1)
+#define MASK_GPIO_CTRL_GPIO_ON		BIT(2)
+
+/* Mask for GPIO registers when aggregated into a 32-bit integer */
+#define GPIO_32_MASK			0x0003ffff
+
+/* Data structures */
+static DEFINE_MUTEX(gpio_lock);
+
+/* store usage of each GPIO; each bit represents one GPIO */
+static unsigned int gpio_usage_count;
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * To configure TWL4030 GPIO module registers
+ */
+static inline int gpio_twl4030_write(u8 address, u8 data)
+{
+	return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB}))
+ * PWMs A and B are dedicated to LEDs A and B, respectively.
+ */
+
+#define TWL4030_LED_LEDEN	0x0
+
+/* LEDEN bits */
+#define LEDEN_LEDAON		BIT(0)
+#define LEDEN_LEDBON		BIT(1)
+#define LEDEN_LEDAEXT		BIT(2)
+#define LEDEN_LEDBEXT		BIT(3)
+#define LEDEN_LEDAPWM		BIT(4)
+#define LEDEN_LEDBPWM		BIT(5)
+#define LEDEN_PWM_LENGTHA	BIT(6)
+#define LEDEN_PWM_LENGTHB	BIT(7)
+
+#define TWL4030_PWMx_PWMxON	0x0
+#define TWL4030_PWMx_PWMxOFF	0x1
+
+#define PWMxON_LENGTH		BIT(7)
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * To read a TWL4030 GPIO module register
+ */
+static inline int gpio_twl4030_read(u8 address)
+{
+	u8 data;
+	int ret = 0;
+
+	ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
+	return (ret < 0) ? ret : data;
+}
+
+/*----------------------------------------------------------------------*/
+
+static u8 cached_leden;		/* protected by gpio_lock */
+
+/* The LED lines are open drain outputs ... a FET pulls to GND, so an
+ * external pullup is needed.  We could also expose the integrated PWM
+ * as a LED brightness control; we initialize it as "always on".
+ */
+static void twl4030_led_set_value(int led, int value)
+{
+	u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
+	int status;
+
+	if (led)
+		mask <<= 1;
+
+	mutex_lock(&gpio_lock);
+	if (value)
+		cached_leden &= ~mask;
+	else
+		cached_leden |= mask;
+	status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+			TWL4030_LED_LEDEN);
+	mutex_unlock(&gpio_lock);
+}
+
+static int twl4030_set_gpio_direction(int gpio, int is_input)
+{
+	u8 d_bnk = gpio >> 3;
+	u8 d_msk = BIT(gpio & 0x7);
+	u8 reg = 0;
+	u8 base = REG_GPIODATADIR1 + d_bnk;
+	int ret = 0;
+
+	mutex_lock(&gpio_lock);
+	ret = gpio_twl4030_read(base);
+	if (ret >= 0) {
+		if (is_input)
+			reg = ret & ~d_msk;
+		else
+			reg = ret | d_msk;
+
+		ret = gpio_twl4030_write(base, reg);
+	}
+	mutex_unlock(&gpio_lock);
+	return ret;
+}
+
+static int twl4030_set_gpio_dataout(int gpio, int enable)
+{
+	u8 d_bnk = gpio >> 3;
+	u8 d_msk = BIT(gpio & 0x7);
+	u8 base = 0;
+
+	if (enable)
+		base = REG_SETGPIODATAOUT1 + d_bnk;
+	else
+		base = REG_CLEARGPIODATAOUT1 + d_bnk;
+
+	return gpio_twl4030_write(base, d_msk);
+}
+
+static int twl4030_get_gpio_datain(int gpio)
+{
+	u8 d_bnk = gpio >> 3;
+	u8 d_off = gpio & 0x7;
+	u8 base = 0;
+	int ret = 0;
+
+	if (unlikely((gpio >= TWL4030_GPIO_MAX)
+		|| !(gpio_usage_count & BIT(gpio))))
+		return -EPERM;
+
+	base = REG_GPIODATAIN1 + d_bnk;
+	ret = gpio_twl4030_read(base);
+	if (ret > 0)
+		ret = (ret >> d_off) & 0x1;
+
+	return ret;
+}
+
+/*
+ * Configure debounce timing value for a GPIO pin on TWL4030
+ */
+int twl4030_set_gpio_debounce(int gpio, int enable)
+{
+	u8 d_bnk = gpio >> 3;
+	u8 d_msk = BIT(gpio & 0x7);
+	u8 reg = 0;
+	u8 base = 0;
+	int ret = 0;
+
+	if (unlikely((gpio >= TWL4030_GPIO_MAX)
+		|| !(gpio_usage_count & BIT(gpio))))
+		return -EPERM;
+
+	base = REG_GPIO_DEBEN1 + d_bnk;
+	mutex_lock(&gpio_lock);
+	ret = gpio_twl4030_read(base);
+	if (ret >= 0) {
+		if (enable)
+			reg = ret | d_msk;
+		else
+			reg = ret & ~d_msk;
+
+		ret = gpio_twl4030_write(base, reg);
+	}
+	mutex_unlock(&gpio_lock);
+	return ret;
+}
+EXPORT_SYMBOL(twl4030_set_gpio_debounce);
+
+/*----------------------------------------------------------------------*/
+
+static int twl_request(struct gpio_chip *chip, unsigned offset)
+{
+	int status = 0;
+
+	mutex_lock(&gpio_lock);
+
+	/* Support the two LED outputs as output-only GPIOs. */
+	if (offset >= TWL4030_GPIO_MAX) {
+		u8	ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
+				| LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
+		u8	module = TWL4030_MODULE_PWMA;
+
+		offset -= TWL4030_GPIO_MAX;
+		if (offset) {
+			ledclr_mask <<= 1;
+			module = TWL4030_MODULE_PWMB;
+		}
+
+		/* initialize PWM to always-drive */
+		status = twl4030_i2c_write_u8(module, 0x7f,
+				TWL4030_PWMx_PWMxOFF);
+		if (status < 0)
+			goto done;
+		status = twl4030_i2c_write_u8(module, 0x7f,
+				TWL4030_PWMx_PWMxON);
+		if (status < 0)
+			goto done;
+
+		/* init LED to not-driven (high) */
+		module = TWL4030_MODULE_LED;
+		status = twl4030_i2c_read_u8(module, &cached_leden,
+				TWL4030_LED_LEDEN);
+		if (status < 0)
+			goto done;
+		cached_leden &= ~ledclr_mask;
+		status = twl4030_i2c_write_u8(module, cached_leden,
+				TWL4030_LED_LEDEN);
+		if (status < 0)
+			goto done;
+
+		status = 0;
+		goto done;
+	}
+
+	/* on first use, turn GPIO module "on" */
+	if (!gpio_usage_count) {
+		struct twl4030_gpio_platform_data *pdata;
+		u8 value = MASK_GPIO_CTRL_GPIO_ON;
+
+		/* optionally have the first two GPIOs switch vMMC1
+		 * and vMMC2 power supplies based on card presence.
+		 */
+		pdata = chip->dev->platform_data;
+		value |= pdata->mmc_cd & 0x03;
+
+		status = gpio_twl4030_write(REG_GPIO_CTRL, value);
+	}
+
+	if (!status)
+		gpio_usage_count |= (0x1 << offset);
+
+done:
+	mutex_unlock(&gpio_lock);
+	return status;
+}
+
+static void twl_free(struct gpio_chip *chip, unsigned offset)
+{
+	if (offset >= TWL4030_GPIO_MAX) {
+		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
+		return;
+	}
+
+	mutex_lock(&gpio_lock);
+
+	gpio_usage_count &= ~BIT(offset);
+
+	/* on last use, switch off GPIO module */
+	if (!gpio_usage_count)
+		gpio_twl4030_write(REG_GPIO_CTRL, 0x0);
+
+	mutex_unlock(&gpio_lock);
+}
+
+static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+	return (offset < TWL4030_GPIO_MAX)
+		? twl4030_set_gpio_direction(offset, 1)
+		: -EINVAL;
+}
+
+static int twl_get(struct gpio_chip *chip, unsigned offset)
+{
+	int status = 0;
+
+	if (offset < TWL4030_GPIO_MAX)
+		status = twl4030_get_gpio_datain(offset);
+	else if (offset == TWL4030_GPIO_MAX)
+		status = cached_leden & LEDEN_LEDAON;
+	else
+		status = cached_leden & LEDEN_LEDBON;
+	return (status < 0) ? 0 : status;
+}
+
+static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+	if (offset < TWL4030_GPIO_MAX) {
+		twl4030_set_gpio_dataout(offset, value);
+		return twl4030_set_gpio_direction(offset, 0);
+	} else {
+		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+		return 0;
+	}
+}
+
+static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	if (offset < TWL4030_GPIO_MAX)
+		twl4030_set_gpio_dataout(offset, value);
+	else
+		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+}
+
+static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
+		? (twl4030_gpio_irq_base + offset)
+		: -EINVAL;
+}
+
+static struct gpio_chip twl_gpiochip = {
+	.label			= "twl4030",
+	.owner			= THIS_MODULE,
+	.request		= twl_request,
+	.free			= twl_free,
+	.direction_input	= twl_direction_in,
+	.get			= twl_get,
+	.direction_output	= twl_direction_out,
+	.set			= twl_set,
+	.to_irq			= twl_to_irq,
+	.can_sleep		= 1,
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
+{
+	u8		message[6];
+	unsigned	i, gpio_bit;
+
+	/* For most pins, a pulldown was enabled by default.
+	 * We should have data that's specific to this board.
+	 */
+	for (gpio_bit = 1, i = 1; i < 6; i++) {
+		u8		bit_mask;
+		unsigned	j;
+
+		for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) {
+			if (ups & gpio_bit)
+				bit_mask |= 1 << (j + 1);
+			else if (downs & gpio_bit)
+				bit_mask |= 1 << (j + 0);
+		}
+		message[i] = bit_mask;
+	}
+
+	return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
+				REG_GPIOPUPDCTR1, 5);
+}
+
+static int gpio_twl4030_remove(struct platform_device *pdev);
+
+static int __devinit gpio_twl4030_probe(struct platform_device *pdev)
+{
+	struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
+	int ret;
+
+	/* maybe setup IRQs */
+	if (pdata->irq_base) {
+		if (is_module()) {
+			dev_err(&pdev->dev,
+				"can't dispatch IRQs from modules\n");
+			goto no_irqs;
+		}
+		ret = twl4030_sih_setup(TWL4030_MODULE_GPIO);
+		if (ret < 0)
+			return ret;
+		WARN_ON(ret != pdata->irq_base);
+		twl4030_gpio_irq_base = ret;
+	}
+
+no_irqs:
+	/*
+	 * NOTE:  boards may waste power if they don't set pullups
+	 * and pulldowns correctly ... default for non-ULPI pins is
+	 * pulldown, and some other pins may have external pullups
+	 * or pulldowns.  Careful!
+	 */
+	ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns);
+	if (ret)
+		dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n",
+				pdata->pullups, pdata->pulldowns,
+				ret);
+
+	twl_gpiochip.base = pdata->gpio_base;
+	twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
+	twl_gpiochip.dev = &pdev->dev;
+
+	/* NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE,
+	 * is (still) clear if use_leds is set.
+	 */
+	if (pdata->use_leds)
+		twl_gpiochip.ngpio += 2;
+
+	ret = gpiochip_add(&twl_gpiochip);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+				"could not register gpiochip, %d\n",
+				ret);
+		twl_gpiochip.ngpio = 0;
+		gpio_twl4030_remove(pdev);
+	} else if (pdata->setup) {
+		int status;
+
+		status = pdata->setup(&pdev->dev,
+				pdata->gpio_base, TWL4030_GPIO_MAX);
+		if (status)
+			dev_dbg(&pdev->dev, "setup --> %d\n", status);
+	}
+
+	return ret;
+}
+
+static int __devexit gpio_twl4030_remove(struct platform_device *pdev)
+{
+	struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
+	int status;
+
+	if (pdata->teardown) {
+		status = pdata->teardown(&pdev->dev,
+				pdata->gpio_base, TWL4030_GPIO_MAX);
+		if (status) {
+			dev_dbg(&pdev->dev, "teardown --> %d\n", status);
+			return status;
+		}
+	}
+
+	status = gpiochip_remove(&twl_gpiochip);
+	if (status < 0)
+		return status;
+
+	if (is_module())
+		return 0;
+
+	/* REVISIT no support yet for deregistering all the IRQs */
+	WARN_ON(1);
+	return -EIO;
+}
+
+/* Note:  this hardware lives inside an I2C-based multi-function device. */
+MODULE_ALIAS("platform:twl4030_gpio");
+
+static struct platform_driver gpio_twl4030_driver = {
+	.driver.name	= "twl4030_gpio",
+	.driver.owner	= THIS_MODULE,
+	.probe		= gpio_twl4030_probe,
+	.remove		= __devexit_p(gpio_twl4030_remove),
+};
+
+static int __init gpio_twl4030_init(void)
+{
+	return platform_driver_register(&gpio_twl4030_driver);
+}
+subsys_initcall(gpio_twl4030_init);
+
+static void __exit gpio_twl4030_exit(void)
+{
+	platform_driver_unregister(&gpio_twl4030_driver);
+}
+module_exit(gpio_twl4030_exit);
+
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("GPIO interface for TWL4030");
+MODULE_LICENSE("GPL");
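
gpio_twl4030_pulls() above packs the per-GPIO pull configuration two bits per pin (bit j selects pulldown, bit j+1 selects pullup) into consecutive register bytes. Here is a standalone sketch of that packing loop with the I2C write replaced by a print so it can run anywhere; pack_pulls() and its 5-byte layout are illustrative assumptions, not the driver itself.

#include <stdio.h>

/* Pack pull configuration, two bits per pin, into 5 bytes (room for 20
 * pins; the TWL4030 has 18).  ups/downs are bitmasks of pins that want
 * a pullup or a pulldown. */
static void pack_pulls(unsigned int ups, unsigned int downs, unsigned char msg[5])
{
	unsigned int gpio_bit = 1;

	for (int i = 0; i < 5; i++) {
		unsigned char bit_mask = 0;

		for (int j = 0; j < 8; j += 2, gpio_bit <<= 1) {
			if (ups & gpio_bit)
				bit_mask |= 1u << (j + 1);	/* pullup bit */
			else if (downs & gpio_bit)
				bit_mask |= 1u << (j + 0);	/* pulldown bit */
		}
		msg[i] = bit_mask;
	}
}

int main(void)
{
	unsigned char msg[5];

	pack_pulls(0x00005, 0x0000a, msg);	/* pullups on 0,2; pulldowns on 1,3 */
	for (int i = 0; i < 5; i++)
		printf("byte %d = 0x%02x\n", i, msg[i]);
	return 0;
}
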
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 9097500..a8b33c2 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
 #
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM
+	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9255088..17ae330 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -171,6 +171,37 @@
 	return 0;
 }
 
+/*
+ * Try to write quickly with an atomic kmap. Return true on success.
+ *
+ * If this fails (which includes a partial write), we'll redo the whole
+ * thing with the slow version.
+ *
+ * This is a workaround for the low performance of iounmap (approximately
+ * 10% CPU cost on normal 3D workloads).  kmap_atomic on HIGHMEM kernels
+ * happens to let us map card memory without taking IPIs.  When the vmap
+ * rework lands we should be able to dump this hack.
+ */
+static inline int fast_user_write(unsigned long pfn, char __user *user_data,
+				  int l, int o)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long unwritten;
+	char *vaddr_atomic;
+
+	vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
+#if WATCH_PWRITE
+	DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+		 i, o, l, pfn, vaddr_atomic);
+#endif
+	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, user_data, l);
+	kunmap_atomic(vaddr_atomic, KM_USER0);
+	return !unwritten;
+#else
+	return 0;
+#endif
+}
+
 static int
 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		    struct drm_i915_gem_pwrite *args,
@@ -180,12 +211,7 @@
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
-	char __iomem *vaddr;
-	char *vaddr_atomic;
-	int i, o, l;
 	int ret = 0;
-	unsigned long pfn;
-	unsigned long unwritten;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -209,6 +235,9 @@
 	obj_priv->dirty = 1;
 
 	while (remain > 0) {
+		unsigned long pfn;
+		int i, o, l;
+
 		/* Operation in this page
 		 *
 		 * i = page number
@@ -223,25 +252,10 @@
 
 		pfn = (dev->agp->base >> PAGE_SHIFT) + i;
 
-#ifdef CONFIG_HIGHMEM
-		/* This is a workaround for the low performance of iounmap
-		 * (approximate 10% cpu cost on normal 3D workloads).
-		 * kmap_atomic on HIGHMEM kernels happens to let us map card
-		 * memory without taking IPIs.  When the vmap rework lands
-		 * we should be able to dump this hack.
-		 */
-		vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
-#if WATCH_PWRITE
-		DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
-			 i, o, l, pfn, vaddr_atomic);
-#endif
-		unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
-							      user_data, l);
-		kunmap_atomic(vaddr_atomic, KM_USER0);
+		if (!fast_user_write(pfn, user_data, l, o)) {
+			unsigned long unwritten;
+			char __iomem *vaddr;
 
-		if (unwritten)
-#endif /* CONFIG_HIGHMEM */
-		{
 			vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
 #if WATCH_PWRITE
 			DRM_INFO("pwrite slow i %d o %d l %d "
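
The i915 hunk factors the HIGHMEM atomic-kmap workaround into fast_user_write(), which returns false on any failure (including a partial copy) so the caller can redo the whole chunk through the slower ioremap path. A tiny sketch of that "try the fast path, fall back to the general one" shape, with both paths stubbed out and HAVE_FAST_PATH standing in for the real configuration check:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Fast path: may legitimately refuse (returns false), e.g. when the
 * cheap mapping trick is unavailable in this configuration. */
static bool fast_write(char *dst, const char *src, size_t len)
{
#ifdef HAVE_FAST_PATH			/* stand-in for CONFIG_HIGHMEM */
	memcpy(dst, src, len);
	return true;
#else
	(void)dst; (void)src; (void)len;
	return false;
#endif
}

/* Slow path: always works, assumed to be more expensive. */
static void slow_write(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
}

static void do_write(char *dst, const char *src, size_t len)
{
	if (!fast_write(dst, src, len))
		slow_write(dst, src, len);
}

int main(void)
{
	char buf[16] = { 0 };

	do_write(buf, "hello", 6);
	printf("%s\n", buf);
	return 0;
}
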
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 1d3b8a3..705a43c 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -428,7 +428,7 @@
 		usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
 		if (!usbhid->out[usbhid->outhead].raw_report) {
 			spin_unlock_irqrestore(&usbhid->outlock, flags);
-			warn("output queueing failed");
+			dev_warn(&hid->dev, "output queueing failed\n");
 			return;
 		}
 		hid_output_report(report, usbhid->out[usbhid->outhead].raw_report);
@@ -455,7 +455,7 @@
 		usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
 		if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
 			spin_unlock_irqrestore(&usbhid->ctrllock, flags);
-			warn("control queueing failed");
+			dev_warn(&hid->dev, "control queueing failed\n");
 			return;
 		}
 		hid_output_report(report, usbhid->ctrl[usbhid->ctrlhead].raw_report);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index b06b8e0..bc011da 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -49,6 +49,9 @@
 
 #define APPLESMC_MAX_DATA_LENGTH 32
 
+#define APPLESMC_MIN_WAIT	0x0040
+#define APPLESMC_MAX_WAIT	0x8000
+
 #define APPLESMC_STATUS_MASK	0x0f
 #define APPLESMC_READ_CMD	0x10
 #define APPLESMC_WRITE_CMD	0x11
@@ -57,8 +60,8 @@
 
 #define KEY_COUNT_KEY		"#KEY" /* r-o ui32 */
 
-#define LIGHT_SENSOR_LEFT_KEY	"ALV0" /* r-o {alv (6 bytes) */
-#define LIGHT_SENSOR_RIGHT_KEY	"ALV1" /* r-o {alv (6 bytes) */
+#define LIGHT_SENSOR_LEFT_KEY	"ALV0" /* r-o {alv (6-10 bytes) */
+#define LIGHT_SENSOR_RIGHT_KEY	"ALV1" /* r-o {alv (6-10 bytes) */
 #define BACKLIGHT_KEY		"LKSB" /* w-o {lkb (2 bytes) */
 
 #define CLAMSHELL_KEY		"MSLD" /* r-o ui8 (unused) */
@@ -104,6 +107,15 @@
 /* Set 6: Macbook3 set */
 	{ "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H",
 	  "Th0S", "Th1H", NULL },
+/* Set 7: Macbook Air */
+	{ "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TC0P", "TCFP",
+	  "TTF0", "TW0P", "Th0H", "Tp0P", "TpFP", "Ts0P", "Ts0S", NULL },
+/* Set 8: Macbook Pro 4,1 (Penryn) */
+	{ "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P", "Th0H",
+	  "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
+/* Set 9: Macbook Pro 3,1 (Santa Rosa) */
+	{ "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P",
+	  "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
 };
 
 /* List of keys used to read/write fan speeds */
@@ -163,25 +175,25 @@
 static struct workqueue_struct *applesmc_led_wq;
 
 /*
- * __wait_status - Wait up to 2ms for the status port to get a certain value
+ * __wait_status - Wait up to 32ms for the status port to get a certain value
  * (masked with 0x0f), returning zero if the value is obtained.  Callers must
  * hold applesmc_lock.
  */
 static int __wait_status(u8 val)
 {
-	unsigned int i;
+	int us;
 
 	val = val & APPLESMC_STATUS_MASK;
 
-	for (i = 0; i < 200; i++) {
+	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
+		udelay(us);
 		if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val) {
 			if (debug)
 				printk(KERN_DEBUG
-						"Waited %d us for status %x\n",
-						i*10, val);
+					"Waited %d us for status %x\n",
+					2 * us - APPLESMC_MIN_WAIT, val);
 			return 0;
 		}
-		udelay(10);
 	}
 
 	printk(KERN_WARNING "applesmc: wait status failed: %x != %x\n",
@@ -191,6 +203,25 @@
 }
 
 /*
+ * special treatment of command port - on newer macbooks, it seems necessary
+ * to resend the command byte before polling the status again. Callers must
+ * hold applesmc_lock.
+ */
+static int send_command(u8 cmd)
+{
+	int us;
+	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
+		outb(cmd, APPLESMC_CMD_PORT);
+		udelay(us);
+		if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c)
+			return 0;
+	}
+	printk(KERN_WARNING "applesmc: command failed: %x -> %x\n",
+		cmd, inb(APPLESMC_CMD_PORT));
+	return -EIO;
+}
+
+/*
  * applesmc_read_key - reads len bytes from a given key, and put them in buffer.
  * Returns zero on success or a negative error on failure. Callers must
  * hold applesmc_lock.
@@ -205,8 +236,7 @@
 		return -EINVAL;
 	}
 
-	outb(APPLESMC_READ_CMD, APPLESMC_CMD_PORT);
-	if (__wait_status(0x0c))
+	if (send_command(APPLESMC_READ_CMD))
 		return -EIO;
 
 	for (i = 0; i < 4; i++) {
@@ -249,8 +279,7 @@
 		return -EINVAL;
 	}
 
-	outb(APPLESMC_WRITE_CMD, APPLESMC_CMD_PORT);
-	if (__wait_status(0x0c))
+	if (send_command(APPLESMC_WRITE_CMD))
 		return -EIO;
 
 	for (i = 0; i < 4; i++) {
@@ -284,8 +313,7 @@
 	readkey[2] = index >> 8;
 	readkey[3] = index;
 
-	outb(APPLESMC_GET_KEY_BY_INDEX_CMD, APPLESMC_CMD_PORT);
-	if (__wait_status(0x0c))
+	if (send_command(APPLESMC_GET_KEY_BY_INDEX_CMD))
 		return -EIO;
 
 	for (i = 0; i < 4; i++) {
@@ -315,8 +343,7 @@
 {
 	int i;
 
-	outb(APPLESMC_GET_KEY_TYPE_CMD, APPLESMC_CMD_PORT);
-	if (__wait_status(0x0c))
+	if (send_command(APPLESMC_GET_KEY_TYPE_CMD))
 		return -EIO;
 
 	for (i = 0; i < 4; i++) {
@@ -325,7 +352,7 @@
 			return -EIO;
 	}
 
-	outb(5, APPLESMC_DATA_PORT);
+	outb(6, APPLESMC_DATA_PORT);
 
 	for (i = 0; i < 6; i++) {
 		if (__wait_status(0x05))
@@ -527,17 +554,27 @@
 static ssize_t applesmc_light_show(struct device *dev,
 				struct device_attribute *attr, char *sysfsbuf)
 {
+	static int data_length;
 	int ret;
 	u8 left = 0, right = 0;
-	u8 buffer[6];
+	u8 buffer[10], query[6];
 
 	mutex_lock(&applesmc_lock);
 
-	ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, 6);
+	if (!data_length) {
+		ret = applesmc_get_key_type(LIGHT_SENSOR_LEFT_KEY, query);
+		if (ret)
+			goto out;
+		data_length = clamp_val(query[0], 0, 10);
+		printk(KERN_INFO "applesmc: light sensor data length set to "
+			"%d\n", data_length);
+	}
+
+	ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
 	left = buffer[2];
 	if (ret)
 		goto out;
-	ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, 6);
+	ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
 	right = buffer[2];
 
 out:
@@ -1233,39 +1270,57 @@
 	{ .accelerometer = 0, .light = 0, .temperature_set = 5 },
 /* MacBook3: accelerometer and temperature set 6 */
 	{ .accelerometer = 1, .light = 0, .temperature_set = 6 },
+/* MacBook Air: accelerometer, backlight and temperature set 7 */
+	{ .accelerometer = 1, .light = 1, .temperature_set = 7 },
+/* MacBook Pro 4: accelerometer, backlight and temperature set 8 */
+	{ .accelerometer = 1, .light = 1, .temperature_set = 8 },
+/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */
+	{ .accelerometer = 1, .light = 1, .temperature_set = 9 },
 };
 
 /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
  * So we need to put "Apple MacBook Pro" before "Apple MacBook". */
 static __initdata struct dmi_system_id applesmc_whitelist[] = {
+	{ applesmc_dmi_match, "Apple MacBook Air", {
+	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
+		&applesmc_dmi_data[7]},
+	{ applesmc_dmi_match, "Apple MacBook Pro 4", {
+	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") },
+		&applesmc_dmi_data[8]},
+	{ applesmc_dmi_match, "Apple MacBook Pro 3", {
+	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
+		&applesmc_dmi_data[9]},
 	{ applesmc_dmi_match, "Apple MacBook Pro", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
-		(void*)&applesmc_dmi_data[0]},
+		&applesmc_dmi_data[0]},
 	{ applesmc_dmi_match, "Apple MacBook (v2)", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
-		(void*)&applesmc_dmi_data[1]},
+		&applesmc_dmi_data[1]},
 	{ applesmc_dmi_match, "Apple MacBook (v3)", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
-		(void*)&applesmc_dmi_data[6]},
+		&applesmc_dmi_data[6]},
 	{ applesmc_dmi_match, "Apple MacBook", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
-		(void*)&applesmc_dmi_data[2]},
+		&applesmc_dmi_data[2]},
 	{ applesmc_dmi_match, "Apple Macmini", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"Macmini") },
-		(void*)&applesmc_dmi_data[3]},
+		&applesmc_dmi_data[3]},
 	{ applesmc_dmi_match, "Apple MacPro2", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
-		(void*)&applesmc_dmi_data[4]},
+		&applesmc_dmi_data[4]},
 	{ applesmc_dmi_match, "Apple iMac", {
 	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
-		(void*)&applesmc_dmi_data[5]},
+		&applesmc_dmi_data[5]},
 	{ .ident = NULL }
 };
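
The applesmc changes replace a fixed 10 us polling loop with an exponential backoff (delays of 0x40, 0x80, ... up to 0x8000 us) and resend the command byte on each retry. Below is a userspace sketch of just the backoff loop, polling a dummy status function instead of an I/O port; wait_status() and read_status() are hypothetical names.

#include <stdio.h>
#include <unistd.h>

#define MIN_WAIT_US	0x0040
#define MAX_WAIT_US	0x8000

/* Stand-in for reading the SMC status port; it "becomes ready" after a
 * few polls just to exercise the loop. */
static int read_status(void)
{
	static int polls;
	return ++polls >= 3 ? 0x0c : 0x00;
}

/* Poll with exponentially growing delays until the expected status
 * appears, or give up once the per-poll delay would exceed the cap. */
static int wait_status(int expected)
{
	for (int us = MIN_WAIT_US; us < MAX_WAIT_US; us <<= 1) {
		usleep(us);
		if (read_status() == expected)
			return 0;
	}
	return -1;
}

int main(void)
{
	printf("wait_status: %d\n", wait_status(0x0c));
	return 0;
}
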
 
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 9b462bb..5fbfa34 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -75,7 +75,8 @@
 #define FSCM	0x09	/* Logical device: fans */
 #define VLM	0x0d	/* Logical device: voltages */
 #define TMS	0x0e	/* Logical device: temperatures */
-static const u8 logdev[3] = { FSCM, VLM, TMS };
+#define LDNI_MAX 3
+static const u8 logdev[LDNI_MAX] = { FSCM, VLM, TMS };
 
 #define LD_FAN		0
 #define LD_IN		1
@@ -489,11 +490,66 @@
 	SENSOR_ATTR(in10_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 10),
 };
 
+/* (temp & vin) channel status register alarm bits (pdf sec.11.5.12) */
+#define CHAN_ALM_MIN	0x02	/* min limit crossed */
+#define CHAN_ALM_MAX	0x04	/* max limit exceeded */
+#define TEMP_ALM_CRIT	0x08	/* temp crit exceeded (temp only) */
+
+/* show_in_min/max_alarm() reads data from the per-channel status
+   register (sec 11.5.12), not the vin event status registers (sec
+   11.5.2) that (legacy) show_in_alarm() reads (via data->in_alarms) */
+
+static ssize_t show_in_min_alarm(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
+}
+static ssize_t show_in_max_alarm(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
+}
+
+static struct sensor_device_attribute in_min_alarm[] = {
+	SENSOR_ATTR(in0_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 0),
+	SENSOR_ATTR(in1_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 1),
+	SENSOR_ATTR(in2_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 2),
+	SENSOR_ATTR(in3_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 3),
+	SENSOR_ATTR(in4_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 4),
+	SENSOR_ATTR(in5_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 5),
+	SENSOR_ATTR(in6_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 6),
+	SENSOR_ATTR(in7_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 7),
+	SENSOR_ATTR(in8_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 8),
+	SENSOR_ATTR(in9_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 9),
+	SENSOR_ATTR(in10_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 10),
+};
+static struct sensor_device_attribute in_max_alarm[] = {
+	SENSOR_ATTR(in0_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 0),
+	SENSOR_ATTR(in1_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 1),
+	SENSOR_ATTR(in2_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 2),
+	SENSOR_ATTR(in3_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 3),
+	SENSOR_ATTR(in4_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 4),
+	SENSOR_ATTR(in5_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 5),
+	SENSOR_ATTR(in6_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 6),
+	SENSOR_ATTR(in7_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 7),
+	SENSOR_ATTR(in8_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 8),
+	SENSOR_ATTR(in9_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 9),
+	SENSOR_ATTR(in10_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 10),
+};
+
 #define VIN_UNIT_ATTRS(X) \
 	&in_input[X].dev_attr.attr,	\
 	&in_status[X].dev_attr.attr,	\
 	&in_min[X].dev_attr.attr,	\
-	&in_max[X].dev_attr.attr
+	&in_max[X].dev_attr.attr,	\
+	&in_min_alarm[X].dev_attr.attr,	\
+	&in_max_alarm[X].dev_attr.attr
 
 static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -658,12 +714,68 @@
 		    show_therm_crit, set_therm_crit, 2+11),
 };
 
+/* show_therm_min/max_alarm() reads data from the per-channel voltage
+   status register (sec 11.5.12) */
+
+static ssize_t show_therm_min_alarm(struct device *dev,
+				struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
+}
+static ssize_t show_therm_max_alarm(struct device *dev,
+				struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
+}
+static ssize_t show_therm_crit_alarm(struct device *dev,
+				struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->in_status[nr] & TEMP_ALM_CRIT));
+}
+
+static struct sensor_device_attribute therm_min_alarm[] = {
+	SENSOR_ATTR(temp4_min_alarm, S_IRUGO,
+		    show_therm_min_alarm, NULL, 0+11),
+	SENSOR_ATTR(temp5_min_alarm, S_IRUGO,
+		    show_therm_min_alarm, NULL, 1+11),
+	SENSOR_ATTR(temp6_min_alarm, S_IRUGO,
+		    show_therm_min_alarm, NULL, 2+11),
+};
+static struct sensor_device_attribute therm_max_alarm[] = {
+	SENSOR_ATTR(temp4_max_alarm, S_IRUGO,
+		    show_therm_max_alarm, NULL, 0+11),
+	SENSOR_ATTR(temp5_max_alarm, S_IRUGO,
+		    show_therm_max_alarm, NULL, 1+11),
+	SENSOR_ATTR(temp6_max_alarm, S_IRUGO,
+		    show_therm_max_alarm, NULL, 2+11),
+};
+static struct sensor_device_attribute therm_crit_alarm[] = {
+	SENSOR_ATTR(temp4_crit_alarm, S_IRUGO,
+		    show_therm_crit_alarm, NULL, 0+11),
+	SENSOR_ATTR(temp5_crit_alarm, S_IRUGO,
+		    show_therm_crit_alarm, NULL, 1+11),
+	SENSOR_ATTR(temp6_crit_alarm, S_IRUGO,
+		    show_therm_crit_alarm, NULL, 2+11),
+};
+
 #define THERM_UNIT_ATTRS(X) \
 	&therm_input[X].dev_attr.attr,	\
 	&therm_status[X].dev_attr.attr,	\
 	&therm_min[X].dev_attr.attr,	\
 	&therm_max[X].dev_attr.attr,	\
-	&therm_crit[X].dev_attr.attr
+	&therm_crit[X].dev_attr.attr,	\
+	&therm_min_alarm[X].dev_attr.attr, \
+	&therm_max_alarm[X].dev_attr.attr, \
+	&therm_crit_alarm[X].dev_attr.attr
 
 static struct attribute * pc8736x_therm_attr_array[] = {
 	THERM_UNIT_ATTRS(0),
@@ -790,12 +902,76 @@
 }
 static DEVICE_ATTR(alarms_temp, S_IRUGO, show_temp_alarms, NULL);
 
+/* show_temp_min/max_alarm() reads data from the per-channel status
+   register (sec 12.3.7), not the temp event status registers (sec
+   12.3.2) that show_temp_alarm() reads (via data->temp_alarms) */
+
+static ssize_t show_temp_min_alarm(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MIN));
+}
+static ssize_t show_temp_max_alarm(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MAX));
+}
+static ssize_t show_temp_crit_alarm(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_ALM_CRIT));
+}
+
+static struct sensor_device_attribute temp_min_alarm[] = {
+	SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 0),
+	SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 1),
+	SENSOR_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 2),
+};
+static struct sensor_device_attribute temp_max_alarm[] = {
+	SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 0),
+	SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 1),
+	SENSOR_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 2),
+};
+static struct sensor_device_attribute temp_crit_alarm[] = {
+	SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 0),
+	SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 1),
+	SENSOR_ATTR(temp3_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 2),
+};
+
+#define TEMP_FAULT	0x40	/* open diode */
+static ssize_t show_temp_fault(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pc87360_data *data = pc87360_update_device(dev);
+	unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+	return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_FAULT));
+}
+static struct sensor_device_attribute temp_fault[] = {
+	SENSOR_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0),
+	SENSOR_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1),
+	SENSOR_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2),
+};
+
 #define TEMP_UNIT_ATTRS(X) \
 	&temp_input[X].dev_attr.attr,	\
 	&temp_status[X].dev_attr.attr,	\
 	&temp_min[X].dev_attr.attr,	\
 	&temp_max[X].dev_attr.attr,	\
-	&temp_crit[X].dev_attr.attr
+	&temp_crit[X].dev_attr.attr,	\
+	&temp_min_alarm[X].dev_attr.attr, \
+	&temp_max_alarm[X].dev_attr.attr, \
+	&temp_crit_alarm[X].dev_attr.attr, \
+	&temp_fault[X].dev_attr.attr
 
 static struct attribute * pc8736x_temp_attr_array[] = {
 	TEMP_UNIT_ATTRS(0),
@@ -809,8 +985,8 @@
 	.attrs = pc8736x_temp_attr_array,
 };
 
-static ssize_t show_name(struct device *dev, struct device_attribute
-			 *devattr, char *buf)
+static ssize_t show_name(struct device *dev,
+			struct device_attribute *devattr, char *buf)
 {
 	struct pc87360_data *data = dev_get_drvdata(dev);
 	return sprintf(buf, "%s\n", data->name);
@@ -955,7 +1131,7 @@
 	mutex_init(&data->update_lock);
 	platform_set_drvdata(pdev, data);
 
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < LDNI_MAX; i++) {
 		if (((data->address[i] = extra_isa[i]))
 		 && !request_region(extra_isa[i], PC87360_EXTENT,
 		 		    pc87360_driver.driver.name)) {
@@ -1031,7 +1207,15 @@
 			    || (err = device_create_file(dev,
 					&temp_crit[i].dev_attr))
 			    || (err = device_create_file(dev,
-					&temp_status[i].dev_attr)))
+					&temp_status[i].dev_attr))
+			    || (err = device_create_file(dev,
+					&temp_min_alarm[i].dev_attr))
+			    || (err = device_create_file(dev,
+					&temp_max_alarm[i].dev_attr))
+			    || (err = device_create_file(dev,
+					&temp_crit_alarm[i].dev_attr))
+			    || (err = device_create_file(dev,
+					&temp_fault[i].dev_attr)))
 				goto ERROR3;
 		}
 		if ((err = device_create_file(dev, &dev_attr_alarms_temp)))
@@ -1131,6 +1315,16 @@
 	mutex_unlock(&(data->lock));
 }
 
+/* (temp & vin) channel conversion status register flags (pdf sec.11.5.12) */
+#define CHAN_CNVRTD	0x80	/* new data ready */
+#define CHAN_ENA	0x01	/* enabled channel (temp or vin) */
+#define CHAN_ALM_ENA	0x10	/* propagate to alarms-reg ?? (chk val!) */
+#define CHAN_READY	(CHAN_ENA|CHAN_CNVRTD) /* sample ready mask */
+
+#define TEMP_OTS_OE	0x20	/* OTS Output Enable */
+#define VIN_RW1C_MASK	(CHAN_READY|CHAN_ALM_MAX|CHAN_ALM_MIN)   /* 0x87 */
+#define TEMP_RW1C_MASK	(VIN_RW1C_MASK|TEMP_ALM_CRIT|TEMP_FAULT) /* 0xCF */
+
 static void pc87360_init_device(struct platform_device *pdev,
 				int use_thermistors)
 {
@@ -1152,11 +1346,12 @@
 
 	nr = data->innr < 11 ? data->innr : 11;
 	for (i = 0; i < nr; i++) {
+		reg = pc87360_read_value(data, LD_IN, i,
+					 PC87365_REG_IN_STATUS);
+		dev_dbg(&pdev->dev, "bios in%d status:0x%02x\n", i, reg);
 		if (init >= init_in[i]) {
 			/* Forcibly enable voltage channel */
-			reg = pc87360_read_value(data, LD_IN, i,
-						 PC87365_REG_IN_STATUS);
-			if (!(reg & 0x01)) {
+			if (!(reg & CHAN_ENA)) {
 				dev_dbg(&pdev->dev, "Forcibly "
 					"enabling in%d\n", i);
 				pc87360_write_value(data, LD_IN, i,
@@ -1168,19 +1363,24 @@
 
 	/* We can't blindly trust the Super-I/O space configuration bit,
 	   most BIOS won't set it properly */
+	dev_dbg(&pdev->dev, "bios thermistors:%d\n", use_thermistors);
 	for (i = 11; i < data->innr; i++) {
 		reg = pc87360_read_value(data, LD_IN, i,
 					 PC87365_REG_TEMP_STATUS);
-		use_thermistors = use_thermistors || (reg & 0x01);
+		use_thermistors = use_thermistors || (reg & CHAN_ENA);
+		/* thermistors are temp[4-6], measured on vin[11-14] */
+		dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i-7, reg);
 	}
+	dev_dbg(&pdev->dev, "using thermistors:%d\n", use_thermistors);
 
 	i = use_thermistors ? 2 : 0;
 	for (; i < data->tempnr; i++) {
+		reg = pc87360_read_value(data, LD_TEMP, i,
+					 PC87365_REG_TEMP_STATUS);
+		dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i+1, reg);
 		if (init >= init_temp[i]) {
 			/* Forcibly enable temperature channel */
-			reg = pc87360_read_value(data, LD_TEMP, i,
-						 PC87365_REG_TEMP_STATUS);
-			if (!(reg & 0x01)) {
+			if (!(reg & CHAN_ENA)) {
 				dev_dbg(&pdev->dev, "Forcibly "
 					"enabling temp%d\n", i+1);
 				pc87360_write_value(data, LD_TEMP, i,
@@ -1197,7 +1397,7 @@
 				   diodes */
 				reg = pc87360_read_value(data, LD_TEMP,
 				      (i-11)/2, PC87365_REG_TEMP_STATUS);
-				if (reg & 0x01) {
+				if (reg & CHAN_ENA) {
 					dev_dbg(&pdev->dev, "Skipping "
 						"temp%d, pin already in use "
 						"by temp%d\n", i-7, (i-11)/2);
@@ -1207,7 +1407,7 @@
 				/* Forcibly enable thermistor channel */
 				reg = pc87360_read_value(data, LD_IN, i,
 							 PC87365_REG_IN_STATUS);
-				if (!(reg & 0x01)) {
+				if (!(reg & CHAN_ENA)) {
 					dev_dbg(&pdev->dev, "Forcibly "
 						"enabling temp%d\n", i-7);
 					pc87360_write_value(data, LD_IN, i,
@@ -1221,7 +1421,8 @@
 	if (data->innr) {
 		reg = pc87360_read_value(data, LD_IN, NO_BANK,
 					 PC87365_REG_IN_CONFIG);
-		if (reg & 0x01) {
+		dev_dbg(&pdev->dev, "bios vin-cfg:0x%02x\n", reg);
+		if (reg & CHAN_ENA) {
 			dev_dbg(&pdev->dev, "Forcibly "
 				"enabling monitoring (VLM)\n");
 			pc87360_write_value(data, LD_IN, NO_BANK,
@@ -1233,7 +1434,8 @@
 	if (data->tempnr) {
 		reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
 					 PC87365_REG_TEMP_CONFIG);
-		if (reg & 0x01) {
+		dev_dbg(&pdev->dev, "bios temp-cfg:0x%02x\n", reg);
+		if (reg & CHAN_ENA) {
 			dev_dbg(&pdev->dev, "Forcibly enabling "
 				"monitoring (TMS)\n");
 			pc87360_write_value(data, LD_TEMP, NO_BANK,
@@ -1336,11 +1538,11 @@
 			pc87360_write_value(data, LD_IN, i,
 					    PC87365_REG_IN_STATUS,
 					    data->in_status[i]);
-			if ((data->in_status[i] & 0x81) == 0x81) {
+			if ((data->in_status[i] & CHAN_READY) == CHAN_READY) {
 				data->in[i] = pc87360_read_value(data, LD_IN,
 					      i, PC87365_REG_IN);
 			}
-			if (data->in_status[i] & 0x01) {
+			if (data->in_status[i] & CHAN_ENA) {
 				data->in_min[i] = pc87360_read_value(data,
 						  LD_IN, i,
 						  PC87365_REG_IN_MIN);
@@ -1373,12 +1575,12 @@
 			pc87360_write_value(data, LD_TEMP, i,
 					    PC87365_REG_TEMP_STATUS,
 					    data->temp_status[i]);
-			if ((data->temp_status[i] & 0x81) == 0x81) {
+			if ((data->temp_status[i] & CHAN_READY) == CHAN_READY) {
 				data->temp[i] = pc87360_read_value(data,
 						LD_TEMP, i,
 						PC87365_REG_TEMP);
 			}
-			if (data->temp_status[i] & 0x01) {
+			if (data->temp_status[i] & CHAN_ENA) {
 				data->temp_min[i] = pc87360_read_value(data,
 						    LD_TEMP, i,
 						    PC87365_REG_TEMP_MIN);
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 1e328d19..3e01992 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -135,7 +135,7 @@
 	*status = get_pcf(adap, 1);
 #ifndef STUB_I2C
 	while (timeout-- && (*status & I2C_PCF_PIN)) {
-		adap->waitforpin();
+		adap->waitforpin(adap->data);
 		*status = get_pcf(adap, 1);
 	}
 	if (*status & I2C_PCF_LAB) {
@@ -208,7 +208,7 @@
 		return -ENXIO;
 	}
 	
-	printk(KERN_DEBUG "i2c-algo-pcf.o: deteted and initialized PCF8584.\n");
+	printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");
 
 	return 0;
 }
@@ -331,13 +331,16 @@
 	int i;
 	int ret=0, timeout, status;
     
+	if (adap->xfer_begin)
+		adap->xfer_begin(adap->data);
 
 	/* Check for bus busy */
 	timeout = wait_for_bb(adap);
 	if (timeout) {
 		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
 		            "Timeout waiting for BB in pcf_xfer\n");)
-		return -EIO;
+		i = -EIO;
+		goto out;
 	}
 	
 	for (i = 0;ret >= 0 && i < num; i++) {
@@ -359,12 +362,14 @@
 		if (timeout) {
 			if (timeout == -EINTR) {
 				/* arbitration lost */
-				return (-EINTR);
+				i = -EINTR;
+				goto out;
 			}
 			i2c_stop(adap);
 			DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
 				    "for PIN(1) in pcf_xfer\n");)
-			return (-EREMOTEIO);
+			i = -EREMOTEIO;
+			goto out;
 		}
     
 #ifndef STUB_I2C
@@ -372,7 +377,8 @@
 		if (status & I2C_PCF_LRB) {
 			i2c_stop(adap);
 			DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
-			return (-EREMOTEIO);
+			i = -EREMOTEIO;
+			goto out;
 		}
 #endif
     
@@ -404,6 +410,9 @@
 		}
 	}
 
+out:
+	if (adap->xfer_end)
+		adap->xfer_end(adap->data);
 	return (i);
 }
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index acadbc5..7f95905 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -97,6 +97,7 @@
 	    ICH9
 	    Tolapai
 	    ICH10
+	    PCH
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 1ea3925..424dad6 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -332,10 +332,6 @@
 	int error;
 	u8 temp;
 	
-	/* driver_data might come from user-space, so check it */
-	if (id->driver_data >= ARRAY_SIZE(chipname))
-		return -EINVAL;
-
 	if (amd756_ioport) {
 		dev_err(&pdev->dev, "Only one device supported "
 		       "(you have a strange motherboard, btw)\n");
@@ -412,7 +408,6 @@
 	.id_table	= amd756_ids,
 	.probe		= amd756_probe,
 	.remove		= __devexit_p(amd756_remove),
-	.dynids.use_driver_data = 1,
 };
 
 static int __init amd756_init(void)
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 8164de1..228f757 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -423,7 +423,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "i2c-cpm",
 	.algo		= &cpm_i2c_algo,
-	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
 };
 
 static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 7f38c01..0ed3ccb 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -104,7 +104,8 @@
 	return (clock);
 }
 
-static void pcf_isa_waitforpin(void) {
+static void pcf_isa_waitforpin(void *data)
+{
 	DEFINE_WAIT(wait);
 	int timeout = 2;
 	unsigned long flags;
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 1098f21..648aa7b 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -123,7 +123,7 @@
 				hydra_adap.name))
 		return -EBUSY;
 
-	hydra_bit_data.data = ioremap(base, pci_resource_len(dev, 0));
+	hydra_bit_data.data = pci_ioremap_bar(dev, 0);
 	if (hydra_bit_data.data == NULL) {
 		release_mem_region(base+offsetof(struct Hydra, CachePD), 4);
 		return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index dc7ea32..5123eb6 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,6 +41,7 @@
   Tolapai               0x5032     32     hard     yes     yes     yes
   ICH10                 0x3a30     32     hard     yes     yes     yes
   ICH10                 0x3a60     32     hard     yes     yes     yes
+  PCH                   0x3b30     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
@@ -576,6 +577,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
 	{ 0, }
 };
 
@@ -599,6 +601,7 @@
 	case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
 	case PCI_DEVICE_ID_INTEL_ICH10_4:
 	case PCI_DEVICE_ID_INTEL_ICH10_5:
+	case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
 		i801_features |= FEATURE_I2C_BLOCK_READ;
 		/* fall through */
 	case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 73dc52e..9f194d9 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -332,10 +332,6 @@
 	unsigned char temp;
 	int error = -ENODEV;
 
-	/* driver_data might come from user-space, so check it */
-	if (id->driver_data & 1 || id->driver_data > 0xff)
-		return -EINVAL;
-
 	/* Determine the address of the SMBus areas */
 	if (force_addr) {
 		vt596_smba = force_addr & 0xfff0;
@@ -483,7 +479,6 @@
 	.name		= "vt596_smbus",
 	.id_table	= vt596_ids,
 	.probe		= vt596_probe,
-	.dynids.use_driver_data = 1,
 };
 
 static int __init i2c_vt596_init(void)
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 1735682..4c35702 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -1,6 +1,8 @@
 #
 # Miscellaneous I2C chip drivers configuration
 #
+# *** DEPRECATED! Do not add new entries! See Makefile ***
+#
 
 menu "Miscellaneous I2C Chip support"
 
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index ca520fa..23d2a31 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -1,7 +1,8 @@
 #
 # Makefile for miscellaneous I2C chip drivers.
 #
-# Think twice before you add a new driver to this directory.
+# Do not add new drivers to this directory! It is DEPRECATED.
+#
 # Device drivers are better grouped according to the functionality they
 # implement rather than to the bus they are connected to. In particular:
 # * Hardware monitoring chip drivers go to drivers/hwmon
diff --git a/drivers/i2c/chips/at24.c b/drivers/i2c/chips/at24.c
index 2a4acb2..d477552 100644
--- a/drivers/i2c/chips/at24.c
+++ b/drivers/i2c/chips/at24.c
@@ -460,7 +460,6 @@
 	 */
 	at24->bin.attr.name = "eeprom";
 	at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
-	at24->bin.attr.owner = THIS_MODULE;
 	at24->bin.read = at24_bin_read;
 	at24->bin.size = chip.byte_len;
 
diff --git a/drivers/i2c/chips/ds1682.c b/drivers/i2c/chips/ds1682.c
index 23be4d4..f3ee4a1 100644
--- a/drivers/i2c/chips/ds1682.c
+++ b/drivers/i2c/chips/ds1682.c
@@ -190,7 +190,6 @@
 	.attr = {
 		.name = "eeprom",
 		.mode = S_IRUGO | S_IWUSR,
-		.owner = THIS_MODULE,
 	},
 	.size = DS1682_EEPROM_SIZE,
 	.read = ds1682_eeprom_read,
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
index 176126d..4b364ba 100644
--- a/drivers/i2c/chips/menelaus.c
+++ b/drivers/i2c/chips/menelaus.c
@@ -832,52 +832,52 @@
 
 static void menelaus_to_time(char *regs, struct rtc_time *t)
 {
-	t->tm_sec = BCD2BIN(regs[0]);
-	t->tm_min = BCD2BIN(regs[1]);
+	t->tm_sec = bcd2bin(regs[0]);
+	t->tm_min = bcd2bin(regs[1]);
 	if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
-		t->tm_hour = BCD2BIN(regs[2] & 0x1f) - 1;
+		t->tm_hour = bcd2bin(regs[2] & 0x1f) - 1;
 		if (regs[2] & RTC_HR_PM)
 			t->tm_hour += 12;
 	} else
-		t->tm_hour = BCD2BIN(regs[2] & 0x3f);
-	t->tm_mday = BCD2BIN(regs[3]);
-	t->tm_mon = BCD2BIN(regs[4]) - 1;
-	t->tm_year = BCD2BIN(regs[5]) + 100;
+		t->tm_hour = bcd2bin(regs[2] & 0x3f);
+	t->tm_mday = bcd2bin(regs[3]);
+	t->tm_mon = bcd2bin(regs[4]) - 1;
+	t->tm_year = bcd2bin(regs[5]) + 100;
 }
 
 static int time_to_menelaus(struct rtc_time *t, int regnum)
 {
 	int	hour, status;
 
-	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_sec));
+	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_sec));
 	if (status < 0)
 		goto fail;
 
-	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_min));
+	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_min));
 	if (status < 0)
 		goto fail;
 
 	if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
 		hour = t->tm_hour + 1;
 		if (hour > 12)
-			hour = RTC_HR_PM | BIN2BCD(hour - 12);
+			hour = RTC_HR_PM | bin2bcd(hour - 12);
 		else
-			hour = BIN2BCD(hour);
+			hour = bin2bcd(hour);
 	} else
-		hour = BIN2BCD(t->tm_hour);
+		hour = bin2bcd(t->tm_hour);
 	status = menelaus_write_reg(regnum++, hour);
 	if (status < 0)
 		goto fail;
 
-	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mday));
+	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mday));
 	if (status < 0)
 		goto fail;
 
-	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mon + 1));
+	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mon + 1));
 	if (status < 0)
 		goto fail;
 
-	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_year - 100));
+	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_year - 100));
 	if (status < 0)
 		goto fail;
 
@@ -914,7 +914,7 @@
 	}
 
 	menelaus_to_time(regs, t);
-	t->tm_wday = BCD2BIN(regs[6]);
+	t->tm_wday = bcd2bin(regs[6]);
 
 	return 0;
 }
@@ -927,7 +927,7 @@
 	status = time_to_menelaus(t, MENELAUS_RTC_SEC);
 	if (status < 0)
 		return status;
-	status = menelaus_write_reg(MENELAUS_RTC_WKDAY, BIN2BCD(t->tm_wday));
+	status = menelaus_write_reg(MENELAUS_RTC_WKDAY, bin2bcd(t->tm_wday));
 	if (status < 0) {
 		dev_err(&the_menelaus->client->dev, "rtc write reg %02x "
 				"err %d\n", MENELAUS_RTC_WKDAY, status);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 42e852d..5a485c2 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -266,6 +266,9 @@
 
 	client->dev.platform_data = info->platform_data;
 
+	if (info->archdata)
+		client->dev.archdata = *info->archdata;
+
 	client->flags = info->flags;
 	client->addr = info->addr;
 	client->irq = info->irq;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 74a369a..a820ca6 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -84,21 +84,40 @@
 
 	  If unsure, say N.
 
-config BLK_DEV_IDEDISK
-	tristate "Include IDE/ATA-2 DISK support"
-	---help---
-	  This will include enhanced support for MFM/RLL/IDE hard disks.  If
-	  you have a MFM/RLL/IDE disk, and there is no special reason to use
-	  the old hard disk driver instead, say Y.  If you have an SCSI-only
-	  system, you can say N here.
+config IDE_GD
+	tristate "generic ATA/ATAPI disk support"
+	default y
+	help
+	  Support for ATA/ATAPI disks (including ATAPI floppy drives).
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called ide-disk.
-	  Do not compile this driver as a module if your root file system
-	  (the one containing the directory /) is located on the IDE disk.
+	  To compile this driver as a module, choose M here.
+	  The module will be called ide-gd_mod.
 
 	  If unsure, say Y.
 
+config IDE_GD_ATA
+	bool "ATA disk support"
+	depends on IDE_GD
+	default y
+	help
+	  This will include support for ATA hard disks.
+
+	  If unsure, say Y.
+
+config IDE_GD_ATAPI
+	bool "ATAPI floppy support"
+	depends on IDE_GD
+	select IDE_ATAPI
+	help
+	  This will include support for ATAPI floppy drives
+	  (i.e. Iomega ZIP or MKE LS-120).
+
+	  For information about jumper settings and the question
+	  of when a ZIP drive uses a partition table, see
+	  <http://www.win.tue.nl/~aeb/linux/zip/zip-1.html>.
+
+	  If unsure, say N.
+
 config BLK_DEV_IDECS
 	tristate "PCMCIA IDE support"
 	depends on PCMCIA
@@ -163,29 +182,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ide-tape.
 
-config BLK_DEV_IDEFLOPPY
-	tristate "Include IDE/ATAPI FLOPPY support"
-	select IDE_ATAPI
-	---help---
-	  If you have an IDE floppy drive which uses the ATAPI protocol,
-	  answer Y.  ATAPI is a newer protocol used by IDE CD-ROM/tape/floppy
-	  drives, similar to the SCSI protocol.
-
-	  The LS-120 and the IDE/ATAPI Iomega ZIP drive are also supported by
-	  this driver. For information about jumper settings and the question
-	  of when a ZIP drive uses a partition table, see
-	  <http://www.win.tue.nl/~aeb/linux/zip/zip-1.html>.
-	  (ATAPI PD-CD/CDR drives are not supported by this driver; support
-	  for PD-CD/CDR drives is available if you answer Y to
-	  "SCSI emulation support", below).
-
-	  If you say Y here, the FLOPPY drive will be identified along with
-	  other IDE devices, as "hdb" or "hdc", or something similar (check
-	  the boot messages with dmesg).
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called ide-floppy.
-
 config BLK_DEV_IDESCSI
 	tristate "SCSI emulation support (DEPRECATED)"
 	depends on SCSI
@@ -332,7 +328,7 @@
 # TODO: split it on per host driver config options (or module parameters)
 config BLK_DEV_OFFBOARD
 	bool "Boot off-board chipsets first support (DEPRECATED)"
-	depends on BLK_DEV_IDEPCI && (BLK_DEV_AEC62XX || BLK_DEV_GENERIC || BLK_DEV_HPT34X || BLK_DEV_HPT366 || BLK_DEV_PDC202XX_NEW || BLK_DEV_PDC202XX_OLD || BLK_DEV_TC86C001)
+	depends on BLK_DEV_IDEPCI && (BLK_DEV_AEC62XX || BLK_DEV_GENERIC || BLK_DEV_HPT366 || BLK_DEV_PDC202XX_NEW || BLK_DEV_PDC202XX_OLD || BLK_DEV_TC86C001)
 	help
 	  Normally, IDE controllers built into the motherboard (on-board
 	  controllers) are assigned to ide0 and ide1 while those on add-in PCI
@@ -482,28 +478,6 @@
 
 	  It is safe to say Y to this question.
 
-config BLK_DEV_HPT34X
-	tristate "HPT34X chipset support"
-	depends on BROKEN
-	select BLK_DEV_IDEDMA_PCI
-	help
-	  This driver adds up to 4 more EIDE devices sharing a single
-	  interrupt. The HPT343 chipset in its current form is a non-bootable
-	  controller; the HPT345/HPT363 chipset is a bootable (needs BIOS FIX)
-	  PCI UDMA controllers. This driver requires dynamic tuning of the
-	  chipset during the ide-probe at boot time. It is reported to support
-	  DVD II drives, by the manufacturer.
-
-config HPT34X_AUTODMA
-	bool "HPT34X AUTODMA support (EXPERIMENTAL)"
-	depends on BLK_DEV_HPT34X && EXPERIMENTAL
-	help
-	  This is a dangerous thing to attempt currently! Please read the
-	  comments at the top of <file:drivers/ide/pci/hpt34x.c>.  If you say Y
-	  here, then say Y to "Use DMA by default when available" as well.
-
-	  If unsure, say N.
-
 config BLK_DEV_HPT366
 	tristate "HPT36X/37X chipset support"
 	select BLK_DEV_IDEDMA_PCI
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index ceaf779..9cf92ac 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -18,47 +18,96 @@
 
 obj-$(CONFIG_IDE)			+= ide-core.o
 
-ifeq ($(CONFIG_IDE_ARM), y)
-	ide-arm-core-y += arm/ide_arm.o
-	obj-y += ide-arm-core.o
-endif
+obj-$(CONFIG_IDE_ARM)			+= ide_arm.o
 
-obj-$(CONFIG_IDE)			+= legacy/ pci/
+obj-$(CONFIG_BLK_DEV_ALI14XX)		+= ali14xx.o
+obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
+obj-$(CONFIG_BLK_DEV_DTC2278)		+= dtc2278.o
+obj-$(CONFIG_BLK_DEV_HT6560B)		+= ht6560b.o
+obj-$(CONFIG_BLK_DEV_QD65XX)		+= qd65xx.o
+obj-$(CONFIG_BLK_DEV_4DRIVES)		+= ide-4drives.o
+
+obj-$(CONFIG_BLK_DEV_GAYLE)		+= gayle.o
+obj-$(CONFIG_BLK_DEV_FALCON_IDE)	+= falconide.o
+obj-$(CONFIG_BLK_DEV_MAC_IDE)		+= macide.o
+obj-$(CONFIG_BLK_DEV_Q40IDE)		+= q40ide.o
+obj-$(CONFIG_BLK_DEV_BUDDHA)		+= buddha.o
+
+obj-$(CONFIG_BLK_DEV_AEC62XX)		+= aec62xx.o
+obj-$(CONFIG_BLK_DEV_ALI15X3)		+= alim15x3.o
+obj-$(CONFIG_BLK_DEV_AMD74XX)		+= amd74xx.o
+obj-$(CONFIG_BLK_DEV_ATIIXP)		+= atiixp.o
+obj-$(CONFIG_BLK_DEV_CELLEB)		+= scc_pata.o
+obj-$(CONFIG_BLK_DEV_CMD64X)		+= cmd64x.o
+obj-$(CONFIG_BLK_DEV_CS5520)		+= cs5520.o
+obj-$(CONFIG_BLK_DEV_CS5530)		+= cs5530.o
+obj-$(CONFIG_BLK_DEV_CS5535)		+= cs5535.o
+obj-$(CONFIG_BLK_DEV_SC1200)		+= sc1200.o
+obj-$(CONFIG_BLK_DEV_CY82C693)		+= cy82c693.o
+obj-$(CONFIG_BLK_DEV_DELKIN)		+= delkin_cb.o
+obj-$(CONFIG_BLK_DEV_HPT366)		+= hpt366.o
+obj-$(CONFIG_BLK_DEV_IT8213)		+= it8213.o
+obj-$(CONFIG_BLK_DEV_IT821X)		+= it821x.o
+obj-$(CONFIG_BLK_DEV_JMICRON)		+= jmicron.o
+obj-$(CONFIG_BLK_DEV_NS87415)		+= ns87415.o
+obj-$(CONFIG_BLK_DEV_OPTI621)		+= opti621.o
+obj-$(CONFIG_BLK_DEV_PDC202XX_OLD)	+= pdc202xx_old.o
+obj-$(CONFIG_BLK_DEV_PDC202XX_NEW)	+= pdc202xx_new.o
+obj-$(CONFIG_BLK_DEV_PIIX)		+= piix.o
+obj-$(CONFIG_BLK_DEV_RZ1000)		+= rz1000.o
+obj-$(CONFIG_BLK_DEV_SVWKS)		+= serverworks.o
+obj-$(CONFIG_BLK_DEV_SGIIOC4)		+= sgiioc4.o
+obj-$(CONFIG_BLK_DEV_SIIMAGE)		+= siimage.o
+obj-$(CONFIG_BLK_DEV_SIS5513)		+= sis5513.o
+obj-$(CONFIG_BLK_DEV_SL82C105)		+= sl82c105.o
+obj-$(CONFIG_BLK_DEV_SLC90E66)		+= slc90e66.o
+obj-$(CONFIG_BLK_DEV_TC86C001)		+= tc86c001.o
+obj-$(CONFIG_BLK_DEV_TRIFLEX)		+= triflex.o
+obj-$(CONFIG_BLK_DEV_TRM290)		+= trm290.o
+obj-$(CONFIG_BLK_DEV_VIA82CXXX)		+= via82cxxx.o
+
+# Must appear at the end of the block
+obj-$(CONFIG_BLK_DEV_GENERIC)		+= ide-pci-generic.o
+ide-pci-generic-y			+= generic.o
 
 obj-$(CONFIG_IDEPCI_PCIBUS_ORDER)	+= ide-scan-pci.o
 
-ifeq ($(CONFIG_BLK_DEV_CMD640), y)
-	cmd640-core-y += pci/cmd640.o
-	obj-y += cmd640-core.o
-endif
+obj-$(CONFIG_BLK_DEV_CMD640)		+= cmd640.o
 
-obj-$(CONFIG_IDE)			+= ppc/
-obj-$(CONFIG_IDE_H8300)			+= h8300/
+obj-$(CONFIG_BLK_DEV_IDE_PMAC)		+= pmac.o
+
+obj-$(CONFIG_IDE_H8300)			+= ide-h8300.o
+
 obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
 obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
 
-ide-disk_mod-y += ide-disk.o ide-disk_ioctl.o
+ide-gd_mod-y += ide-gd.o
 ide-cd_mod-y += ide-cd.o ide-cd_ioctl.o ide-cd_verbose.o
-ide-floppy_mod-y += ide-floppy.o ide-floppy_ioctl.o
 
+ifeq ($(CONFIG_IDE_GD_ATA), y)
+	ide-gd_mod-y += ide-disk.o ide-disk_ioctl.o
 ifeq ($(CONFIG_IDE_PROC_FS), y)
-	ide-disk_mod-y += ide-disk_proc.o
-	ide-floppy_mod-y += ide-floppy_proc.o
+	ide-gd_mod-y += ide-disk_proc.o
+endif
 endif
 
-obj-$(CONFIG_BLK_DEV_IDEDISK)		+= ide-disk_mod.o
+ifeq ($(CONFIG_IDE_GD_ATAPI), y)
+	ide-gd_mod-y += ide-floppy.o ide-floppy_ioctl.o
+ifeq ($(CONFIG_IDE_PROC_FS), y)
+	ide-gd_mod-y += ide-floppy_proc.o
+endif
+endif
+
+obj-$(CONFIG_IDE_GD)			+= ide-gd_mod.o
 obj-$(CONFIG_BLK_DEV_IDECD)		+= ide-cd_mod.o
-obj-$(CONFIG_BLK_DEV_IDEFLOPPY)		+= ide-floppy_mod.o
 obj-$(CONFIG_BLK_DEV_IDETAPE)		+= ide-tape.o
 
-ifeq ($(CONFIG_BLK_DEV_IDECS), y)
-	ide-cs-core-y += legacy/ide-cs.o
-	obj-y += ide-cs-core.o
-endif
+obj-$(CONFIG_BLK_DEV_IDECS)		+= ide-cs.o
 
-ifeq ($(CONFIG_BLK_DEV_PLATFORM), y)
-	ide-platform-core-y += legacy/ide_platform.o
-	obj-y += ide-platform-core.o
-endif
+obj-$(CONFIG_BLK_DEV_PLATFORM)		+= ide_platform.o
 
-obj-$(CONFIG_IDE)			+= arm/ mips/
+obj-$(CONFIG_BLK_DEV_IDE_ICSIDE)	+= icside.o
+obj-$(CONFIG_BLK_DEV_IDE_RAPIDE)	+= rapide.o
+obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710)	+= palm_bk3710.o
+
+obj-$(CONFIG_BLK_DEV_IDE_AU1XXX)	+= au1xxx-ide.o
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/aec62xx.c
similarity index 100%
rename from drivers/ide/pci/aec62xx.c
rename to drivers/ide/aec62xx.c
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/ali14xx.c
similarity index 100%
rename from drivers/ide/legacy/ali14xx.c
rename to drivers/ide/ali14xx.c
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/alim15x3.c
similarity index 100%
rename from drivers/ide/pci/alim15x3.c
rename to drivers/ide/alim15x3.c
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/amd74xx.c
similarity index 100%
rename from drivers/ide/pci/amd74xx.c
rename to drivers/ide/amd74xx.c
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
deleted file mode 100644
index 5bc2605..0000000
--- a/drivers/ide/arm/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-
-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE)	+= icside.o
-obj-$(CONFIG_BLK_DEV_IDE_RAPIDE)	+= rapide.o
-obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710)	+= palm_bk3710.o
-
-ifeq ($(CONFIG_IDE_ARM), m)
-	obj-m += ide_arm.o
-endif
-
-EXTRA_CFLAGS	:= -Idrivers/ide
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/atiixp.c
similarity index 100%
rename from drivers/ide/pci/atiixp.c
rename to drivers/ide/atiixp.c
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
similarity index 100%
rename from drivers/ide/mips/au1xxx-ide.c
rename to drivers/ide/au1xxx-ide.c
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/buddha.c
similarity index 100%
rename from drivers/ide/legacy/buddha.c
rename to drivers/ide/buddha.c
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/cmd640.c
similarity index 100%
rename from drivers/ide/pci/cmd640.c
rename to drivers/ide/cmd640.c
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/cmd64x.c
similarity index 100%
rename from drivers/ide/pci/cmd64x.c
rename to drivers/ide/cmd64x.c
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/cs5520.c
similarity index 100%
rename from drivers/ide/pci/cs5520.c
rename to drivers/ide/cs5520.c
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/cs5530.c
similarity index 100%
rename from drivers/ide/pci/cs5530.c
rename to drivers/ide/cs5530.c
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/cs5535.c
similarity index 100%
rename from drivers/ide/pci/cs5535.c
rename to drivers/ide/cs5535.c
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/cy82c693.c
similarity index 100%
rename from drivers/ide/pci/cy82c693.c
rename to drivers/ide/cy82c693.c
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/delkin_cb.c
similarity index 81%
rename from drivers/ide/pci/delkin_cb.c
rename to drivers/ide/delkin_cb.c
index 8689a70..8f1b2d9 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -46,10 +46,27 @@
 	.quirkproc		= ide_undecoded_slave,
 };
 
+static unsigned int delkin_cb_init_chipset(struct pci_dev *dev)
+{
+	unsigned long base = pci_resource_start(dev, 0);
+	int i;
+
+	outb(0x02, base + 0x1e);	/* set nIEN to block interrupts */
+	inb(base + 0x17);		/* read status to clear interrupts */
+
+	for (i = 0; i < sizeof(setup); ++i) {
+		if (setup[i])
+			outb(setup[i], base + i);
+	}
+
+	return 0;
+}
+
 static const struct ide_port_info delkin_cb_port_info = {
 	.port_ops		= &delkin_cb_port_ops,
 	.host_flags		= IDE_HFLAG_IO_32BIT | IDE_HFLAG_UNMASK_IRQS |
 				  IDE_HFLAG_NO_DMA,
+	.init_chipset		= delkin_cb_init_chipset,
 };
 
 static int __devinit
@@ -57,7 +74,7 @@
 {
 	struct ide_host *host;
 	unsigned long base;
-	int i, rc;
+	int rc;
 	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
 	rc = pci_enable_device(dev);
@@ -72,12 +89,8 @@
 		return rc;
 	}
 	base = pci_resource_start(dev, 0);
-	outb(0x02, base + 0x1e);	/* set nIEN to block interrupts */
-	inb(base + 0x17);		/* read status to clear interrupts */
-	for (i = 0; i < sizeof(setup); ++i) {
-		if (setup[i])
-			outb(setup[i], base + i);
-	}
+
+	delkin_cb_init_chipset(dev);
 
 	memset(&hw, 0, sizeof(hw));
 	ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
@@ -110,6 +123,40 @@
 	pci_disable_device(dev);
 }
 
+#ifdef CONFIG_PM
+static int delkin_cb_suspend(struct pci_dev *dev, pm_message_t state)
+{
+	pci_save_state(dev);
+	pci_disable_device(dev);
+	pci_set_power_state(dev, pci_choose_state(dev, state));
+
+	return 0;
+}
+
+static int delkin_cb_resume(struct pci_dev *dev)
+{
+	struct ide_host *host = pci_get_drvdata(dev);
+	int rc;
+
+	pci_set_power_state(dev, PCI_D0);
+
+	rc = pci_enable_device(dev);
+	if (rc)
+		return rc;
+
+	pci_restore_state(dev);
+	pci_set_master(dev);
+
+	if (host->init_chipset)
+		host->init_chipset(dev);
+
+	return 0;
+}
+#else
+#define delkin_cb_suspend NULL
+#define delkin_cb_resume NULL
+#endif
+
 static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
 	{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -122,6 +169,8 @@
 	.id_table	= delkin_cb_pci_tbl,
 	.probe		= delkin_cb_probe,
 	.remove		= delkin_cb_remove,
+	.suspend	= delkin_cb_suspend,
+	.resume		= delkin_cb_resume,
 };
 
 static int __init delkin_cb_init(void)
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/dtc2278.c
similarity index 100%
rename from drivers/ide/legacy/dtc2278.c
rename to drivers/ide/dtc2278.c
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/falconide.c
similarity index 100%
rename from drivers/ide/legacy/falconide.c
rename to drivers/ide/falconide.c
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/gayle.c
similarity index 100%
rename from drivers/ide/legacy/gayle.c
rename to drivers/ide/gayle.c
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/generic.c
similarity index 100%
rename from drivers/ide/pci/generic.c
rename to drivers/ide/generic.c
diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
deleted file mode 100644
index 5eba16f..0000000
--- a/drivers/ide/h8300/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-$(CONFIG_IDE_H8300)			+= ide-h8300.o
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/hpt366.c
similarity index 98%
rename from drivers/ide/pci/hpt366.c
rename to drivers/ide/hpt366.c
index 9cf171c..a7909e9 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -3,7 +3,7 @@
  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
  * Portions Copyright (C) 2003		Red Hat Inc
  * Portions Copyright (C) 2007		Bartlomiej Zolnierkiewicz
- * Portions Copyright (C) 2005-2007	MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2008	MontaVista Software, Inc.
  *
  * Thanks to HighPoint Technologies for their assistance, and hardware.
  * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
@@ -748,26 +748,24 @@
 	struct pci_dev	*dev	= to_pci_dev(hwif->dev);
 	struct hpt_info *info	= hpt3xx_get_info(hwif->dev);
 
-	if (drive->quirk_list) {
-		if (info->chip_type >= HPT370) {
-			u8 scr1 = 0;
+	if (drive->quirk_list == 0)
+		return;
 
-			pci_read_config_byte(dev, 0x5a, &scr1);
-			if (((scr1 & 0x10) >> 4) != mask) {
-				if (mask)
-					scr1 |=  0x10;
-				else
-					scr1 &= ~0x10;
-				pci_write_config_byte(dev, 0x5a, scr1);
-			}
-		} else {
+	if (info->chip_type >= HPT370) {
+		u8 scr1 = 0;
+
+		pci_read_config_byte(dev, 0x5a, &scr1);
+		if (((scr1 & 0x10) >> 4) != mask) {
 			if (mask)
-				disable_irq(hwif->irq);
+				scr1 |=  0x10;
 			else
-				enable_irq (hwif->irq);
+				scr1 &= ~0x10;
+			pci_write_config_byte(dev, 0x5a, scr1);
 		}
-	} else
-		outb(ATA_DEVCTL_OBS | (mask ? 2 : 0), hwif->io_ports.ctl_addr);
+	} else if (mask)
+		disable_irq(hwif->irq);
+	else
+		enable_irq(hwif->irq);
 }
 
 /*
@@ -1289,7 +1287,6 @@
 
 static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
 {
-	struct pci_dev *dev	= to_pci_dev(hwif->dev);
 	struct hpt_info *info	= hpt3xx_get_info(hwif->dev);
 	int serialize		= HPT_SERIALIZE_IO;
 	u8  chip_type		= info->chip_type;
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/ht6560b.c
similarity index 100%
rename from drivers/ide/legacy/ht6560b.c
rename to drivers/ide/ht6560b.c
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/icside.c
similarity index 100%
rename from drivers/ide/arm/icside.c
rename to drivers/ide/icside.c
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/ide-4drives.c
similarity index 100%
rename from drivers/ide/legacy/ide-4drives.c
rename to drivers/ide/ide-4drives.c
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2e30571..4e58b9e 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -191,7 +191,7 @@
 {
 	struct ide_atapi_pc pc;
 
-	if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK)
+	if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
 		return 0;
 
 	ide_init_pc(&pc);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 3308b1c..13265a8 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -99,7 +99,7 @@
 /* Mark that we've seen a media change and invalidate our internal buffers. */
 static void cdrom_saw_media_change(ide_drive_t *drive)
 {
-	drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
+	drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
 	drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
 }
 
@@ -340,8 +340,8 @@
 	}
 
 	ide_debug_log(IDE_DBG_RQ, "%s: stat: 0x%x, good_stat: 0x%x, "
-		      "rq->cmd_type: 0x%x, err: 0x%x\n", __func__, stat,
-		      good_stat, rq->cmd_type, err);
+		      "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x, err: 0x%x\n",
+		      __func__, stat, good_stat, rq->cmd[0], rq->cmd_type, err);
 
 	if (blk_sense_request(rq)) {
 		/*
@@ -843,13 +843,10 @@
 	rq->q->prep_rq_fn(rq->q, rq);
 }
 
-/*
- * All other packet commands.
- */
 static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct request *rq)
 {
-
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+	ide_debug_log(IDE_DBG_FUNC, "Call %s, rq->cmd[0]: 0x%x\n",
+		      __func__, rq->cmd[0]);
 
 	/*
 	 * Some of the trailing request sense fields are optional,
@@ -876,7 +873,7 @@
 	if (!sense)
 		sense = &local_sense;
 
-	ide_debug_log(IDE_DBG_PC, "Call %s, rq->cmd[0]: 0x%x, write: 0x%x, "
+	ide_debug_log(IDE_DBG_PC, "Call %s, cmd[0]: 0x%x, write: 0x%x, "
 		      "timeout: %d, cmd_flags: 0x%x\n", __func__, cmd[0], write,
 		      timeout, cmd_flags);
 
@@ -1177,8 +1174,9 @@
 	unsigned short sectors_per_frame =
 		queue_hardsect_size(drive->queue) >> SECTOR_BITS;
 
-	ide_debug_log(IDE_DBG_RQ, "Call %s, write: 0x%x, secs_per_frame: %u\n",
-		      __func__, write, sectors_per_frame);
+	ide_debug_log(IDE_DBG_RQ, "Call %s, rq->cmd[0]: 0x%x, write: 0x%x, "
+		      "secs_per_frame: %u\n",
+		      __func__, rq->cmd[0], write, sectors_per_frame);
 
 	if (write) {
 		/* disk has become write protected */
@@ -1221,7 +1219,8 @@
 static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 {
 
-	ide_debug_log(IDE_DBG_PC, "Call %s, rq->cmd_type: 0x%x\n", __func__,
+	ide_debug_log(IDE_DBG_PC, "Call %s, rq->cmd[0]: 0x%x, "
+		      "rq->cmd_type: 0x%x\n", __func__, rq->cmd[0],
 		      rq->cmd_type);
 
 	if (blk_pc_request(rq))
@@ -1257,9 +1256,6 @@
 	}
 }
 
-/*
- * cdrom driver request routine.
- */
 static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 					sector_t block)
 {
@@ -1267,8 +1263,10 @@
 	ide_handler_t *fn;
 	int xferlen;
 
-	ide_debug_log(IDE_DBG_RQ, "Call %s, rq->cmd_type: 0x%x, block: %llu\n",
-		      __func__, rq->cmd_type, (unsigned long long)block);
+	ide_debug_log(IDE_DBG_RQ, "Call %s, rq->cmd[0]: 0x%x, "
+		      "rq->cmd_type: 0x%x, block: %llu\n",
+		      __func__, rq->cmd[0], rq->cmd_type,
+		      (unsigned long long)block);
 
 	if (blk_fs_request(rq)) {
 		if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
@@ -1412,6 +1410,10 @@
 
 	*capacity = 1 + be32_to_cpu(capbuf.lba);
 	*sectors_per_frame = blocklen >> SECTOR_BITS;
+
+	ide_debug_log(IDE_DBG_PROBE, "%s: cap: %lu, sectors_per_frame: %lu\n",
+		      __func__, *capacity, *sectors_per_frame);
+
 	return 0;
 }
 
@@ -1643,6 +1645,9 @@
 		maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
 	}
 
+	ide_debug_log(IDE_DBG_PROBE, "%s: curspeed: %u, maxspeed: %u\n",
+		      __func__, curspeed, maxspeed);
+
 	cd->current_speed = (curspeed + (176/2)) / 176;
 	cd->max_speed = (maxspeed + (176/2)) / 176;
 }
@@ -1732,7 +1737,7 @@
 		return 0;
 
 	if ((buf[8 + 6] & 0x01) == 0)
-		drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
+		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
 	if (buf[8 + 6] & 0x08)
 		drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
 	if (buf[8 + 3] & 0x01)
@@ -1777,7 +1782,7 @@
 	if ((cdi->mask & CDC_DVD_R) == 0 || (cdi->mask & CDC_DVD_RAM) == 0)
 		printk(KERN_CONT " DVD%s%s",
 				 (cdi->mask & CDC_DVD_R) ? "" : "-R",
-				 (cdi->mask & CDC_DVD_RAM) ? "" : "-RAM");
+				 (cdi->mask & CDC_DVD_RAM) ? "" : "/RAM");
 
 	if ((cdi->mask & CDC_CD_R) == 0 || (cdi->mask & CDC_CD_RW) == 0)
 		printk(KERN_CONT " CD%s%s",
@@ -1908,6 +1913,16 @@
 	IDE_PROC_DEVSET(dsc_overlap, 0, 1),
 	{ 0 },
 };
+
+static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
+{
+	return idecd_proc;
+}
+
+static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive)
+{
+	return idecd_settings;
+}
 #endif
 
 static const struct cd_list_entry ide_cd_quirks_list[] = {
@@ -1986,8 +2001,8 @@
 	if (!drive->queue->unplug_delay)
 		drive->queue->unplug_delay = 1;
 
-	drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT |
-		       ide_cd_flags(id);
+	drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
+	drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
 
 	if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
 	    fw_rev[4] == '1' && fw_rev[6] <= '2')
@@ -2069,8 +2084,8 @@
 	.end_request		= ide_end_request,
 	.error			= __ide_error,
 #ifdef CONFIG_IDE_PROC_FS
-	.proc			= idecd_proc,
-	.settings		= idecd_settings,
+	.proc_entries		= ide_cd_proc_entries,
+	.proc_devsets		= ide_cd_proc_devsets,
 #endif
 };
 
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 74231b4..df3df00 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -86,8 +86,8 @@
 
 	if (slot_nr == CDSL_CURRENT) {
 		(void) cdrom_check_status(drive, NULL);
-		retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0;
-		drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
+		retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
+		drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
 		return retval;
 	} else {
 		return -EINVAL;
@@ -136,7 +136,7 @@
 		sense = &my_sense;
 
 	/* If the drive cannot lock the door, just pretend. */
-	if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) {
+	if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
 		stat = 0;
 	} else {
 		unsigned char cmd[BLK_MAX_CDB];
@@ -157,7 +157,7 @@
 	    (sense->asc == 0x24 || sense->asc == 0x20)) {
 		printk(KERN_ERR "%s: door locking not supported\n",
 			drive->name);
-		drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
+		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
 		stat = 0;
 	}
 
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/ide-cs.c
similarity index 100%
rename from drivers/ide/legacy/ide-cs.c
rename to drivers/ide/ide-cs.c
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3853bde..223750c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -14,9 +14,6 @@
  * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
  */
 
-#define IDEDISK_VERSION	"1.18"
-
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
@@ -39,46 +36,8 @@
 #include <asm/io.h>
 #include <asm/div64.h>
 
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
-#define IDE_DISK_MINORS		(1 << PARTN_BITS)
-#else
-#define IDE_DISK_MINORS		0
-#endif
-
 #include "ide-disk.h"
 
-static DEFINE_MUTEX(idedisk_ref_mutex);
-
-#define to_ide_disk(obj) container_of(obj, struct ide_disk_obj, kref)
-
-static void ide_disk_release(struct kref *);
-
-static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
-{
-	struct ide_disk_obj *idkp = NULL;
-
-	mutex_lock(&idedisk_ref_mutex);
-	idkp = ide_disk_g(disk);
-	if (idkp) {
-		if (ide_device_get(idkp->drive))
-			idkp = NULL;
-		else
-			kref_get(&idkp->kref);
-	}
-	mutex_unlock(&idedisk_ref_mutex);
-	return idkp;
-}
-
-static void ide_disk_put(struct ide_disk_obj *idkp)
-{
-	ide_drive_t *drive = idkp->drive;
-
-	mutex_lock(&idedisk_ref_mutex);
-	kref_put(&idkp->kref, ide_disk_release);
-	ide_device_put(drive);
-	mutex_unlock(&idedisk_ref_mutex);
-}
-
 static const u8 ide_rw_cmds[] = {
 	ATA_CMD_READ_MULTI,
 	ATA_CMD_WRITE_MULTI,
@@ -374,7 +333,7 @@
 	}
 }
 
-static void init_idedisk_capacity(ide_drive_t *drive)
+static int ide_disk_get_capacity(ide_drive_t *drive)
 {
 	u16 *id = drive->id;
 	int lba;
@@ -403,11 +362,28 @@
 		if (ata_id_hpa_enabled(id))
 			idedisk_check_hpa(drive);
 	}
-}
 
-sector_t ide_disk_capacity(ide_drive_t *drive)
-{
-	return drive->capacity64;
+	/* limit drive capacity to 137GB if LBA48 cannot be used */
+	if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
+	    drive->capacity64 > 1ULL << 28) {
+		printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
+		       "%llu sectors (%llu MB)\n",
+		       drive->name, (unsigned long long)drive->capacity64,
+		       sectors_to_MB(drive->capacity64));
+		drive->capacity64 = 1ULL << 28;
+	}
+
+	if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
+	    (drive->dev_flags & IDE_DFLAG_LBA48)) {
+		if (drive->capacity64 > 1ULL << 28) {
+			printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
+					 " will be used for accessing sectors "
+					 "> %u\n", drive->name, 1 << 28);
+		} else
+			drive->dev_flags &= ~IDE_DFLAG_LBA48;
+	}
+
+	return 0;
 }
 
 static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
@@ -508,7 +484,7 @@
 		 * time we have trimmed the drive capacity if LBA48 is
 		 * not available so we don't need to recheck that.
 		 */
-		capacity = ide_disk_capacity(drive);
+		capacity = ide_gd_capacity(drive);
 		barrier = ata_id_flush_enabled(id) &&
 			(drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
 			((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
@@ -616,7 +592,12 @@
 
 ide_ext_devset_rw_sync(nowerr, nowerr);
 
-static void idedisk_setup(ide_drive_t *drive)
+static int ide_disk_check(ide_drive_t *drive, const char *s)
+{
+	return 1;
+}
+
+static void ide_disk_setup(ide_drive_t *drive)
 {
 	struct ide_disk_obj *idkp = drive->driver_data;
 	ide_hwif_t *hwif = drive->hwif;
@@ -652,33 +633,13 @@
 			 drive->queue->max_sectors / 2);
 
 	/* calculate drive capacity, and select LBA if possible */
-	init_idedisk_capacity(drive);
-
-	/* limit drive capacity to 137GB if LBA48 cannot be used */
-	if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
-	    drive->capacity64 > 1ULL << 28) {
-		printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
-		       "%llu sectors (%llu MB)\n",
-		       drive->name, (unsigned long long)drive->capacity64,
-		       sectors_to_MB(drive->capacity64));
-		drive->capacity64 = 1ULL << 28;
-	}
-
-	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
-	    (drive->dev_flags & IDE_DFLAG_LBA48)) {
-		if (drive->capacity64 > 1ULL << 28) {
-			printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
-					 " will be used for accessing sectors "
-					 "> %u\n", drive->name, 1 << 28);
-		} else
-			drive->dev_flags &= ~IDE_DFLAG_LBA48;
-	}
+	ide_disk_get_capacity(drive);
 
 	/*
 	 * if possible, give fdisk access to more of the drive,
 	 * by correcting bios_cyls:
 	 */
-	capacity = ide_disk_capacity(drive);
+	capacity = ide_gd_capacity(drive);
 
 	if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
 		if (ata_id_lba48_enabled(drive->id)) {
@@ -718,9 +679,17 @@
 		drive->dev_flags |= IDE_DFLAG_WCACHE;
 
 	set_wcache(drive, 1);
+
+	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
+	    (drive->head == 0 || drive->head > 16)) {
+		printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
+			drive->name, drive->head);
+		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
+	} else
+		drive->dev_flags |= IDE_DFLAG_ATTACH;
 }
 
-static void ide_cacheflush_p(ide_drive_t *drive)
+static void ide_disk_flush(ide_drive_t *drive)
 {
 	if (ata_id_flush_enabled(drive->id) == 0 ||
 	    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
@@ -730,267 +699,40 @@
 		printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
 }
 
-static void ide_disk_remove(ide_drive_t *drive)
+static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
 {
-	struct ide_disk_obj *idkp = drive->driver_data;
-	struct gendisk *g = idkp->disk;
-
-	ide_proc_unregister_driver(drive, idkp->driver);
-
-	del_gendisk(g);
-
-	ide_cacheflush_p(drive);
-
-	ide_disk_put(idkp);
+	return 0;
 }
 
-static void ide_disk_release(struct kref *kref)
-{
-	struct ide_disk_obj *idkp = to_ide_disk(kref);
-	ide_drive_t *drive = idkp->drive;
-	struct gendisk *g = idkp->disk;
-
-	drive->driver_data = NULL;
-	g->private_data = NULL;
-	put_disk(g);
-	kfree(idkp);
-}
-
-static int ide_disk_probe(ide_drive_t *drive);
-
-/*
- * On HPA drives the capacity needs to be
- * reinitilized on resume otherwise the disk
- * can not be used and a hard reset is required
- */
-static void ide_disk_resume(ide_drive_t *drive)
-{
-	if (ata_id_hpa_enabled(drive->id))
-		init_idedisk_capacity(drive);
-}
-
-static void ide_device_shutdown(ide_drive_t *drive)
-{
-#ifdef	CONFIG_ALPHA
-	/* On Alpha, halt(8) doesn't actually turn the machine off,
-	   it puts you into the sort of firmware monitor. Typically,
-	   it's used to boot another kernel image, so it's not much
-	   different from reboot(8). Therefore, we don't need to
-	   spin down the disk in this case, especially since Alpha
-	   firmware doesn't handle disks in standby mode properly.
-	   On the other hand, it's reasonably safe to turn the power
-	   off when the shutdown process reaches the firmware prompt,
-	   as the firmware initialization takes rather long time -
-	   at least 10 seconds, which should be sufficient for
-	   the disk to expire its write cache. */
-	if (system_state != SYSTEM_POWER_OFF) {
-#else
-	if (system_state == SYSTEM_RESTART) {
-#endif
-		ide_cacheflush_p(drive);
-		return;
-	}
-
-	printk(KERN_INFO "Shutdown: %s\n", drive->name);
-
-	drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
-}
-
-static ide_driver_t idedisk_driver = {
-	.gen_driver = {
-		.owner		= THIS_MODULE,
-		.name		= "ide-disk",
-		.bus		= &ide_bus_type,
-	},
-	.probe			= ide_disk_probe,
-	.remove			= ide_disk_remove,
-	.resume			= ide_disk_resume,
-	.shutdown		= ide_device_shutdown,
-	.version		= IDEDISK_VERSION,
-	.do_request		= ide_do_rw_disk,
-	.end_request		= ide_end_request,
-	.error			= __ide_error,
-#ifdef CONFIG_IDE_PROC_FS
-	.proc			= ide_disk_proc,
-	.settings		= ide_disk_settings,
-#endif
-};
-
-static int idedisk_set_doorlock(ide_drive_t *drive, int on)
+static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
+				 int on)
 {
 	ide_task_t task;
+	int ret;
+
+	if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
+		return 0;
 
 	memset(&task, 0, sizeof(task));
 	task.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
 	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 
-	return ide_no_data_taskfile(drive, &task);
+	ret = ide_no_data_taskfile(drive, &task);
+
+	if (ret)
+		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
+
+	return ret;
 }
 
-static int idedisk_open(struct inode *inode, struct file *filp)
-{
-	struct gendisk *disk = inode->i_bdev->bd_disk;
-	struct ide_disk_obj *idkp;
-	ide_drive_t *drive;
-
-	idkp = ide_disk_get(disk);
-	if (idkp == NULL)
-		return -ENXIO;
-
-	drive = idkp->drive;
-
-	idkp->openers++;
-
-	if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
-		check_disk_change(inode->i_bdev);
-		/*
-		 * Ignore the return code from door_lock,
-		 * since the open() has already succeeded,
-		 * and the door_lock is irrelevant at this point.
-		 */
-		if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) &&
-		    idedisk_set_doorlock(drive, 1))
-			drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
-	}
-	return 0;
-}
-
-static int idedisk_release(struct inode *inode, struct file *filp)
-{
-	struct gendisk *disk = inode->i_bdev->bd_disk;
-	struct ide_disk_obj *idkp = ide_disk_g(disk);
-	ide_drive_t *drive = idkp->drive;
-
-	if (idkp->openers == 1)
-		ide_cacheflush_p(drive);
-
-	if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
-		if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) &&
-		    idedisk_set_doorlock(drive, 0))
-			drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
-	}
-
-	idkp->openers--;
-
-	ide_disk_put(idkp);
-
-	return 0;
-}
-
-static int idedisk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
-	struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
-	ide_drive_t *drive = idkp->drive;
-
-	geo->heads = drive->bios_head;
-	geo->sectors = drive->bios_sect;
-	geo->cylinders = (u16)drive->bios_cyl; /* truncate */
-	return 0;
-}
-
-static int idedisk_media_changed(struct gendisk *disk)
-{
-	struct ide_disk_obj *idkp = ide_disk_g(disk);
-	ide_drive_t *drive = idkp->drive;
-
-	/* do not scan partitions twice if this is a removable device */
-	if (drive->dev_flags & IDE_DFLAG_ATTACH) {
-		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
-		return 0;
-	}
-
-	/* if removable, always assume it was changed */
-	return !!(drive->dev_flags & IDE_DFLAG_REMOVABLE);
-}
-
-static int idedisk_revalidate_disk(struct gendisk *disk)
-{
-	struct ide_disk_obj *idkp = ide_disk_g(disk);
-	set_capacity(disk, ide_disk_capacity(idkp->drive));
-	return 0;
-}
-
-static struct block_device_operations idedisk_ops = {
-	.owner			= THIS_MODULE,
-	.open			= idedisk_open,
-	.release		= idedisk_release,
-	.ioctl			= ide_disk_ioctl,
-	.getgeo			= idedisk_getgeo,
-	.media_changed		= idedisk_media_changed,
-	.revalidate_disk	= idedisk_revalidate_disk
+const struct ide_disk_ops ide_ata_disk_ops = {
+	.check		= ide_disk_check,
+	.get_capacity	= ide_disk_get_capacity,
+	.setup		= ide_disk_setup,
+	.flush		= ide_disk_flush,
+	.init_media	= ide_disk_init_media,
+	.set_doorlock	= ide_disk_set_doorlock,
+	.do_request	= ide_do_rw_disk,
+	.end_request	= ide_end_request,
+	.ioctl		= ide_disk_ioctl,
 };
-
-MODULE_DESCRIPTION("ATA DISK Driver");
-
-static int ide_disk_probe(ide_drive_t *drive)
-{
-	struct ide_disk_obj *idkp;
-	struct gendisk *g;
-
-	/* strstr("foo", "") is non-NULL */
-	if (!strstr("ide-disk", drive->driver_req))
-		goto failed;
-
-	if (drive->media != ide_disk)
-		goto failed;
-
-	idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
-	if (!idkp)
-		goto failed;
-
-	g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
-	if (!g)
-		goto out_free_idkp;
-
-	ide_init_disk(g, drive);
-
-	kref_init(&idkp->kref);
-
-	idkp->drive = drive;
-	idkp->driver = &idedisk_driver;
-	idkp->disk = g;
-
-	g->private_data = &idkp->driver;
-
-	drive->driver_data = idkp;
-
-	idedisk_setup(drive);
-	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
-	    (drive->head == 0 || drive->head > 16)) {
-		printk(KERN_ERR "%s: INVALID GEOMETRY: %d PHYSICAL HEADS?\n",
-			drive->name, drive->head);
-		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
-	} else
-		drive->dev_flags |= IDE_DFLAG_ATTACH;
-
-	g->minors = IDE_DISK_MINORS;
-	g->driverfs_dev = &drive->gendev;
-	g->flags |= GENHD_FL_EXT_DEVT;
-	if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
-		g->flags = GENHD_FL_REMOVABLE;
-	set_capacity(g, ide_disk_capacity(drive));
-	g->fops = &idedisk_ops;
-	add_disk(g);
-	return 0;
-
-out_free_idkp:
-	kfree(idkp);
-failed:
-	return -ENODEV;
-}
-
-static void __exit idedisk_exit(void)
-{
-	driver_unregister(&idedisk_driver.gen_driver);
-}
-
-static int __init idedisk_init(void)
-{
-	return driver_register(&idedisk_driver.gen_driver);
-}
-
-MODULE_ALIAS("ide:*m-disk*");
-MODULE_ALIAS("ide-disk");
-module_init(idedisk_init);
-module_exit(idedisk_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-disk.h b/drivers/ide/ide-disk.h
index a82fa43..b234b0f 100644
--- a/drivers/ide/ide-disk.h
+++ b/drivers/ide/ide-disk.h
@@ -1,19 +1,11 @@
 #ifndef __IDE_DISK_H
 #define __IDE_DISK_H
 
-struct ide_disk_obj {
-	ide_drive_t	*drive;
-	ide_driver_t	*driver;
-	struct gendisk	*disk;
-	struct kref	kref;
-	unsigned int	openers;	/* protected by BKL for now */
-};
+#include "ide-gd.h"
 
-#define ide_disk_g(disk) \
-	container_of((disk)->private_data, struct ide_disk_obj, driver)
-
+#ifdef CONFIG_IDE_GD_ATA
 /* ide-disk.c */
-sector_t ide_disk_capacity(ide_drive_t *);
+extern const struct ide_disk_ops ide_ata_disk_ops;
 ide_decl_devset(address);
 ide_decl_devset(multcount);
 ide_decl_devset(nowerr);
@@ -21,12 +13,17 @@
 ide_decl_devset(acoustic);
 
 /* ide-disk_ioctl.c */
-int ide_disk_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+int ide_disk_ioctl(ide_drive_t *, struct inode *, struct file *, unsigned int,
+		   unsigned long);
 
 #ifdef CONFIG_IDE_PROC_FS
 /* ide-disk_proc.c */
 extern ide_proc_entry_t ide_disk_proc[];
 extern const struct ide_proc_devset ide_disk_settings[];
 #endif
+#else
+#define ide_disk_proc		NULL
+#define ide_disk_settings	NULL
+#endif
 
 #endif /* __IDE_DISK_H */
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
index a6cf1a03..a49698b 100644
--- a/drivers/ide/ide-disk_ioctl.c
+++ b/drivers/ide/ide-disk_ioctl.c
@@ -13,12 +13,10 @@
 { 0 }
 };
 
-int ide_disk_ioctl(struct inode *inode, struct file *file,
+int ide_disk_ioctl(ide_drive_t *drive, struct inode *inode, struct file *file,
 		   unsigned int cmd, unsigned long arg)
 {
 	struct block_device *bdev = inode->i_bdev;
-	struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
-	ide_drive_t *drive = idkp->drive;
 	int err;
 
 	err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
index 4724976..1146f42 100644
--- a/drivers/ide/ide-disk_proc.c
+++ b/drivers/ide/ide-disk_proc.c
@@ -56,7 +56,7 @@
 	ide_drive_t*drive = (ide_drive_t *)data;
 	int len;
 
-	len = sprintf(page, "%llu\n", (long long)ide_disk_capacity(drive));
+	len = sprintf(page, "%llu\n", (long long)ide_gd_capacity(drive));
 
 	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
 }
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
index 0903782..cac431f 100644
--- a/drivers/ide/ide-dma-sff.c
+++ b/drivers/ide/ide-dma-sff.c
@@ -130,7 +130,7 @@
 			xcount = bcount & 0xffff;
 			if (is_trm290)
 				xcount = ((xcount >> 2) - 1) << 16;
-			if (xcount == 0x0000) {
+			else if (xcount == 0x0000) {
 				if (count++ >= PRD_ENTRIES)
 					goto use_pio_instead;
 				*table++ = cpu_to_le32(0x8000);
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index cf0aa25..aeb1ad7 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -15,12 +15,6 @@
  * Documentation/ide/ChangeLog.ide-floppy.1996-2002
  */
 
-#define DRV_NAME "ide-floppy"
-#define PFX DRV_NAME ": "
-
-#define IDEFLOPPY_VERSION "1.00"
-
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
@@ -49,19 +43,6 @@
 
 #include "ide-floppy.h"
 
-/* module parameters */
-static unsigned long debug_mask;
-module_param(debug_mask, ulong, 0644);
-
-/* define to see debug info */
-#define IDEFLOPPY_DEBUG_LOG	0
-
-#if IDEFLOPPY_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
 /*
  * After each failed packet command we issue a request sense command and retry
  * the packet command IDEFLOPPY_MAX_PC_RETRIES times.
@@ -83,43 +64,13 @@
 /* Error code returned in rq->errors to the higher part of the driver. */
 #define	IDEFLOPPY_ERROR_GENERAL		101
 
-static DEFINE_MUTEX(idefloppy_ref_mutex);
-
-static void idefloppy_cleanup_obj(struct kref *);
-
-static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk)
-{
-	struct ide_floppy_obj *floppy = NULL;
-
-	mutex_lock(&idefloppy_ref_mutex);
-	floppy = ide_drv_g(disk, ide_floppy_obj);
-	if (floppy) {
-		if (ide_device_get(floppy->drive))
-			floppy = NULL;
-		else
-			kref_get(&floppy->kref);
-	}
-	mutex_unlock(&idefloppy_ref_mutex);
-	return floppy;
-}
-
-static void ide_floppy_put(struct ide_floppy_obj *floppy)
-{
-	ide_drive_t *drive = floppy->drive;
-
-	mutex_lock(&idefloppy_ref_mutex);
-	kref_put(&floppy->kref, idefloppy_cleanup_obj);
-	ide_device_put(drive);
-	mutex_unlock(&idefloppy_ref_mutex);
-}
-
 /*
  * Used to finish servicing a request. For read/write requests, we will call
  * ide_end_request to pass to the next buffer.
  */
-static int idefloppy_end_request(ide_drive_t *drive, int uptodate, int nsecs)
+static int ide_floppy_end_request(ide_drive_t *drive, int uptodate, int nsecs)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct request *rq = HWGROUP(drive)->rq;
 	int error;
 
@@ -161,12 +112,12 @@
 	struct bio *bio = rq->bio;
 
 	while ((bio = rq->bio) != NULL)
-		idefloppy_end_request(drive, 1, 0);
+		ide_floppy_end_request(drive, 1, 0);
 }
 
 static void ide_floppy_callback(ide_drive_t *drive, int dsc)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct ide_atapi_pc *pc = drive->pc;
 	int uptodate = pc->error ? 0 : 1;
 
@@ -200,10 +151,10 @@
 			       "Aborting request!\n");
 	}
 
-	idefloppy_end_request(drive, uptodate, 0);
+	ide_floppy_end_request(drive, uptodate, 0);
 }
 
-static void ide_floppy_report_error(idefloppy_floppy_t *floppy,
+static void ide_floppy_report_error(struct ide_disk_obj *floppy,
 				    struct ide_atapi_pc *pc)
 {
	/* suppress error messages resulting from Medium not present */
@@ -222,7 +173,7 @@
 static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
 		struct ide_atapi_pc *pc)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 
 	if (floppy->failed_pc == NULL &&
 	    pc->c[0] != GPCMD_REQUEST_SENSE)
@@ -286,7 +237,7 @@
 				    struct ide_atapi_pc *pc, struct request *rq,
 				    unsigned long sector)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	int block = sector / floppy->bs_factor;
 	int blocks = rq->nr_sectors / floppy->bs_factor;
 	int cmd = rq_data_dir(rq);
@@ -310,7 +261,7 @@
 	pc->flags |= PC_FLAG_DMA_OK;
 }
 
-static void idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy,
+static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
 		struct ide_atapi_pc *pc, struct request *rq)
 {
 	ide_init_pc(pc);
@@ -329,13 +280,12 @@
 	pc->req_xfer = pc->buf_size = rq->data_len;
 }
 
-static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
-		struct request *rq, sector_t block_s)
+static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
+					     struct request *rq, sector_t block)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	ide_hwif_t *hwif = drive->hwif;
 	struct ide_atapi_pc *pc;
-	unsigned long block = (unsigned long)block_s;
 
 	ide_debug_log(IDE_DBG_FUNC, "%s: dev: %s, cmd: 0x%x, cmd_type: %x, "
 		      "errors: %d\n",
@@ -353,7 +303,7 @@
 		else
 			printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
 
-		idefloppy_end_request(drive, 0, 0);
+		ide_floppy_end_request(drive, 0, 0);
 		return ide_stopped;
 	}
 	if (blk_fs_request(rq)) {
@@ -361,11 +311,11 @@
 		    (rq->nr_sectors % floppy->bs_factor)) {
 			printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
 				drive->name);
-			idefloppy_end_request(drive, 0, 0);
+			ide_floppy_end_request(drive, 0, 0);
 			return ide_stopped;
 		}
 		pc = &floppy->queued_pc;
-		idefloppy_create_rw_cmd(drive, pc, rq, block);
+		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
 	} else if (blk_special_request(rq)) {
 		pc = (struct ide_atapi_pc *) rq->buffer;
 	} else if (blk_pc_request(rq)) {
@@ -373,7 +323,7 @@
 		idefloppy_blockpc_cmd(floppy, pc, rq);
 	} else {
 		blk_dump_rq_flags(rq, PFX "unsupported command in queue");
-		idefloppy_end_request(drive, 0, 0);
+		ide_floppy_end_request(drive, 0, 0);
 		return ide_stopped;
 	}
 
@@ -394,7 +344,7 @@
  */
 static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct gendisk *disk = floppy->disk;
 	struct ide_atapi_pc pc;
 	u8 *page;
@@ -410,11 +360,11 @@
 	}
 
 	if (pc.buf[3] & 0x80)
-		drive->atapi_flags |= IDE_AFLAG_WP;
+		drive->dev_flags |= IDE_DFLAG_WP;
 	else
-		drive->atapi_flags &= ~IDE_AFLAG_WP;
+		drive->dev_flags &= ~IDE_DFLAG_WP;
 
-	set_disk_ro(disk, !!(drive->atapi_flags & IDE_AFLAG_WP));
+	set_disk_ro(disk, !!(drive->dev_flags & IDE_DFLAG_WP));
 
 	page = &pc.buf[8];
 
@@ -445,7 +395,9 @@
 			drive->name, lba_capacity, capacity);
 		floppy->blocks = floppy->block_size ?
 			capacity / floppy->block_size : 0;
+		drive->capacity64 = floppy->blocks * floppy->bs_factor;
 	}
+
 	return 0;
 }
 
@@ -455,7 +407,7 @@
  */
 static int ide_floppy_get_capacity(ide_drive_t *drive)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct gendisk *disk = floppy->disk;
 	struct ide_atapi_pc pc;
 	u8 *cap_desc;
@@ -466,7 +418,7 @@
 	drive->bios_head = drive->bios_sect = 0;
 	floppy->blocks = 0;
 	floppy->bs_factor = 1;
-	set_capacity(floppy->disk, 0);
+	drive->capacity64 = 0;
 
 	ide_floppy_create_read_capacity_cmd(&pc);
 	if (ide_queue_pc_tail(drive, disk, &pc)) {
@@ -523,6 +475,8 @@
 					       "non 512 bytes block size not "
 					       "fully supported\n",
 					       drive->name);
+				drive->capacity64 =
+					floppy->blocks * floppy->bs_factor;
 				rc = 0;
 			}
 			break;
@@ -547,21 +501,12 @@
 	if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
 		(void) ide_floppy_get_flexible_disk_page(drive);
 
-	set_capacity(disk, floppy->blocks * floppy->bs_factor);
-
 	return rc;
 }
 
-sector_t ide_floppy_capacity(ide_drive_t *drive)
+static void ide_floppy_setup(ide_drive_t *drive)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
-	unsigned long capacity = floppy->blocks * floppy->bs_factor;
-
-	return capacity;
-}
-
-static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
-{
+	struct ide_disk_obj *floppy = drive->driver_data;
 	u16 *id = drive->id;
 
 	drive->pc_callback	 = ide_floppy_callback;
@@ -592,252 +537,42 @@
 		blk_queue_max_sectors(drive->queue, 64);
 		drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
 		/* IOMEGA Clik! drives do not support lock/unlock commands */
-		drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
+		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
 	}
 
 	(void) ide_floppy_get_capacity(drive);
 
 	ide_proc_register_driver(drive, floppy->driver);
+
+	drive->dev_flags |= IDE_DFLAG_ATTACH;
 }
 
-static void ide_floppy_remove(ide_drive_t *drive)
+static void ide_floppy_flush(ide_drive_t *drive)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
-	struct gendisk *g = floppy->disk;
-
-	ide_proc_unregister_driver(drive, floppy->driver);
-
-	del_gendisk(g);
-
-	ide_floppy_put(floppy);
 }
 
-static void idefloppy_cleanup_obj(struct kref *kref)
+static int ide_floppy_init_media(ide_drive_t *drive, struct gendisk *disk)
 {
-	struct ide_floppy_obj *floppy = to_ide_drv(kref, ide_floppy_obj);
-	ide_drive_t *drive = floppy->drive;
-	struct gendisk *g = floppy->disk;
-
-	drive->driver_data = NULL;
-	g->private_data = NULL;
-	put_disk(g);
-	kfree(floppy);
-}
-
-static int ide_floppy_probe(ide_drive_t *);
-
-static ide_driver_t idefloppy_driver = {
-	.gen_driver = {
-		.owner		= THIS_MODULE,
-		.name		= "ide-floppy",
-		.bus		= &ide_bus_type,
-	},
-	.probe			= ide_floppy_probe,
-	.remove			= ide_floppy_remove,
-	.version		= IDEFLOPPY_VERSION,
-	.do_request		= idefloppy_do_request,
-	.end_request		= idefloppy_end_request,
-	.error			= __ide_error,
-#ifdef CONFIG_IDE_PROC_FS
-	.proc			= ide_floppy_proc,
-	.settings		= ide_floppy_settings,
-#endif
-};
-
-static int idefloppy_open(struct inode *inode, struct file *filp)
-{
-	struct gendisk *disk = inode->i_bdev->bd_disk;
-	struct ide_floppy_obj *floppy;
-	ide_drive_t *drive;
 	int ret = 0;
 
-	floppy = ide_floppy_get(disk);
-	if (!floppy)
-		return -ENXIO;
+	if (ide_do_test_unit_ready(drive, disk))
+		ide_do_start_stop(drive, disk, 1);
 
-	drive = floppy->drive;
+	ret = ide_floppy_get_capacity(drive);
 
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+	set_capacity(disk, ide_gd_capacity(drive));
 
-	floppy->openers++;
-
-	if (floppy->openers == 1) {
-		drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
-		/* Just in case */
-
-		if (ide_do_test_unit_ready(drive, disk))
-			ide_do_start_stop(drive, disk, 1);
-
-		if (ide_floppy_get_capacity(drive)
-		   && (filp->f_flags & O_NDELAY) == 0
-		    /*
-		     * Allow O_NDELAY to open a drive without a disk, or with an
-		     * unreadable disk, so that we can get the format capacity
-		     * of the drive or begin the format - Sam
-		     */
-		    ) {
-			ret = -EIO;
-			goto out_put_floppy;
-		}
-
-		if ((drive->atapi_flags & IDE_AFLAG_WP) && (filp->f_mode & 2)) {
-			ret = -EROFS;
-			goto out_put_floppy;
-		}
-
-		drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
-		ide_set_media_lock(drive, disk, 1);
-		check_disk_change(inode->i_bdev);
-	} else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) {
-		ret = -EBUSY;
-		goto out_put_floppy;
-	}
-	return 0;
-
-out_put_floppy:
-	floppy->openers--;
-	ide_floppy_put(floppy);
 	return ret;
 }
 
-static int idefloppy_release(struct inode *inode, struct file *filp)
-{
-	struct gendisk *disk = inode->i_bdev->bd_disk;
-	struct ide_floppy_obj *floppy = ide_drv_g(disk, ide_floppy_obj);
-	ide_drive_t *drive = floppy->drive;
-
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
-
-	if (floppy->openers == 1) {
-		ide_set_media_lock(drive, disk, 0);
-		drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
-	}
-
-	floppy->openers--;
-
-	ide_floppy_put(floppy);
-
-	return 0;
-}
-
-static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
-	struct ide_floppy_obj *floppy = ide_drv_g(bdev->bd_disk,
-						     ide_floppy_obj);
-	ide_drive_t *drive = floppy->drive;
-
-	geo->heads = drive->bios_head;
-	geo->sectors = drive->bios_sect;
-	geo->cylinders = (u16)drive->bios_cyl; /* truncate */
-	return 0;
-}
-
-static int idefloppy_media_changed(struct gendisk *disk)
-{
-	struct ide_floppy_obj *floppy = ide_drv_g(disk, ide_floppy_obj);
-	ide_drive_t *drive = floppy->drive;
-	int ret;
-
-	/* do not scan partitions twice if this is a removable device */
-	if (drive->dev_flags & IDE_DFLAG_ATTACH) {
-		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
-		return 0;
-	}
-	ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);
-	drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
-	return ret;
-}
-
-static int idefloppy_revalidate_disk(struct gendisk *disk)
-{
-	struct ide_floppy_obj *floppy = ide_drv_g(disk, ide_floppy_obj);
-	set_capacity(disk, ide_floppy_capacity(floppy->drive));
-	return 0;
-}
-
-static struct block_device_operations idefloppy_ops = {
-	.owner			= THIS_MODULE,
-	.open			= idefloppy_open,
-	.release		= idefloppy_release,
-	.ioctl			= ide_floppy_ioctl,
-	.getgeo			= idefloppy_getgeo,
-	.media_changed		= idefloppy_media_changed,
-	.revalidate_disk	= idefloppy_revalidate_disk
+const struct ide_disk_ops ide_atapi_disk_ops = {
+	.check		= ide_check_atapi_device,
+	.get_capacity	= ide_floppy_get_capacity,
+	.setup		= ide_floppy_setup,
+	.flush		= ide_floppy_flush,
+	.init_media	= ide_floppy_init_media,
+	.set_doorlock	= ide_set_media_lock,
+	.do_request	= ide_floppy_do_request,
+	.end_request	= ide_floppy_end_request,
+	.ioctl		= ide_floppy_ioctl,
 };
-
-static int ide_floppy_probe(ide_drive_t *drive)
-{
-	idefloppy_floppy_t *floppy;
-	struct gendisk *g;
-
-	if (!strstr("ide-floppy", drive->driver_req))
-		goto failed;
-
-	if (drive->media != ide_floppy)
-		goto failed;
-
-	if (!ide_check_atapi_device(drive, DRV_NAME)) {
-		printk(KERN_ERR PFX "%s: not supported by this version of "
-		       DRV_NAME "\n", drive->name);
-		goto failed;
-	}
-	floppy = kzalloc(sizeof(idefloppy_floppy_t), GFP_KERNEL);
-	if (!floppy) {
-		printk(KERN_ERR PFX "%s: Can't allocate a floppy structure\n",
-		       drive->name);
-		goto failed;
-	}
-
-	g = alloc_disk(1 << PARTN_BITS);
-	if (!g)
-		goto out_free_floppy;
-
-	ide_init_disk(g, drive);
-
-	kref_init(&floppy->kref);
-
-	floppy->drive = drive;
-	floppy->driver = &idefloppy_driver;
-	floppy->disk = g;
-
-	g->private_data = &floppy->driver;
-
-	drive->driver_data = floppy;
-
-	drive->debug_mask = debug_mask;
-
-	idefloppy_setup(drive, floppy);
-	drive->dev_flags |= IDE_DFLAG_ATTACH;
-
-	g->minors = 1 << PARTN_BITS;
-	g->driverfs_dev = &drive->gendev;
-	if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
-		g->flags = GENHD_FL_REMOVABLE;
-	g->fops = &idefloppy_ops;
-	add_disk(g);
-	return 0;
-
-out_free_floppy:
-	kfree(floppy);
-failed:
-	return -ENODEV;
-}
-
-static void __exit idefloppy_exit(void)
-{
-	driver_unregister(&idefloppy_driver.gen_driver);
-}
-
-static int __init idefloppy_init(void)
-{
-	printk(KERN_INFO DRV_NAME " driver " IDEFLOPPY_VERSION "\n");
-	return driver_register(&idefloppy_driver.gen_driver);
-}
-
-MODULE_ALIAS("ide:*m-floppy*");
-MODULE_ALIAS("ide-floppy");
-module_init(idefloppy_init);
-module_exit(idefloppy_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ATAPI FLOPPY Driver");
-
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index 17cf865..c17124d 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -1,37 +1,9 @@
 #ifndef __IDE_FLOPPY_H
 #define __IDE_FLOPPY_H
 
-/*
- * Most of our global data which we need to save even as we leave the driver
- * due to an interrupt or a timer event is stored in a variable of type
- * idefloppy_floppy_t, defined below.
- */
-typedef struct ide_floppy_obj {
-	ide_drive_t	*drive;
-	ide_driver_t	*driver;
-	struct gendisk	*disk;
-	struct kref	kref;
-	unsigned int	openers;	/* protected by BKL for now */
+#include "ide-gd.h"
 
-	/* Last failed packet command */
-	struct ide_atapi_pc *failed_pc;
-	/* used for blk_{fs,pc}_request() requests */
-	struct ide_atapi_pc queued_pc;
-
-	/* Last error information */
-	u8 sense_key, asc, ascq;
-
-	int progress_indication;
-
-	/* Device information */
-	/* Current format */
-	int blocks, block_size, bs_factor;
-	/* Last format capacity descriptor */
-	u8 cap_desc[8];
-	/* Copy of the flexible disk page */
-	u8 flexible_disk_page[32];
-} idefloppy_floppy_t;
-
+#ifdef CONFIG_IDE_GD_ATAPI
 /*
  * Pages of the SELECT SENSE / MODE SENSE packet commands.
  * See SFF-8070i spec.
@@ -46,17 +18,22 @@
 #define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS	0x4603
 
 /* ide-floppy.c */
+extern const struct ide_disk_ops ide_atapi_disk_ops;
 void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
 void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
-sector_t ide_floppy_capacity(ide_drive_t *);
 
 /* ide-floppy_ioctl.c */
-int ide_floppy_ioctl(struct inode *, struct file *, unsigned, unsigned long);
+int ide_floppy_ioctl(ide_drive_t *, struct inode *, struct file *, unsigned int,
+		     unsigned long);
 
 #ifdef CONFIG_IDE_PROC_FS
 /* ide-floppy_proc.c */
 extern ide_proc_entry_t ide_floppy_proc[];
 extern const struct ide_proc_devset ide_floppy_settings[];
 #endif
+#else
+#define ide_floppy_proc		NULL
+#define ide_floppy_settings	NULL
+#endif
 
 #endif /*__IDE_FLOPPY_H */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index a3a7a08..409e4c1 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -33,7 +33,7 @@
 
 static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg)
 {
-	struct ide_floppy_obj *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct ide_atapi_pc pc;
 	u8 header_len, desc_cnt;
 	int i, blocks, length, u_array_size, u_index;
@@ -113,7 +113,7 @@
 
 static int ide_floppy_get_sfrp_bit(ide_drive_t *drive)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct ide_atapi_pc pc;
 
 	drive->atapi_flags &= ~IDE_AFLAG_SRFP;
@@ -132,17 +132,17 @@
 
 static int ide_floppy_format_unit(ide_drive_t *drive, int __user *arg)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct ide_atapi_pc pc;
 	int blocks, length, flags, err = 0;
 
 	if (floppy->openers > 1) {
 		/* Don't format if someone is using the disk */
-		drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
+		drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
 		return -EBUSY;
 	}
 
-	drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
+	drive->dev_flags |= IDE_DFLAG_FORMAT_IN_PROGRESS;
 
 	/*
 	 * Send ATAPI_FORMAT_UNIT to the drive.
@@ -174,7 +174,7 @@
 
 out:
 	if (err)
-		drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
+		drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
 	return err;
 }
 
@@ -190,7 +190,7 @@
 
 static int ide_floppy_get_format_progress(ide_drive_t *drive, int __user *arg)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct ide_atapi_pc pc;
 	int progress_indication = 0x10000;
 
@@ -226,7 +226,7 @@
 static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
 			       unsigned long arg, unsigned int cmd)
 {
-	idefloppy_floppy_t *floppy = drive->driver_data;
+	struct ide_disk_obj *floppy = drive->driver_data;
 	struct gendisk *disk = floppy->disk;
 	int prevent = (arg && cmd != CDROMEJECT) ? 1 : 0;
 
@@ -260,13 +260,10 @@
 	}
 }
 
-int ide_floppy_ioctl(struct inode *inode, struct file *file,
-		    unsigned int cmd, unsigned long arg)
+int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode,
+		     struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct block_device *bdev = inode->i_bdev;
-	struct ide_floppy_obj *floppy = ide_drv_g(bdev->bd_disk,
-						     ide_floppy_obj);
-	ide_drive_t *drive = floppy->drive;
 	struct ide_atapi_pc pc;
 	void __user *argp = (void __user *)arg;
 	int err;
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
index 76f0c6c..3ec762c 100644
--- a/drivers/ide/ide-floppy_proc.c
+++ b/drivers/ide/ide-floppy_proc.c
@@ -9,7 +9,7 @@
 	ide_drive_t*drive = (ide_drive_t *)data;
 	int len;
 
-	len = sprintf(page, "%llu\n", (long long)ide_floppy_capacity(drive));
+	len = sprintf(page, "%llu\n", (long long)ide_gd_capacity(drive));
 	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
 }
 
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
new file mode 100644
index 0000000..d44898f
--- /dev/null
+++ b/drivers/ide/ide-gd.c
@@ -0,0 +1,398 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/mutex.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define IDE_DISK_MINORS		(1 << PARTN_BITS)
+#else
+#define IDE_DISK_MINORS		0
+#endif
+
+#include "ide-disk.h"
+#include "ide-floppy.h"
+
+#define IDE_GD_VERSION	"1.18"
+
+/* module parameters */
+static unsigned long debug_mask;
+module_param(debug_mask, ulong, 0644);
+
+static DEFINE_MUTEX(ide_disk_ref_mutex);
+
+static void ide_disk_release(struct kref *);
+
+static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
+{
+	struct ide_disk_obj *idkp = NULL;
+
+	mutex_lock(&ide_disk_ref_mutex);
+	idkp = ide_drv_g(disk, ide_disk_obj);
+	if (idkp) {
+		if (ide_device_get(idkp->drive))
+			idkp = NULL;
+		else
+			kref_get(&idkp->kref);
+	}
+	mutex_unlock(&ide_disk_ref_mutex);
+	return idkp;
+}
+
+static void ide_disk_put(struct ide_disk_obj *idkp)
+{
+	ide_drive_t *drive = idkp->drive;
+
+	mutex_lock(&ide_disk_ref_mutex);
+	kref_put(&idkp->kref, ide_disk_release);
+	ide_device_put(drive);
+	mutex_unlock(&ide_disk_ref_mutex);
+}
+
+sector_t ide_gd_capacity(ide_drive_t *drive)
+{
+	return drive->capacity64;
+}
+
+static int ide_gd_probe(ide_drive_t *);
+
+static void ide_gd_remove(ide_drive_t *drive)
+{
+	struct ide_disk_obj *idkp = drive->driver_data;
+	struct gendisk *g = idkp->disk;
+
+	ide_proc_unregister_driver(drive, idkp->driver);
+
+	del_gendisk(g);
+
+	drive->disk_ops->flush(drive);
+
+	ide_disk_put(idkp);
+}
+
+static void ide_disk_release(struct kref *kref)
+{
+	struct ide_disk_obj *idkp = to_ide_drv(kref, ide_disk_obj);
+	ide_drive_t *drive = idkp->drive;
+	struct gendisk *g = idkp->disk;
+
+	drive->disk_ops = NULL;
+	drive->driver_data = NULL;
+	g->private_data = NULL;
+	put_disk(g);
+	kfree(idkp);
+}
+
+/*
+ * On HPA drives the capacity needs to be
+ * reinitialized on resume, otherwise the disk
+ * cannot be used and a hard reset is required
+ */
+static void ide_gd_resume(ide_drive_t *drive)
+{
+	if (ata_id_hpa_enabled(drive->id))
+		(void)drive->disk_ops->get_capacity(drive);
+}
+
+static void ide_gd_shutdown(ide_drive_t *drive)
+{
+#ifdef	CONFIG_ALPHA
+	/* On Alpha, halt(8) doesn't actually turn the machine off,
+	   it puts you into a sort of firmware monitor. Typically,
+	   it's used to boot another kernel image, so it's not much
+	   different from reboot(8). Therefore, we don't need to
+	   spin down the disk in this case, especially since Alpha
+	   firmware doesn't handle disks in standby mode properly.
+	   On the other hand, it's reasonably safe to turn the power
+	   off when the shutdown process reaches the firmware prompt,
+	   as the firmware initialization takes a rather long time -
+	   at least 10 seconds, which should be sufficient for
+	   the disk to expire its write cache. */
+	if (system_state != SYSTEM_POWER_OFF) {
+#else
+	if (system_state == SYSTEM_RESTART) {
+#endif
+		drive->disk_ops->flush(drive);
+		return;
+	}
+
+	printk(KERN_INFO "Shutdown: %s\n", drive->name);
+
+	drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_IDE_PROC_FS
+static ide_proc_entry_t *ide_disk_proc_entries(ide_drive_t *drive)
+{
+	return (drive->media == ide_disk) ? ide_disk_proc : ide_floppy_proc;
+}
+
+static const struct ide_proc_devset *ide_disk_proc_devsets(ide_drive_t *drive)
+{
+	return (drive->media == ide_disk) ? ide_disk_settings
+					  : ide_floppy_settings;
+}
+#endif
+
+static ide_startstop_t ide_gd_do_request(ide_drive_t *drive,
+					 struct request *rq, sector_t sector)
+{
+	return drive->disk_ops->do_request(drive, rq, sector);
+}
+
+static int ide_gd_end_request(ide_drive_t *drive, int uptodate, int nrsecs)
+{
+	return drive->disk_ops->end_request(drive, uptodate, nrsecs);
+}
+
+static ide_driver_t ide_gd_driver = {
+	.gen_driver = {
+		.owner		= THIS_MODULE,
+		.name		= "ide-gd",
+		.bus		= &ide_bus_type,
+	},
+	.probe			= ide_gd_probe,
+	.remove			= ide_gd_remove,
+	.resume			= ide_gd_resume,
+	.shutdown		= ide_gd_shutdown,
+	.version		= IDE_GD_VERSION,
+	.do_request		= ide_gd_do_request,
+	.end_request		= ide_gd_end_request,
+	.error			= __ide_error,
+#ifdef CONFIG_IDE_PROC_FS
+	.proc_entries		= ide_disk_proc_entries,
+	.proc_devsets		= ide_disk_proc_devsets,
+#endif
+};
+
+static int ide_gd_open(struct inode *inode, struct file *filp)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct ide_disk_obj *idkp;
+	ide_drive_t *drive;
+	int ret = 0;
+
+	idkp = ide_disk_get(disk);
+	if (idkp == NULL)
+		return -ENXIO;
+
+	drive = idkp->drive;
+
+	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+
+	idkp->openers++;
+
+	if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
+		drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
+		/* Just in case */
+
+		ret = drive->disk_ops->init_media(drive, disk);
+
+		/*
+		 * Allow O_NDELAY to open a drive without a disk, or with an
+		 * unreadable disk, so that we can get the format capacity
+		 * of the drive or begin the format - Sam
+		 */
+		if (ret && (filp->f_flags & O_NDELAY) == 0) {
+			ret = -EIO;
+			goto out_put_idkp;
+		}
+
+		if ((drive->dev_flags & IDE_DFLAG_WP) && (filp->f_mode & 2)) {
+			ret = -EROFS;
+			goto out_put_idkp;
+		}
+
+		/*
+		 * Ignore the return code from door_lock,
+		 * since the open() has already succeeded,
+		 * and the door_lock is irrelevant at this point.
+		 */
+		drive->disk_ops->set_doorlock(drive, disk, 1);
+		drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
+		check_disk_change(inode->i_bdev);
+	} else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
+		ret = -EBUSY;
+		goto out_put_idkp;
+	}
+	return 0;
+
+out_put_idkp:
+	idkp->openers--;
+	ide_disk_put(idkp);
+	return ret;
+}
+
+static int ide_gd_release(struct inode *inode, struct file *filp)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+	ide_drive_t *drive = idkp->drive;
+
+	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+
+	if (idkp->openers == 1)
+		drive->disk_ops->flush(drive);
+
+	if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
+		drive->disk_ops->set_doorlock(drive, disk, 0);
+		drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
+	}
+
+	idkp->openers--;
+
+	ide_disk_put(idkp);
+
+	return 0;
+}
+
+static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+	ide_drive_t *drive = idkp->drive;
+
+	geo->heads = drive->bios_head;
+	geo->sectors = drive->bios_sect;
+	geo->cylinders = (u16)drive->bios_cyl; /* truncate */
+	return 0;
+}
+
+static int ide_gd_media_changed(struct gendisk *disk)
+{
+	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+	ide_drive_t *drive = idkp->drive;
+	int ret;
+
+	/* do not scan partitions twice if this is a removable device */
+	if (drive->dev_flags & IDE_DFLAG_ATTACH) {
+		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
+		return 0;
+	}
+
+	ret = !!(drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED);
+	drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
+
+	return ret;
+}
+
+static int ide_gd_revalidate_disk(struct gendisk *disk)
+{
+	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+	set_capacity(disk, ide_gd_capacity(idkp->drive));
+	return 0;
+}
+
+static int ide_gd_ioctl(struct inode *inode, struct file *file,
+			     unsigned int cmd, unsigned long arg)
+{
+	struct block_device *bdev = inode->i_bdev;
+	struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+	ide_drive_t *drive = idkp->drive;
+
+	return drive->disk_ops->ioctl(drive, inode, file, cmd, arg);
+}
+
+static struct block_device_operations ide_gd_ops = {
+	.owner			= THIS_MODULE,
+	.open			= ide_gd_open,
+	.release		= ide_gd_release,
+	.ioctl			= ide_gd_ioctl,
+	.getgeo			= ide_gd_getgeo,
+	.media_changed		= ide_gd_media_changed,
+	.revalidate_disk	= ide_gd_revalidate_disk
+};
+
+static int ide_gd_probe(ide_drive_t *drive)
+{
+	const struct ide_disk_ops *disk_ops = NULL;
+	struct ide_disk_obj *idkp;
+	struct gendisk *g;
+
+	/* strstr("foo", "") is non-NULL */
+	if (!strstr("ide-gd", drive->driver_req))
+		goto failed;
+
+#ifdef CONFIG_IDE_GD_ATA
+	if (drive->media == ide_disk)
+		disk_ops = &ide_ata_disk_ops;
+#endif
+#ifdef CONFIG_IDE_GD_ATAPI
+	if (drive->media == ide_floppy)
+		disk_ops = &ide_atapi_disk_ops;
+#endif
+	if (disk_ops == NULL)
+		goto failed;
+
+	if (disk_ops->check(drive, DRV_NAME) == 0) {
+		printk(KERN_ERR PFX "%s: not supported by this driver\n",
+			drive->name);
+		goto failed;
+	}
+
+	idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
+	if (!idkp) {
+		printk(KERN_ERR PFX "%s: can't allocate a disk structure\n",
+			drive->name);
+		goto failed;
+	}
+
+	g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
+	if (!g)
+		goto out_free_idkp;
+
+	ide_init_disk(g, drive);
+
+	kref_init(&idkp->kref);
+
+	idkp->drive = drive;
+	idkp->driver = &ide_gd_driver;
+	idkp->disk = g;
+
+	g->private_data = &idkp->driver;
+
+	drive->driver_data = idkp;
+	drive->debug_mask = debug_mask;
+	drive->disk_ops = disk_ops;
+
+	disk_ops->setup(drive);
+
+	set_capacity(g, ide_gd_capacity(drive));
+
+	g->minors = IDE_DISK_MINORS;
+	g->driverfs_dev = &drive->gendev;
+	g->flags |= GENHD_FL_EXT_DEVT;
+	if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
+		g->flags = GENHD_FL_REMOVABLE;
+	g->fops = &ide_gd_ops;
+	add_disk(g);
+	return 0;
+
+out_free_idkp:
+	kfree(idkp);
+failed:
+	return -ENODEV;
+}
+
+static int __init ide_gd_init(void)
+{
+	printk(KERN_INFO DRV_NAME " driver " IDE_GD_VERSION "\n");
+	return driver_register(&ide_gd_driver.gen_driver);
+}
+
+static void __exit ide_gd_exit(void)
+{
+	driver_unregister(&ide_gd_driver.gen_driver);
+}
+
+MODULE_ALIAS("ide:*m-disk*");
+MODULE_ALIAS("ide-disk");
+MODULE_ALIAS("ide:*m-floppy*");
+MODULE_ALIAS("ide-floppy");
+module_init(ide_gd_init);
+module_exit(ide_gd_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("generic ATA/ATAPI disk driver");
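For readers skimming the patch, the new ide-gd core above collapses ide-disk and ide-floppy into one generic driver that calls through drive->disk_ops, a per-media operations table chosen once at probe time. A minimal stand-alone sketch of that dispatch pattern, in plain user-space C with invented names (not the kernel API):

#include <stdio.h>

/* Per-media operations table, analogous in spirit to struct ide_disk_ops. */
struct media_ops {
	const char *name;
	long (*get_capacity)(void);
};

static long ata_capacity(void)   { return 1000000L; }
static long atapi_capacity(void) { return 2880L; }

static const struct media_ops ata_ops   = { "ata-disk",     ata_capacity };
static const struct media_ops atapi_ops = { "atapi-floppy", atapi_capacity };

enum media_type { MEDIA_DISK, MEDIA_FLOPPY };

/* "Probe": pick the ops table once, by media type. */
static const struct media_ops *probe_media(enum media_type t)
{
	return (t == MEDIA_DISK) ? &ata_ops : &atapi_ops;
}

int main(void)
{
	/* The generic layer only ever talks to the table it selected. */
	const struct media_ops *ops = probe_media(MEDIA_FLOPPY);

	printf("%s: %ld sectors\n", ops->name, ops->get_capacity());
	return 0;
}

The benefit mirrored here is that ide_gd_do_request(), ide_gd_open() and friends stay media-agnostic; only the probe decides which table gets wired in.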
diff --git a/drivers/ide/ide-gd.h b/drivers/ide/ide-gd.h
new file mode 100644
index 0000000..7d3d101
--- /dev/null
+++ b/drivers/ide/ide-gd.h
@@ -0,0 +1,44 @@
+#ifndef __IDE_GD_H
+#define __IDE_GD_H
+
+#define DRV_NAME "ide-gd"
+#define PFX DRV_NAME ": "
+
+/* define to see debug info */
+#define IDE_GD_DEBUG_LOG	0
+
+#if IDE_GD_DEBUG_LOG
+#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args)
+#else
+#define ide_debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
+struct ide_disk_obj {
+	ide_drive_t	*drive;
+	ide_driver_t	*driver;
+	struct gendisk	*disk;
+	struct kref	kref;
+	unsigned int	openers;	/* protected by BKL for now */
+
+	/* Last failed packet command */
+	struct ide_atapi_pc *failed_pc;
+	/* used for blk_{fs,pc}_request() requests */
+	struct ide_atapi_pc queued_pc;
+
+	/* Last error information */
+	u8 sense_key, asc, ascq;
+
+	int progress_indication;
+
+	/* Device information */
+	/* Current format */
+	int blocks, block_size, bs_factor;
+	/* Last format capacity descriptor */
+	u8 cap_desc[8];
+	/* Copy of the flexible disk page */
+	u8 flexible_disk_page[32];
+};
+
+sector_t ide_gd_capacity(ide_drive_t *);
+
+#endif /* __IDE_GD_H */
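ide_disk_get()/ide_disk_put() above tie the lifetime of struct ide_disk_obj to a kref, so the object is freed only when the last reference is dropped. A rough user-space sketch of that refcounting idea, with invented helpers rather than <linux/kref.h> and without the locking the real code needs:

#include <stdio.h>
#include <stdlib.h>

struct kref_like {
	int refcount;
};

static void ref_init(struct kref_like *k) { k->refcount = 1; }
static void ref_get(struct kref_like *k)  { k->refcount++; }

/* Drop a reference; run the release callback when the count hits zero. */
static int ref_put(struct kref_like *k, void (*release)(struct kref_like *))
{
	if (--k->refcount == 0) {
		release(k);
		return 1;
	}
	return 0;
}

struct disk_obj {
	struct kref_like ref;	/* first member, so the cast below is valid */
	const char *name;
};

static void disk_release(struct kref_like *k)
{
	struct disk_obj *d = (struct disk_obj *)k;

	printf("releasing %s\n", d->name);
	free(d);
}

int main(void)
{
	struct disk_obj *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->name = "idkp";
	ref_init(&d->ref);			/* probe takes the first reference */
	ref_get(&d->ref);			/* open */
	ref_put(&d->ref, disk_release);		/* release (close) */
	ref_put(&d->ref, disk_release);		/* remove: last put frees */
	return 0;
}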
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/ide-h8300.c
similarity index 100%
rename from drivers/ide/h8300/ide-h8300.c
rename to drivers/ide/ide-h8300.c
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b762deb..bb7a1ed 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -755,7 +755,7 @@
 	 
 	udelay(1);
 	SELECT_DRIVE(drive);
-	SELECT_MASK(drive, 0);
+	SELECT_MASK(drive, 1);
 	udelay(1);
 	tp_ops->set_irq(hwif, 0);
 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 19f8c77..1649ea5 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -208,6 +208,7 @@
 		drive->ready_stat = 0;
 		if (ata_id_cdb_intr(id))
 			drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
+		drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
 		/* we don't do head unloading on ATAPI devices */
 		drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
 		return;
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index b269264..c31d0dd 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -567,10 +567,10 @@
 void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver)
 {
 	mutex_lock(&ide_setting_mtx);
-	drive->settings = driver->settings;
+	drive->settings = driver->proc_devsets(drive);
 	mutex_unlock(&ide_setting_mtx);
 
-	ide_add_proc_entries(drive->proc, driver->proc, drive);
+	ide_add_proc_entries(drive->proc, driver->proc_entries(drive), drive);
 }
 
 EXPORT_SYMBOL(ide_proc_register_driver);
@@ -591,7 +591,7 @@
 {
 	unsigned long flags;
 
-	ide_remove_proc_entries(drive->proc, driver->proc);
+	ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
 
 	mutex_lock(&ide_setting_mtx);
 	spin_lock_irqsave(&ide_lock, flags);
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index d879c77..b2b2e5e 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2108,7 +2108,7 @@
 
 	/* device lacks locking support according to capabilities page */
 	if ((caps[6] & 1) == 0)
-		drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
+		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
 
 	if (caps[7] & 0x02)
 		tape->blk_size = 512;
@@ -2298,6 +2298,16 @@
 	{ "name",	S_IFREG|S_IRUGO,	proc_idetape_read_name,	NULL },
 	{ NULL, 0, NULL, NULL }
 };
+
+static ide_proc_entry_t *ide_tape_proc_entries(ide_drive_t *drive)
+{
+	return idetape_proc;
+}
+
+static const struct ide_proc_devset *ide_tape_proc_devsets(ide_drive_t *drive)
+{
+	return idetape_settings;
+}
 #endif
 
 static int ide_tape_probe(ide_drive_t *);
@@ -2315,8 +2325,8 @@
 	.end_request		= idetape_end_request,
 	.error			= __ide_error,
 #ifdef CONFIG_IDE_PROC_FS
-	.proc			= idetape_proc,
-	.settings		= idetape_settings,
+	.proc_entries		= ide_tape_proc_entries,
+	.proc_devsets		= ide_tape_proc_devsets,
 #endif
 };
 
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/ide_arm.c
similarity index 100%
rename from drivers/ide/arm/ide_arm.c
rename to drivers/ide/ide_arm.c
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/ide_platform.c
similarity index 100%
rename from drivers/ide/legacy/ide_platform.c
rename to drivers/ide/ide_platform.c
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/it8213.c
similarity index 100%
rename from drivers/ide/pci/it8213.c
rename to drivers/ide/it8213.c
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/it821x.c
similarity index 100%
rename from drivers/ide/pci/it821x.c
rename to drivers/ide/it821x.c
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/jmicron.c
similarity index 100%
rename from drivers/ide/pci/jmicron.c
rename to drivers/ide/jmicron.c
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
deleted file mode 100644
index 6939329..0000000
--- a/drivers/ide/legacy/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# link order is important here
-
-obj-$(CONFIG_BLK_DEV_ALI14XX)		+= ali14xx.o
-obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
-obj-$(CONFIG_BLK_DEV_DTC2278)		+= dtc2278.o
-obj-$(CONFIG_BLK_DEV_HT6560B)		+= ht6560b.o
-obj-$(CONFIG_BLK_DEV_QD65XX)		+= qd65xx.o
-obj-$(CONFIG_BLK_DEV_4DRIVES)		+= ide-4drives.o
-
-obj-$(CONFIG_BLK_DEV_GAYLE)		+= gayle.o
-obj-$(CONFIG_BLK_DEV_FALCON_IDE)	+= falconide.o
-obj-$(CONFIG_BLK_DEV_MAC_IDE)		+= macide.o
-obj-$(CONFIG_BLK_DEV_Q40IDE)		+= q40ide.o
-obj-$(CONFIG_BLK_DEV_BUDDHA)		+= buddha.o
-
-ifeq ($(CONFIG_BLK_DEV_IDECS), m)
-	obj-m += ide-cs.o
-endif
-
-ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
-	obj-m += ide_platform.o
-endif
-
-EXTRA_CFLAGS	:= -Idrivers/ide
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/macide.c
similarity index 100%
rename from drivers/ide/legacy/macide.c
rename to drivers/ide/macide.c
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
deleted file mode 100644
index 5873fa0..0000000
--- a/drivers/ide/mips/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_BLK_DEV_IDE_AU1XXX)	+= au1xxx-ide.o
-
-EXTRA_CFLAGS    := -Idrivers/ide
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/ns87415.c
similarity index 100%
rename from drivers/ide/pci/ns87415.c
rename to drivers/ide/ns87415.c
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/opti621.c
similarity index 100%
rename from drivers/ide/pci/opti621.c
rename to drivers/ide/opti621.c
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/palm_bk3710.c
similarity index 100%
rename from drivers/ide/arm/palm_bk3710.c
rename to drivers/ide/palm_bk3710.c
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
deleted file mode 100644
index 02e6ee7..0000000
--- a/drivers/ide/pci/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-
-obj-$(CONFIG_BLK_DEV_AEC62XX)		+= aec62xx.o
-obj-$(CONFIG_BLK_DEV_ALI15X3)		+= alim15x3.o
-obj-$(CONFIG_BLK_DEV_AMD74XX)		+= amd74xx.o
-obj-$(CONFIG_BLK_DEV_ATIIXP)		+= atiixp.o
-obj-$(CONFIG_BLK_DEV_CELLEB)		+= scc_pata.o
-obj-$(CONFIG_BLK_DEV_CMD64X)		+= cmd64x.o
-obj-$(CONFIG_BLK_DEV_CS5520)		+= cs5520.o
-obj-$(CONFIG_BLK_DEV_CS5530)		+= cs5530.o
-obj-$(CONFIG_BLK_DEV_CS5535)		+= cs5535.o
-obj-$(CONFIG_BLK_DEV_SC1200)		+= sc1200.o
-obj-$(CONFIG_BLK_DEV_CY82C693)		+= cy82c693.o
-obj-$(CONFIG_BLK_DEV_DELKIN)		+= delkin_cb.o
-obj-$(CONFIG_BLK_DEV_HPT34X)		+= hpt34x.o
-obj-$(CONFIG_BLK_DEV_HPT366)		+= hpt366.o
-obj-$(CONFIG_BLK_DEV_IT8213)		+= it8213.o
-obj-$(CONFIG_BLK_DEV_IT821X)		+= it821x.o
-obj-$(CONFIG_BLK_DEV_JMICRON)		+= jmicron.o
-obj-$(CONFIG_BLK_DEV_NS87415)		+= ns87415.o
-obj-$(CONFIG_BLK_DEV_OPTI621)		+= opti621.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_OLD)	+= pdc202xx_old.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_NEW)	+= pdc202xx_new.o
-obj-$(CONFIG_BLK_DEV_PIIX)		+= piix.o
-obj-$(CONFIG_BLK_DEV_RZ1000)		+= rz1000.o
-obj-$(CONFIG_BLK_DEV_SVWKS)		+= serverworks.o
-obj-$(CONFIG_BLK_DEV_SGIIOC4)		+= sgiioc4.o
-obj-$(CONFIG_BLK_DEV_SIIMAGE)		+= siimage.o
-obj-$(CONFIG_BLK_DEV_SIS5513)		+= sis5513.o
-obj-$(CONFIG_BLK_DEV_SL82C105)		+= sl82c105.o
-obj-$(CONFIG_BLK_DEV_SLC90E66)		+= slc90e66.o
-obj-$(CONFIG_BLK_DEV_TC86C001)		+= tc86c001.o
-obj-$(CONFIG_BLK_DEV_TRIFLEX)		+= triflex.o
-obj-$(CONFIG_BLK_DEV_TRM290)		+= trm290.o
-obj-$(CONFIG_BLK_DEV_VIA82CXXX)		+= via82cxxx.o
-
-# Must appear at the end of the block
-obj-$(CONFIG_BLK_DEV_GENERIC)		+= ide-pci-generic.o
-ide-pci-generic-y			+= generic.o
-
-ifeq ($(CONFIG_BLK_DEV_CMD640), m)
-	obj-m += cmd640.o
-endif
-
-EXTRA_CFLAGS	:= -Idrivers/ide
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
deleted file mode 100644
index fb1a3aa..0000000
--- a/drivers/ide/pci/hpt34x.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 1998-2000	Andre Hedrick <andre@linux-ide.org>
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- *
- * 00:12.0 Unknown mass storage controller:
- * Triones Technologies, Inc.
- * Unknown device 0003 (rev 01)
- *
- * hde: UDMA 2 (0x0000 0x0002) (0x0000 0x0010)
- * hdf: UDMA 2 (0x0002 0x0012) (0x0010 0x0030)
- * hde: DMA 2  (0x0000 0x0002) (0x0000 0x0010)
- * hdf: DMA 2  (0x0002 0x0012) (0x0010 0x0030)
- * hdg: DMA 1  (0x0012 0x0052) (0x0030 0x0070)
- * hdh: DMA 1  (0x0052 0x0252) (0x0070 0x00f0)
- *
- * ide-pci.c reference
- *
- * Since there are two cards that report almost identically,
- * the only discernable difference is the values reported in pcicmd.
- * Booting-BIOS card or HPT363 :: pcicmd == 0x07
- * Non-bootable card or HPT343 :: pcicmd == 0x05
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "hpt34x"
-
-#define HPT343_DEBUG_DRIVE_INFO		0
-
-static void hpt34x_set_mode(ide_drive_t *drive, const u8 speed)
-{
-	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
-	u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0;
-	u8			hi_speed, lo_speed;
-
-	hi_speed = speed >> 4;
-	lo_speed = speed & 0x0f;
-
-	if (hi_speed & 7) {
-		hi_speed = (hi_speed & 4) ? 0x01 : 0x10;
-	} else {
-		lo_speed <<= 5;
-		lo_speed >>= 5;
-	}
-
-	pci_read_config_dword(dev, 0x44, &reg1);
-	pci_read_config_dword(dev, 0x48, &reg2);
-	tmp1 = ((lo_speed << (3*drive->dn)) | (reg1 & ~(7 << (3*drive->dn))));
-	tmp2 = ((hi_speed << drive->dn) | (reg2 & ~(0x11 << drive->dn)));
-	pci_write_config_dword(dev, 0x44, tmp1);
-	pci_write_config_dword(dev, 0x48, tmp2);
-
-#if HPT343_DEBUG_DRIVE_INFO
-	printk("%s: %s drive%d (0x%04x 0x%04x) (0x%04x 0x%04x)" \
-		" (0x%02x 0x%02x)\n",
-		drive->name, ide_xfer_verbose(speed),
-		drive->dn, reg1, tmp1, reg2, tmp2,
-		hi_speed, lo_speed);
-#endif /* HPT343_DEBUG_DRIVE_INFO */
-}
-
-static void hpt34x_set_pio_mode(ide_drive_t *drive, const u8 pio)
-{
-	hpt34x_set_mode(drive, XFER_PIO_0 + pio);
-}
-
-/*
- * If the BIOS does not set the IO base addaress to XX00, 343 will fail.
- */
-#define	HPT34X_PCI_INIT_REG		0x80
-
-static unsigned int init_chipset_hpt34x(struct pci_dev *dev)
-{
-	int i = 0;
-	unsigned long hpt34xIoBase = pci_resource_start(dev, 4);
-	unsigned long hpt_addr[4] = { 0x20, 0x34, 0x28, 0x3c };
-	unsigned long hpt_addr_len[4] = { 7, 3, 7, 3 };
-	u16 cmd;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	pci_write_config_byte(dev, HPT34X_PCI_INIT_REG, 0x00);
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-
-	if (cmd & PCI_COMMAND_MEMORY)
-		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
-	else
-		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
-
-	/*
-	 * Since 20-23 can be assigned and are R/W, we correct them.
-	 */
-	pci_write_config_word(dev, PCI_COMMAND, cmd & ~PCI_COMMAND_IO);
-	for(i=0; i<4; i++) {
-		dev->resource[i].start = (hpt34xIoBase + hpt_addr[i]);
-		dev->resource[i].end = dev->resource[i].start + hpt_addr_len[i];
-		dev->resource[i].flags = IORESOURCE_IO;
-		pci_write_config_dword(dev,
-				(PCI_BASE_ADDRESS_0 + (i * 4)),
-				dev->resource[i].start);
-	}
-	pci_write_config_word(dev, PCI_COMMAND, cmd);
-
-	local_irq_restore(flags);
-
-	return dev->irq;
-}
-
-static const struct ide_port_ops hpt34x_port_ops = {
-	.set_pio_mode		= hpt34x_set_pio_mode,
-	.set_dma_mode		= hpt34x_set_mode,
-};
-
-#define IDE_HFLAGS_HPT34X \
-	(IDE_HFLAG_NO_ATAPI_DMA | \
-	 IDE_HFLAG_NO_DSC | \
-	 IDE_HFLAG_NO_AUTODMA)
-
-static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
-	{ /* 0: HPT343 */
-		.name		= DRV_NAME,
-		.init_chipset	= init_chipset_hpt34x,
-		.port_ops	= &hpt34x_port_ops,
-		.host_flags	= IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE,
-		.pio_mask	= ATA_PIO5,
-	},
-	{ /* 1: HPT345 */
-		.name		= DRV_NAME,
-		.init_chipset	= init_chipset_hpt34x,
-		.port_ops	= &hpt34x_port_ops,
-		.host_flags	= IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
-		.pio_mask	= ATA_PIO5,
-#ifdef CONFIG_HPT34X_AUTODMA
-		.swdma_mask	= ATA_SWDMA2,
-		.mwdma_mask	= ATA_MWDMA2,
-		.udma_mask	= ATA_UDMA2,
-#endif
-	}
-};
-
-static int __devinit hpt34x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
-	const struct ide_port_info *d;
-	u16 pcicmd = 0;
-
-	pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
-
-	d = &hpt34x_chipsets[(pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0];
-
-	return ide_pci_init_one(dev, d, NULL);
-}
-
-static const struct pci_device_id hpt34x_pci_tbl[] = {
-	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), 0 },
-	{ 0, },
-};
-MODULE_DEVICE_TABLE(pci, hpt34x_pci_tbl);
-
-static struct pci_driver hpt34x_pci_driver = {
-	.name		= "HPT34x_IDE",
-	.id_table	= hpt34x_pci_tbl,
-	.probe		= hpt34x_init_one,
-	.remove		= ide_pci_remove,
-	.suspend	= ide_pci_suspend,
-	.resume		= ide_pci_resume,
-};
-
-static int __init hpt34x_ide_init(void)
-{
-	return ide_pci_register_driver(&hpt34x_pci_driver);
-}
-
-static void __exit hpt34x_ide_exit(void)
-{
-	pci_unregister_driver(&hpt34x_pci_driver);
-}
-
-module_init(hpt34x_ide_init);
-module_exit(hpt34x_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for Highpoint 34x IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
similarity index 100%
rename from drivers/ide/pci/pdc202xx_new.c
rename to drivers/ide/pdc202xx_new.c
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
similarity index 100%
rename from drivers/ide/pci/pdc202xx_old.c
rename to drivers/ide/pdc202xx_old.c
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/piix.c
similarity index 100%
rename from drivers/ide/pci/piix.c
rename to drivers/ide/piix.c
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/pmac.c
similarity index 100%
rename from drivers/ide/ppc/pmac.c
rename to drivers/ide/pmac.c
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
deleted file mode 100644
index 74e52ad..0000000
--- a/drivers/ide/ppc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-$(CONFIG_BLK_DEV_IDE_PMAC)		+= pmac.o
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/q40ide.c
similarity index 100%
rename from drivers/ide/legacy/q40ide.c
rename to drivers/ide/q40ide.c
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/qd65xx.c
similarity index 100%
rename from drivers/ide/legacy/qd65xx.c
rename to drivers/ide/qd65xx.c
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/qd65xx.h
similarity index 100%
rename from drivers/ide/legacy/qd65xx.h
rename to drivers/ide/qd65xx.h
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/rapide.c
similarity index 100%
rename from drivers/ide/arm/rapide.c
rename to drivers/ide/rapide.c
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/rz1000.c
similarity index 100%
rename from drivers/ide/pci/rz1000.c
rename to drivers/ide/rz1000.c
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/sc1200.c
similarity index 100%
rename from drivers/ide/pci/sc1200.c
rename to drivers/ide/sc1200.c
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/scc_pata.c
similarity index 99%
rename from drivers/ide/pci/scc_pata.c
rename to drivers/ide/scc_pata.c
index 9ce1d80..49f163a 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -617,7 +617,6 @@
 	unsigned long intmask_port;
 	unsigned long mode_port;
 	unsigned long ecmode_port;
-	unsigned long dma_status_port;
 	u32 reg = 0;
 	struct scc_ports *ports;
 	int rc;
@@ -637,7 +636,6 @@
 	intmask_port = dma_base + 0x010;
 	mode_port = ctl_base + 0x024;
 	ecmode_port = ctl_base + 0xf00;
-	dma_status_port = dma_base + 0x004;
 
 	/* controller initialization */
 	reg = 0;
@@ -843,8 +841,6 @@
 
 static void __devinit init_hwif_scc(ide_hwif_t *hwif)
 {
-	struct scc_ports *ports = ide_get_hwifdata(hwif);
-
 	/* PTERADD */
 	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
 
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/serverworks.c
similarity index 100%
rename from drivers/ide/pci/serverworks.c
rename to drivers/ide/serverworks.c
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/sgiioc4.c
similarity index 93%
rename from drivers/ide/pci/sgiioc4.c
rename to drivers/ide/sgiioc4.c
index dd63454..8af9b23 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -101,18 +101,8 @@
 	for (i = 0; i <= 7; i++)
 		hw->io_ports_array[i] = reg + i * 4;
 
-	if (ctrl_port)
-		hw->io_ports.ctl_addr = ctrl_port;
-
-	if (irq_port)
-		hw->io_ports.irq_addr = irq_port;
-}
-
-static void
-sgiioc4_maskproc(ide_drive_t * drive, int mask)
-{
-	writeb(ATA_DEVCTL_OBS | (mask ? 2 : 0),
-	       (void __iomem *)drive->hwif->io_ports.ctl_addr);
+	hw->io_ports.ctl_addr = ctrl_port;
+	hw->io_ports.irq_addr = irq_port;
 }
 
 static int
@@ -310,16 +300,14 @@
 	unsigned long port = hwif->io_ports.status_addr;
 	u8 reg = (u8) readb((void __iomem *) port);
 
-	if ((port & 0xFFF) == 0x11C) {	/* Status register of IOC4 */
-		if (!(reg & ATA_BUSY)) { /* Not busy... check for interrupt */
-			unsigned long other_ir = port - 0x110;
-			unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);
+	if (!(reg & ATA_BUSY)) {	/* Not busy... check for interrupt */
+		unsigned long other_ir = port - 0x110;
+		unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);
 
-			/* Clear the Interrupt, Error bits on the IOC4 */
-			if (intr_reg & 0x03) {
-				writel(0x03, (void __iomem *) other_ir);
-				intr_reg = (u32) readl((void __iomem *) other_ir);
-			}
+		/* Clear the Interrupt, Error bits on the IOC4 */
+		if (intr_reg & 0x03) {
+			writel(0x03, (void __iomem *) other_ir);
+			intr_reg = (u32) readl((void __iomem *) other_ir);
 		}
 	}
 
@@ -332,13 +320,9 @@
 {
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
 	unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
-	void __iomem *virt_dma_base;
 	int num_ports = sizeof (ioc4_dma_regs_t);
 	void *pad;
 
-	if (dma_base == 0)
-		return -1;
-
 	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
 
 	if (request_mem_region(dma_base, num_ports, hwif->name) == NULL) {
@@ -348,14 +332,8 @@
 		return -1;
 	}
 
-	virt_dma_base = ioremap(dma_base, num_ports);
-	if (virt_dma_base == NULL) {
-		printk(KERN_ERR "%s(%s) -- ERROR: unable to map addresses "
-		       "0x%lx to 0x%lx\n", __func__, hwif->name,
-		       dma_base, dma_base + num_ports - 1);
-		goto dma_remap_failure;
-	}
-	hwif->dma_base = (unsigned long) virt_dma_base;
+	hwif->dma_base = (unsigned long)hwif->io_ports.irq_addr +
+			 IOC4_DMA_OFFSET;
 
 	hwif->sg_max_nents = IOC4_PRD_ENTRIES;
 
@@ -379,9 +357,6 @@
 	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
 
 dma_pci_alloc_failure:
-	iounmap(virt_dma_base);
-
-dma_remap_failure:
 	release_mem_region(dma_base, num_ports);
 
 	return -1;
@@ -563,8 +538,6 @@
 	.set_dma_mode		= sgiioc4_set_dma_mode,
 	/* reset DMA engine, clear IRQs */
 	.resetproc		= sgiioc4_resetproc,
-	/* mask on/off NIEN register */
-	.maskproc		= sgiioc4_maskproc,
 };
 
 static const struct ide_dma_ops sgiioc4_dma_ops = {
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/siimage.c
similarity index 100%
rename from drivers/ide/pci/siimage.c
rename to drivers/ide/siimage.c
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/sis5513.c
similarity index 100%
rename from drivers/ide/pci/sis5513.c
rename to drivers/ide/sis5513.c
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/sl82c105.c
similarity index 100%
rename from drivers/ide/pci/sl82c105.c
rename to drivers/ide/sl82c105.c
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/slc90e66.c
similarity index 100%
rename from drivers/ide/pci/slc90e66.c
rename to drivers/ide/slc90e66.c
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/tc86c001.c
similarity index 100%
rename from drivers/ide/pci/tc86c001.c
rename to drivers/ide/tc86c001.c
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/triflex.c
similarity index 100%
rename from drivers/ide/pci/triflex.c
rename to drivers/ide/triflex.c
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/trm290.c
similarity index 100%
rename from drivers/ide/pci/trm290.c
rename to drivers/ide/trm290.c
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/umc8672.c
similarity index 100%
rename from drivers/ide/legacy/umc8672.c
rename to drivers/ide/umc8672.c
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/via82cxxx.c
similarity index 100%
rename from drivers/ide/pci/via82cxxx.c
rename to drivers/ide/via82cxxx.c
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index a78d35a..f1e82a9 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -122,7 +122,7 @@
 
 #define CM_COUNTER_ATTR(_name, _index) \
 struct cm_counter_attribute cm_##_name##_counter_attr = { \
-	.attr = { .name = __stringify(_name), .mode = 0444, .owner = THIS_MODULE }, \
+	.attr = { .name = __stringify(_name), .mode = 0444 }, \
 	.index = _index \
 }
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 49c45fe..5c54fc2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -406,19 +406,15 @@
 
 	if (i == qp_info->snoop_table_size) {
 		/* Grow table. */
-		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
-					  qp_info->snoop_table_size + 1,
-					  GFP_ATOMIC);
+		new_snoop_table = krealloc(qp_info->snoop_table,
+					   sizeof mad_snoop_priv *
+					   (qp_info->snoop_table_size + 1),
+					   GFP_ATOMIC);
 		if (!new_snoop_table) {
 			i = -ENOMEM;
 			goto out;
 		}
-		if (qp_info->snoop_table) {
-			memcpy(new_snoop_table, qp_info->snoop_table,
-			       sizeof mad_snoop_priv *
-			       qp_info->snoop_table_size);
-			kfree(qp_info->snoop_table);
-		}
+
 		qp_info->snoop_table = new_snoop_table;
 		qp_info->snoop_table_size++;
 	}
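The ib_mad change above replaces an open-coded kmalloc/memcpy/kfree sequence with krealloc(), which grows the snoop table in one call while preserving its contents and leaving the old table untouched on failure. The same shape in stand-alone C, using realloc() in place of krealloc() (names are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static void **snoop_table;
static int snoop_table_size;

/* Append one entry; returns its index, or -1 if the allocation fails. */
static int snoop_table_grow(void *entry)
{
	void **new_table;

	new_table = realloc(snoop_table,
			    sizeof(*new_table) * (snoop_table_size + 1));
	if (!new_table)
		return -1;	/* old table is still valid and unchanged */

	snoop_table = new_table;
	snoop_table[snoop_table_size] = entry;
	return snoop_table_size++;
}

int main(void)
{
	int a = 1, b = 2;

	printf("slot %d\n", snoop_table_grow(&a));
	printf("slot %d\n", snoop_table_grow(&b));
	printf("table size %d\n", snoop_table_size);
	free(snoop_table);
	return 0;
}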
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3ddacf3..4346a24 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,8 +904,8 @@
 
 	mutex_lock(&file->mut);
 	mc = ucma_alloc_multicast(ctx);
-	if (IS_ERR(mc)) {
-		ret = PTR_ERR(mc);
+	if (!mc) {
+		ret = -ENOMEM;
 		goto err1;
 	}
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c325c44..44e936e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1942,6 +1942,7 @@
 fail3:
 	cxgb3_free_atid(ep->com.tdev, ep->atid);
 fail2:
+	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
 out:
 	return err;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5d7b785..4df887a 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -128,6 +128,8 @@
 	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
 	u32 hca_cap_mr_pgsize;
 	int max_mtu;
+	int max_num_qps;
+	int max_num_cqs;
 	atomic_t num_cqs;
 	atomic_t num_qps;
 };
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 33647a9..2f4c28a 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,9 +132,9 @@
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
+	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
 		ehca_err(device, "Unable to create CQ, max number of %i "
-			"CQs reached.", ehca_max_cq);
+			"CQs reached.", shca->max_num_cqs);
 		ehca_err(device, "To increase the maximum number of CQs "
 			"use the number_of_cqs module parameter.\n");
 		return ERR_PTR(-ENOSPC);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 598844d..bb02a86 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -44,6 +44,8 @@
 #include <linux/slab.h>
 #endif
 
+#include <linux/notifier.h>
+#include <linux/memory.h>
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
@@ -366,22 +368,23 @@
 			shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
 
 	/* Set maximum number of CQs and QPs to calculate EQ size */
-	if (ehca_max_qp == -1)
-		ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
-		ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
-			"specified by HW", rblock->max_qp);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_qps == -1)
+		shca->max_num_qps = min_t(int, rblock->max_qp,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
+		ehca_gen_warn("The requested number of QPs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_qp, rblock->max_qp);
+		shca->max_num_qps = rblock->max_qp;
 	}
 
-	if (ehca_max_cq == -1)
-		ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
-		ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
-			"specified by HW", rblock->max_cq);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_cqs == -1)
+		shca->max_num_cqs = min_t(int, rblock->max_cq,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
+		ehca_gen_warn("The requested number of CQs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_cq, rblock->max_cq);
+		shca->max_num_cqs = rblock->max_cq;
 	}
 
 	/* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@
 		ehca_gen_err("Cannot allocate shca memory.");
 		return -ENOMEM;
 	}
+
 	mutex_init(&shca->modify_mutex);
 	atomic_set(&shca->num_cqs, 0);
 	atomic_set(&shca->num_qps, 0);
+	shca->max_num_qps = ehca_max_qp;
+	shca->max_num_cqs = ehca_max_cq;
+
 	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
 		spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
@@ -755,7 +762,7 @@
 		goto probe1;
 	}
 
-	eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
+	eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
 	/* create event queues */
 	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
 	if (ret) {
@@ -964,6 +971,41 @@
 	spin_unlock(&shca_list_lock);
 }
 
+static int ehca_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	static unsigned long ehca_dmem_warn_time;
+
+	switch (action) {
+	case MEM_CANCEL_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+		return NOTIFY_OK;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+		/* only ok if no hca is attached to the lpar */
+		spin_lock(&shca_list_lock);
+		if (list_empty(&shca_list)) {
+			spin_unlock(&shca_list_lock);
+			return NOTIFY_OK;
+		} else {
+			spin_unlock(&shca_list_lock);
+			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+						   30 * 1000))
+				ehca_gen_err("DMEM operations are not allowed "
+					     "as long as an ehca adapter is "
+					     "attached to the LPAR");
+			return NOTIFY_BAD;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ehca_mem_nb = {
+	.notifier_call = ehca_mem_notifier,
+};
+
 static int __init ehca_module_init(void)
 {
 	int ret;
@@ -991,6 +1033,12 @@
 		goto module_init2;
 	}
 
+	ret = register_memory_notifier(&ehca_mem_nb);
+	if (ret) {
+		ehca_gen_err("Failed registering memory add/remove notifier");
+		goto module_init3;
+	}
+
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
 		ehca_gen_err("It is possible to lose interrupts.");
@@ -1003,6 +1051,9 @@
 
 	return 0;
 
+module_init3:
+	ibmebus_unregister_driver(&ehca_driver);
+
 module_init2:
 	ehca_destroy_slab_caches();
 
@@ -1018,6 +1069,8 @@
 
 	ibmebus_unregister_driver(&ehca_driver);
 
+	unregister_memory_notifier(&ehca_mem_nb);
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
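The ehca_mem_notifier added above registers on the memory-hotplug notifier chain and answers MEM_GOING_ONLINE/MEM_GOING_OFFLINE with NOTIFY_BAD while any adapter is bound, which makes the core abort the transition. A small stand-alone sketch of that veto-style callback chain, with invented names and constants standing in for the kernel notifier API:

#include <stdio.h>

enum { NOTIFY_OK = 0, NOTIFY_BAD = 1 };
enum { MEM_GOING_OFFLINE, MEM_GOING_ONLINE };

typedef int (*notifier_fn)(unsigned long action);

/* Mirrors the patch's rule: refuse the transition while an adapter is bound. */
static int adapters_attached = 1;

static int hca_notifier(unsigned long action)
{
	if ((action == MEM_GOING_OFFLINE || action == MEM_GOING_ONLINE) &&
	    adapters_attached)
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

/* The chain stops at the first callback that vetoes the operation. */
static int call_chain(notifier_fn *chain, int n, unsigned long action)
{
	int i;

	for (i = 0; i < n; i++)
		if (chain[i](action) == NOTIFY_BAD)
			return NOTIFY_BAD;
	return NOTIFY_OK;
}

int main(void)
{
	notifier_fn chain[] = { hca_notifier };
	int rc = call_chain(chain, 1, MEM_GOING_OFFLINE);

	printf("memory offline %s\n", rc == NOTIFY_BAD ? "vetoed" : "allowed");
	return 0;
}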
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4dbe287..4d54b9f 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -465,9 +465,9 @@
 	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
-	if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
 		ehca_err(pd->device, "Unable to create QP, max number of %i "
-			 "QPs reached.", ehca_max_qp);
+			 "QPs reached.", shca->max_num_qps);
 		ehca_err(pd->device, "To increase the maximum number of QPs "
 			 "use the number_of_qps module parameter.\n");
 		return ERR_PTR(-ENOSPC);
@@ -502,6 +502,12 @@
 	if (init_attr->srq) {
 		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
 
+		if (qp_type == IB_QPT_UC) {
+			ehca_err(pd->device, "UC with SRQ not supported");
+			atomic_dec(&shca->num_qps);
+			return ERR_PTR(-EINVAL);
+		}
+
 		has_srq = 1;
 		parms.ext_type = EQPT_SRQBASE;
 		parms.srq_qpn = my_srq->real_qp_num;
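ehca_create_qp() above now caps QP creation with atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps), i.e. the counter is bumped only while it is still below the per-adapter limit. A user-space approximation of that primitive using C11 atomics (illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

/* Add 1 to *v unless it already equals limit; 1 on success, 0 if at limit. */
static int add_unless(atomic_int *v, int limit)
{
	int old = atomic_load(v);

	while (old != limit) {
		/* On failure, old is reloaded with the current value; retry. */
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	}
	return 0;
}

int main(void)
{
	atomic_int num_qps = 0;
	int max_num_qps = 2, i;

	for (i = 0; i < 4; i++)
		printf("create QP %d: %s\n", i,
		       add_unless(&num_qps, max_num_qps) ? "ok" : "limit reached");
	return 0;
}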
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index cdca3a5..606f1e2 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -298,7 +298,7 @@
 	int p, q;
 	int ret;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q) {
 			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
 						      q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -314,7 +314,7 @@
 	return 0;
 
 err:
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q)
 			if (dev->send_agent[p][q])
 				ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -327,7 +327,7 @@
 	struct ib_mad_agent *agent;
 	int p, q;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+	for (p = 0; p < dev->num_ports; ++p) {
 		for (q = 0; q <= 1; ++q) {
 			agent = dev->send_agent[p][q];
 			dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a3c2851..2e80f8f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -574,7 +574,10 @@
 	ibdev->ib_dev.owner		= THIS_MODULE;
 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
-	ibdev->ib_dev.phys_port_cnt	= dev->caps.num_ports;
+	ibdev->num_ports = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ibdev->num_ports++;
+	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors	= 1;
 	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
 
@@ -691,7 +694,7 @@
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
-	for (p = 1; p <= dev->caps.num_ports; ++p)
+	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
 	mlx4_ib_mad_cleanup(ibdev);
@@ -706,6 +709,10 @@
 			  enum mlx4_dev_event event, int port)
 {
 	struct ib_event ibev;
+	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+
+	if (port > ibdev->num_ports)
+		return;
 
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6e2b0dc..9974e88 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -162,6 +162,7 @@
 struct mlx4_ib_dev {
 	struct ib_device	ib_dev;
 	struct mlx4_dev	       *dev;
+	int			num_ports;
 	void __iomem	       *uar_map;
 
 	struct mlx4_uar		priv_uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index baa01de..39167a7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -451,6 +451,7 @@
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
+	int qpn;
 	int err;
 
 	mutex_init(&qp->mutex);
@@ -545,9 +546,17 @@
 		}
 	}
 
-	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
+	if (sqpn) {
+		qpn = sqpn;
+	} else {
+		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+		if (err)
+			goto err_wrid;
+	}
+
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
-		goto err_wrid;
+		goto err_qpn;
 
 	/*
 	 * Hardware wants QPN written in big-endian order (after
@@ -560,6 +569,10 @@
 
 	return 0;
 
+err_qpn:
+	if (!sqpn)
+		mlx4_qp_release_range(dev->dev, qpn, 1);
+
 err_wrid:
 	if (pd->uobject) {
 		if (!init_attr->srq)
@@ -655,6 +668,10 @@
 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
 
 	mlx4_qp_free(dev->dev, &qp->mqp);
+
+	if (!is_sqp(dev, qp))
+		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 68ba5c3..e0c7dfa 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -507,6 +507,7 @@
 void ipoib_drain_cq(struct net_device *dev);
 
 void ipoib_set_ethtool_ops(struct net_device *dev);
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 66af5c1..e9795f6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,6 +42,13 @@
 	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
 }
 
+static u32 ipoib_get_rx_csum(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
+		!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
 static int ipoib_get_coalesce(struct net_device *dev,
 			      struct ethtool_coalesce *coal)
 {
@@ -129,7 +136,7 @@
 
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
-	.get_tso		= ethtool_op_get_tso,
+	.get_rx_csum		= ipoib_get_rx_csum,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
 	.get_flags		= ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0e748ae..28eb6f0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,10 +685,6 @@
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
-	init_timer(&priv->poll_timer);
-	priv->poll_timer.function = ipoib_ib_tx_timer_func;
-	priv->poll_timer.data = (unsigned long)dev;
-
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 
 	return 0;
@@ -906,6 +902,9 @@
 		return -ENODEV;
 	}
 
+	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
+		    (unsigned long) dev);
+
 	if (dev->flags & IFF_UP) {
 		if (ipoib_ib_dev_open(dev)) {
 			ipoib_transport_dev_cleanup(dev);
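
The ipoib_ib.c hunks move the TX poll timer setup from the open path into device
init and collapse the open-coded init_timer()/function/data assignments into a
single setup_timer() call (the timer API of this kernel generation). A small
sketch of the conversion; poll_timer_func() and init_poll_timer() are made-up
stand-ins for the real IPoIB code:

#include <linux/timer.h>
#include <linux/netdevice.h>

static struct timer_list poll_timer;

static void poll_timer_func(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	/* ... poll the send side for dev, as the real callback does ... */
	(void)dev;
}

static void init_poll_timer(struct net_device *dev)
{
	/*
	 * Equivalent to:
	 *	init_timer(&poll_timer);
	 *	poll_timer.function = poll_timer_func;
	 *	poll_timer.data     = (unsigned long)dev;
	 */
	setup_timer(&poll_timer, poll_timer_func, (unsigned long)dev);
}
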
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0ee5143..fddded7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1173,11 +1173,48 @@
 	return device_create_file(&dev->dev, &dev_attr_pkey);
 }
 
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
+{
+	struct ib_device_attr *device_attr;
+	int result = -ENOMEM;
+
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr) {
+		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+		       hca->name, sizeof *device_attr);
+		return result;
+	}
+
+	result = ib_query_device(hca, device_attr);
+	if (result) {
+		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+		       hca->name, result);
+		kfree(device_attr);
+		return result;
+	}
+	priv->hca_caps = device_attr->device_cap_flags;
+
+	kfree(device_attr);
+
+	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	}
+
+	if (lro)
+		priv->dev->features |= NETIF_F_LRO;
+
+	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+		priv->dev->features |= NETIF_F_TSO;
+
+	return 0;
+}
+
+
 static struct net_device *ipoib_add_port(const char *format,
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
-	struct ib_device_attr *device_attr;
 	struct ib_port_attr attr;
 	int result = -ENOMEM;
 
@@ -1206,31 +1243,8 @@
 		goto device_init_failed;
 	}
 
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr) {
-		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
-		       hca->name, sizeof *device_attr);
+	if (ipoib_set_dev_features(priv, hca))
 		goto device_init_failed;
-	}
-
-	result = ib_query_device(hca, device_attr);
-	if (result) {
-		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
-		       hca->name, result);
-		kfree(device_attr);
-		goto device_init_failed;
-	}
-	priv->hca_caps = device_attr->device_cap_flags;
-
-	kfree(device_attr);
-
-	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
-		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
-		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-	}
-
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
 
 	/*
 	 * Set the full membership bit, so that we join the right
@@ -1266,9 +1280,6 @@
 		goto event_failed;
 	}
 
-	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
-		priv->dev->features |= NETIF_F_TSO;
-
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index b08eb56..2cf1a40 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -93,6 +93,10 @@
 	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
+	result = ipoib_set_dev_features(priv, ppriv->ca);
+	if (result)
+		goto device_init_failed;
+
 	priv->pkey = pkey;
 
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 7839b7b..0de9a09 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -197,13 +197,16 @@
  * Analyse the changes in an effect, and tell if we need to send a condition
  * parameter packet
  */
-static int need_condition_modifier(struct ff_effect *old, struct ff_effect *new)
+static int need_condition_modifier(struct iforce *iforce,
+				   struct ff_effect *old,
+				   struct ff_effect *new)
 {
 	int ret = 0;
 	int i;
 
 	if (new->type != FF_SPRING && new->type != FF_FRICTION) {
-		warn("bad effect type in need_condition_modifier");
+		dev_warn(&iforce->dev->dev, "bad effect type in %s\n",
+			 __func__);
 		return 0;
 	}
 
@@ -222,10 +225,13 @@
  * Analyse the changes in an effect, and tell if we need to send a magnitude
  * parameter packet
  */
-static int need_magnitude_modifier(struct ff_effect *old, struct ff_effect *effect)
+static int need_magnitude_modifier(struct iforce *iforce,
+				   struct ff_effect *old,
+				   struct ff_effect *effect)
 {
 	if (effect->type != FF_CONSTANT) {
-		warn("bad effect type in need_envelope_modifier");
+		dev_warn(&iforce->dev->dev, "bad effect type in %s\n",
+			 __func__);
 		return 0;
 	}
 
@@ -236,7 +242,8 @@
  * Analyse the changes in an effect, and tell if we need to send an envelope
  * parameter packet
  */
-static int need_envelope_modifier(struct ff_effect *old, struct ff_effect *effect)
+static int need_envelope_modifier(struct iforce *iforce, struct ff_effect *old,
+				  struct ff_effect *effect)
 {
 	switch (effect->type) {
 	case FF_CONSTANT:
@@ -256,7 +263,8 @@
 		break;
 
 	default:
-		warn("bad effect type in need_envelope_modifier");
+		dev_warn(&iforce->dev->dev, "bad effect type in %s\n",
+			 __func__);
 	}
 
 	return 0;
@@ -266,10 +274,12 @@
  * Analyse the changes in an effect, and tell if we need to send a periodic
  * parameter effect
  */
-static int need_period_modifier(struct ff_effect *old, struct ff_effect *new)
+static int need_period_modifier(struct iforce *iforce, struct ff_effect *old,
+				struct ff_effect *new)
 {
 	if (new->type != FF_PERIODIC) {
-		warn("bad effect type in need_period_modifier");
+		dev_warn(&iforce->dev->dev, "bad effect type in %s\n",
+			 __func__);
 		return 0;
 	}
 	return (old->u.periodic.period != new->u.periodic.period
@@ -355,7 +365,7 @@
 	int param2_err = 1;
 	int core_err = 0;
 
-	if (!old || need_period_modifier(old, effect)) {
+	if (!old || need_period_modifier(iforce, old, effect)) {
 		param1_err = make_period_modifier(iforce, mod1_chunk,
 			old != NULL,
 			effect->u.periodic.magnitude, effect->u.periodic.offset,
@@ -365,7 +375,7 @@
 		set_bit(FF_MOD1_IS_USED, core_effect->flags);
 	}
 
-	if (!old || need_envelope_modifier(old, effect)) {
+	if (!old || need_envelope_modifier(iforce, old, effect)) {
 		param2_err = make_envelope_modifier(iforce, mod2_chunk,
 			old !=NULL,
 			effect->u.periodic.envelope.attack_length,
@@ -425,7 +435,7 @@
 	int param2_err = 1;
 	int core_err = 0;
 
-	if (!old || need_magnitude_modifier(old, effect)) {
+	if (!old || need_magnitude_modifier(iforce, old, effect)) {
 		param1_err = make_magnitude_modifier(iforce, mod1_chunk,
 			old != NULL,
 			effect->u.constant.level);
@@ -434,7 +444,7 @@
 		set_bit(FF_MOD1_IS_USED, core_effect->flags);
 	}
 
-	if (!old || need_envelope_modifier(old, effect)) {
+	if (!old || need_envelope_modifier(iforce, old, effect)) {
 		param2_err = make_envelope_modifier(iforce, mod2_chunk,
 			old != NULL,
 			effect->u.constant.envelope.attack_length,
@@ -487,7 +497,7 @@
 		default: return -1;
 	}
 
-	if (!old || need_condition_modifier(old, effect)) {
+	if (!old || need_condition_modifier(iforce, old, effect)) {
 		param_err = make_condition_modifier(iforce, mod1_chunk,
 			old != NULL,
 			effect->u.condition[0].right_saturation,
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 61ee6e3..baabf83 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -218,7 +218,9 @@
 		/* Check: no effects should be present in memory */
 		for (i = 0; i < dev->ff->max_effects; i++) {
 			if (test_bit(FF_CORE_IS_USED, iforce->core_effects[i].flags)) {
-				warn("iforce_release: Device still owns effects");
+				dev_warn(&dev->dev,
+					"%s: Device still owns effects\n",
+					__func__);
 				break;
 			}
 		}
@@ -335,26 +337,26 @@
 	if (!iforce_get_id_packet(iforce, "M"))
 		input_dev->id.vendor = (iforce->edata[2] << 8) | iforce->edata[1];
 	else
-		warn("Device does not respond to id packet M");
+		dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
 
 	if (!iforce_get_id_packet(iforce, "P"))
 		input_dev->id.product = (iforce->edata[2] << 8) | iforce->edata[1];
 	else
-		warn("Device does not respond to id packet P");
+		dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
 
 	if (!iforce_get_id_packet(iforce, "B"))
 		iforce->device_memory.end = (iforce->edata[2] << 8) | iforce->edata[1];
 	else
-		warn("Device does not respond to id packet B");
+		dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
 
 	if (!iforce_get_id_packet(iforce, "N"))
 		ff_effects = iforce->edata[1];
 	else
-		warn("Device does not respond to id packet N");
+		dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
 
 	/* Check if the device can store more effects than the driver can really handle */
 	if (ff_effects > IFORCE_EFFECTS_MAX) {
-		warn("Limiting number of effects to %d (device reports %d)",
+		dev_warn(&iforce->dev->dev, "Limiting number of effects to %d (device reports %d)\n",
 		       IFORCE_EFFECTS_MAX, ff_effects);
 		ff_effects = IFORCE_EFFECTS_MAX;
 	}
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index 015b50a..a17b500 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -65,7 +65,8 @@
 
 
 	if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) {
-		warn("not enough space in xmit buffer to send new packet");
+		dev_warn(&iforce->dev->dev,
+			 "not enough space in xmit buffer to send new packet\n");
 		spin_unlock_irqrestore(&iforce->xmit_lock, flags);
 		return -1;
 	}
@@ -148,7 +149,7 @@
 			return 0;
 		}
 	}
-	warn("unused effect %04x updated !!!", addr);
+	dev_warn(&iforce->dev->dev, "unused effect %04x updated !!!\n", addr);
 	return -1;
 }
 
@@ -159,7 +160,8 @@
 	static int being_used = 0;
 
 	if (being_used)
-		warn("re-entrant call to iforce_process %d", being_used);
+		dev_warn(&iforce->dev->dev,
+			 "re-entrant call to iforce_process %d\n", being_used);
 	being_used++;
 
 #ifdef CONFIG_JOYSTICK_IFORCE_232
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 851cc408..f83185a 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -64,7 +64,7 @@
 
 	if ( (n=usb_submit_urb(iforce->out, GFP_ATOMIC)) ) {
 		clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
-		warn("usb_submit_urb failed %d\n", n);
+		dev_warn(&iforce->dev->dev, "usb_submit_urb failed %d\n", n);
 	}
 
 	/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
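
The iforce changes above, and the xpad/ati_remote/yealink/acecad/aiptek/gtco/
kbtab/wacom changes below, all follow one pattern: the old warn()/info()/err()
printk wrappers from usb.h are replaced by dev_warn()/dev_info() against a real
struct device, or by printk() tagged with KBUILD_MODNAME when no device exists
yet. A hedged sketch of the pattern; my_probe(), my_init() and the messages are
invented for illustration:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/usb.h>

static int my_probe(struct usb_interface *intf)
{
	/* Old style: warn("cannot allocate urb");
	 * New style: the message is prefixed with driver and device names. */
	dev_warn(&intf->dev, "cannot allocate urb\n");
	return 0;
}

static int __init my_init(void)
{
	/* Module-scope messages, where no device is available yet, switch
	 * from info()/err() to plain printk() with KBUILD_MODNAME. */
	printk(KERN_INFO KBUILD_MODNAME ": driver loaded\n");
	return 0;
}
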
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 839d1c9..b868b8d 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -911,7 +911,7 @@
 {
 	int result = usb_register(&xpad_driver);
 	if (result == 0)
-		info(DRIVER_DESC);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
 	return result;
 }
 
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index debfc1a..e290fde 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -285,7 +285,6 @@
 };
 
 /* Local function prototypes */
-static void ati_remote_dump		(unsigned char *data, unsigned int actual_length);
 static int ati_remote_open		(struct input_dev *inputdev);
 static void ati_remote_close		(struct input_dev *inputdev);
 static int ati_remote_sendpacket	(struct ati_remote *ati_remote, u16 cmd, unsigned char *data);
@@ -307,15 +306,16 @@
 /*
  *	ati_remote_dump_input
  */
-static void ati_remote_dump(unsigned char *data, unsigned int len)
+static void ati_remote_dump(struct device *dev, unsigned char *data,
+			    unsigned int len)
 {
 	if ((len == 1) && (data[0] != (unsigned char)0xff) && (data[0] != 0x00))
-		warn("Weird byte 0x%02x", data[0]);
+		dev_warn(dev, "Weird byte 0x%02x\n", data[0]);
 	else if (len == 4)
-		warn("Weird key %02x %02x %02x %02x",
+		dev_warn(dev, "Weird key %02x %02x %02x %02x\n",
 		     data[0], data[1], data[2], data[3]);
 	else
-		warn("Weird data, len=%d %02x %02x %02x %02x %02x %02x ...",
+		dev_warn(dev, "Weird data, len=%d %02x %02x %02x %02x %02x %02x ...\n",
 		     len, data[0], data[1], data[2], data[3], data[4], data[5]);
 }
 
@@ -470,7 +470,7 @@
 	/* Deal with strange looking inputs */
 	if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
 		((data[3] & 0x0f) != 0x00) ) {
-		ati_remote_dump(data, urb->actual_length);
+		ati_remote_dump(&urb->dev->dev, data, urb->actual_length);
 		return;
 	}
 
@@ -814,7 +814,7 @@
 	ati_remote = usb_get_intfdata(interface);
 	usb_set_intfdata(interface, NULL);
 	if (!ati_remote) {
-		warn("%s - null device?\n", __func__);
+		dev_warn(&interface->dev, "%s - null device?\n", __func__);
 		return;
 	}
 
@@ -834,9 +834,11 @@
 
 	result = usb_register(&ati_remote_driver);
 	if (result)
-		err("usb_register error #%d\n", result);
+		printk(KERN_ERR KBUILD_MODNAME
+		       ": usb_register error #%d\n", result);
 	else
-		info("Registered USB driver " DRIVER_DESC " v. " DRIVER_VERSION);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+		       DRIVER_DESC "\n");
 
 	return result;
 }
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 11b5c7e..93a22ac 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -999,7 +999,8 @@
 {
 	int ret = usb_register(&yealink_driver);
 	if (ret == 0)
-		info(DRIVER_DESC ":" DRIVER_VERSION);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+		       DRIVER_DESC "\n");
 	return ret;
 }
 
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 570e0e8..670c61c 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -280,7 +280,8 @@
 {
 	int result = usb_register(&usb_acecad_driver);
 	if (result == 0)
-		info(DRIVER_VERSION ":" DRIVER_DESC);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+		       DRIVER_DESC "\n");
 	return result;
 }
 
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e53c838..7d005a3 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1706,20 +1706,21 @@
 	aiptek = kzalloc(sizeof(struct aiptek), GFP_KERNEL);
 	inputdev = input_allocate_device();
 	if (!aiptek || !inputdev) {
-		warn("aiptek: cannot allocate memory or input device");
+		dev_warn(&intf->dev,
+			 "cannot allocate memory or input device\n");
 		goto fail1;
         }
 
 	aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
 					GFP_ATOMIC, &aiptek->data_dma);
         if (!aiptek->data) {
-		warn("aiptek: cannot allocate usb buffer");
+		dev_warn(&intf->dev, "cannot allocate usb buffer\n");
 		goto fail1;
 	}
 
 	aiptek->urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!aiptek->urb) {
-	        warn("aiptek: cannot allocate urb");
+	        dev_warn(&intf->dev, "cannot allocate urb\n");
 		goto fail2;
 	}
 
@@ -1843,8 +1844,9 @@
 		aiptek->curSetting.programmableDelay = speeds[i];
 		(void)aiptek_program_tablet(aiptek);
 		if (aiptek->inputdev->absmax[ABS_X] > 0) {
-			info("input: Aiptek using %d ms programming speed\n",
-			     aiptek->curSetting.programmableDelay);
+			dev_info(&intf->dev,
+				 "Aiptek using %d ms programming speed\n",
+				 aiptek->curSetting.programmableDelay);
 			break;
 		}
 	}
@@ -1852,7 +1854,8 @@
 	/* Murphy says that some day someone will have a tablet that fails the
 	   above test. That's you, Frederic Rodrigo */
 	if (i == ARRAY_SIZE(speeds)) {
-		info("input: Aiptek tried all speeds, no sane response");
+		dev_info(&intf->dev,
+			 "Aiptek tried all speeds, no sane response\n");
 		goto fail2;
 	}
 
@@ -1864,7 +1867,8 @@
 	 */
 	err = sysfs_create_group(&intf->dev.kobj, &aiptek_attribute_group);
 	if (err) {
-		warn("aiptek: cannot create sysfs group err: %d", err);
+		dev_warn(&intf->dev, "cannot create sysfs group err: %d\n",
+			 err);
 		goto fail3;
         }
 
@@ -1872,7 +1876,8 @@
 	 */
 	err = input_register_device(aiptek->inputdev);
 	if (err) {
-		warn("aiptek: input_register_device returned err: %d", err);
+		dev_warn(&intf->dev,
+			 "input_register_device returned err: %d\n", err);
 		goto fail4;
         }
 	return 0;
@@ -1922,8 +1927,9 @@
 {
 	int result = usb_register(&aiptek_driver);
 	if (result == 0) {
-		info(DRIVER_VERSION ": " DRIVER_AUTHOR);
-		info(DRIVER_DESC);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+		       DRIVER_DESC "\n");
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_AUTHOR "\n");
 	}
 	return result;
 }
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 7df0228..5524e01d 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -2,7 +2,7 @@
 
 GTCO digitizer USB driver
 
-Use the err(), dbg() and info() macros from usb.h for system logging
+Use the err() and dbg() macros from usb.h for system logging
 
 TO CHECK:  Is pressure done right on report 5?
 
@@ -1010,7 +1010,7 @@
 		kfree(gtco);
 	}
 
-	info("gtco driver disconnected");
+	dev_info(&interface->dev, "gtco driver disconnected\n");
 }
 
 /*   STANDARD MODULE LOAD ROUTINES  */
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index d89112f..6682b17 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -215,7 +215,8 @@
 	retval = usb_register(&kbtab_driver);
 	if (retval)
 		goto out;
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 out:
 	return retval;
 }
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 5fbc463..09e227a 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -385,7 +385,8 @@
 	wacom_driver.id_table = get_device_table();
 	result = usb_register(&wacom_driver);
 	if (result == 0)
-		info(DRIVER_VERSION ":" DRIVER_DESC);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+		       DRIVER_DESC "\n");
 	return result;
 }
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 6e1e8c6..8317fde 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -219,7 +219,7 @@
 
 config TOUCHSCREEN_UCB1400
 	tristate "Philips UCB1400 touchscreen"
-	select AC97_BUS
+	depends on AC97_BUS
 	depends on UCB1400_CORE
 	help
 	  This enables support for the Philips UCB1400 touchscreen interface.
diff --git a/drivers/input/touchscreen/hp680_ts_input.c b/drivers/input/touchscreen/hp680_ts_input.c
index c38d4e0..a89700e 100644
--- a/drivers/input/touchscreen/hp680_ts_input.c
+++ b/drivers/input/touchscreen/hp680_ts_input.c
@@ -5,7 +5,7 @@
 #include <asm/io.h>
 #include <asm/delay.h>
 #include <asm/adc.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 
 #define MODNAME "hp680_ts_input"
 
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e3e4042..c7ff1e1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -179,7 +179,7 @@
 
 config LEDS_TRIGGER_IDE_DISK
 	bool "LED IDE Disk Trigger"
-	depends on LEDS_TRIGGERS && BLK_DEV_IDEDISK
+	depends on LEDS_TRIGGERS && IDE_GD_ATA
 	help
 	  This allows LEDs to be controlled by IDE disk activity.
 	  If unsure, say Y.
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index 844d597..e8fb1ba 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -15,7 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <asm/hd64461.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 
 static void hp6xxled_green_set(struct led_classdev *led_cdev,
 			       enum led_brightness value)
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f1ef33d..1c61580 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -34,7 +34,7 @@
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o
+obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o dm-region-hash.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 682ef9e..ce26c84 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
 #include <asm/page.h>
 #include <asm/unaligned.h>
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "crypt"
 #define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@
 	atomic_t pending;
 	int error;
 	sector_t sector;
+	struct dm_crypt_io *base_io;
 };
 
 struct dm_crypt_request {
@@ -93,7 +94,6 @@
 
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
-	wait_queue_head_t writeq;
 
 	/*
 	 * crypto related data
@@ -534,6 +534,7 @@
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
+	io->base_io = NULL;
 	atomic_set(&io->pending, 0);
 
 	return io;
@@ -547,6 +548,7 @@
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
@@ -555,7 +557,14 @@
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	bio_endio(io->base_bio, io->error);
+	if (likely(!io->base_io))
+		bio_endio(io->base_bio, io->error);
+	else {
+		if (io->error && !io->base_io->error)
+			io->base_io->error = io->error;
+		crypt_dec_pending(io->base_io);
+	}
+
 	mempool_free(io, cc->io_pool);
 }
 
@@ -646,10 +655,7 @@
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
 	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->target->private;
-
 	generic_make_request(clone);
-	wake_up(&cc->writeq);
 }
 
 static void kcryptd_io(struct work_struct *work)
@@ -688,7 +694,6 @@
 	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
 
 	clone->bi_sector = cc->start + io->sector;
-	io->sector += bio_sectors(clone);
 
 	if (async)
 		kcryptd_queue_io(io);
@@ -700,16 +705,18 @@
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	struct dm_crypt_io *new_io;
 	int crypt_finished;
 	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
+	sector_t sector = io->sector;
 	int r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
@@ -726,6 +733,7 @@
 		io->ctx.idx_out = 0;
 
 		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
 
 		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
@@ -741,6 +749,8 @@
 			 */
 			if (unlikely(r < 0))
 				break;
+
+			io->sector = sector;
 		}
 
 		/*
@@ -750,8 +760,33 @@
 		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);
 
-		if (unlikely(remaining))
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * With async crypto it is unsafe to share the crypto context
+		 * between fragments, so switch to a new dm_crypt_io structure.
+		 */
+		if (unlikely(!crypt_finished && remaining)) {
+			new_io = crypt_io_alloc(io->target, io->base_bio,
+						sector);
+			crypt_inc_pending(new_io);
+			crypt_convert_init(cc, &new_io->ctx, NULL,
+					   io->base_bio, sector);
+			new_io->ctx.idx_in = io->ctx.idx_in;
+			new_io->ctx.offset_in = io->ctx.offset_in;
+
+			/*
+			 * Fragments after the first use the base_io
+			 * pending count.
+			 */
+			if (!io->base_io)
+				new_io->base_io = io;
+			else {
+				new_io->base_io = io->base_io;
+				crypt_inc_pending(io->base_io);
+				crypt_dec_pending(io);
+			}
+
+			io = new_io;
+		}
 	}
 
 	crypt_dec_pending(io);
@@ -1078,7 +1113,6 @@
 		goto bad_crypt_queue;
 	}
 
-	init_waitqueue_head(&cc->writeq);
 	ti->private = cc;
 	return 0;
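
The two dm-crypt hunks above cooperate: when the async path cannot finish a
fragment while more of the bio remains, a fresh dm_crypt_io is allocated for the
next fragment, and every fragment after the first pins the first one through
base_io. The original bio therefore completes only after the whole chain has
drained, and the first error seen is the one reported. A simplified model of
that completion rule, with hypothetical names (frag_io, frag_dec_pending)
standing in for dm_crypt_io/crypt_dec_pending and plain kfree() standing in for
the mempool:

#include <asm/atomic.h>
#include <linux/bio.h>
#include <linux/slab.h>

struct frag_io {
	atomic_t	 pending;	/* in-flight work on this fragment */
	int		 error;
	struct bio	*base_bio;	/* the caller's original bio       */
	struct frag_io	*base_io;	/* NULL for the first fragment     */
};

static void frag_dec_pending(struct frag_io *io)
{
	if (!atomic_dec_and_test(&io->pending))
		return;

	if (!io->base_io) {
		/* First fragment: it owns completion of the original bio. */
		bio_endio(io->base_bio, io->error);
	} else {
		/* Later fragment: propagate the first error, then drop the
		 * reference we hold on the first fragment. */
		if (io->error && !io->base_io->error)
			io->base_io->error = io->error;
		frag_dec_pending(io->base_io);
	}

	kfree(io);
}
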
 
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index bdd37f8..848b381 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -13,7 +13,8 @@
 #include <linux/bio.h>
 #include <linux/slab.h>
 
-#include "dm.h"
+#include <linux/device-mapper.h>
+
 #include "dm-bio-list.h"
 
 #define DM_MSG_PREFIX "delay"
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 769ab67..01590f3e 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -7,7 +7,6 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
 #include "dm-snap.h"
 
 #include <linux/mm.h>
@@ -105,6 +104,11 @@
 	void *area;
 
 	/*
+	 * An area of zeros used to clear the next area.
+	 */
+	void *zero_area;
+
+	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
@@ -149,6 +153,13 @@
 	if (!ps->area)
 		return r;
 
+	ps->zero_area = vmalloc(len);
+	if (!ps->zero_area) {
+		vfree(ps->area);
+		return r;
+	}
+	memset(ps->zero_area, 0, len);
+
 	return 0;
 }
 
@@ -156,6 +167,8 @@
 {
 	vfree(ps->area);
 	ps->area = NULL;
+	vfree(ps->zero_area);
+	ps->zero_area = NULL;
 }
 
 struct mdata_req {
@@ -220,25 +233,41 @@
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, chunk_t area, int rw)
+static int area_io(struct pstore *ps, int rw)
 {
 	int r;
 	chunk_t chunk;
 
-	chunk = area_location(ps, area);
+	chunk = area_location(ps, ps->current_area);
 
 	r = chunk_io(ps, chunk, rw, 0);
 	if (r)
 		return r;
 
-	ps->current_area = area;
 	return 0;
 }
 
-static int zero_area(struct pstore *ps, chunk_t area)
+static void zero_memory_area(struct pstore *ps)
 {
 	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
-	return area_io(ps, area, WRITE);
+}
+
+static int zero_disk_area(struct pstore *ps, chunk_t area)
+{
+	struct dm_io_region where = {
+		.bdev = ps->snap->cow->bdev,
+		.sector = ps->snap->chunk_size * area_location(ps, area),
+		.count = ps->snap->chunk_size,
+	};
+	struct dm_io_request io_req = {
+		.bi_rw = WRITE,
+		.mem.type = DM_IO_VMA,
+		.mem.ptr.vma = ps->zero_area,
+		.client = ps->io_client,
+		.notify.fn = NULL,
+	};
+
+	return dm_io(&io_req, 1, &where, NULL);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -411,15 +440,14 @@
 
 static int read_exceptions(struct pstore *ps)
 {
-	chunk_t area;
 	int r, full = 1;
 
 	/*
 	 * Keep reading chunks and inserting exceptions until
 	 * we find a partially full area.
 	 */
-	for (area = 0; full; area++) {
-		r = area_io(ps, area, READ);
+	for (ps->current_area = 0; full; ps->current_area++) {
+		r = area_io(ps, READ);
 		if (r)
 			return r;
 
@@ -428,6 +456,8 @@
 			return r;
 	}
 
+	ps->current_area--;
+
 	return 0;
 }
 
@@ -486,12 +516,13 @@
 			return r;
 		}
 
-		r = zero_area(ps, 0);
+		ps->current_area = 0;
+		zero_memory_area(ps);
+		r = zero_disk_area(ps, 0);
 		if (r) {
-			DMWARN("zero_area(0) failed");
+			DMWARN("zero_disk_area(0) failed");
 			return r;
 		}
-
 	} else {
 		/*
 		 * Sanity checks.
@@ -551,7 +582,6 @@
 			      void (*callback) (void *, int success),
 			      void *callback_context)
 {
-	int r;
 	unsigned int i;
 	struct pstore *ps = get_info(store);
 	struct disk_exception de;
@@ -572,33 +602,41 @@
 	cb->context = callback_context;
 
 	/*
-	 * If there are no more exceptions in flight, or we have
-	 * filled this metadata area we commit the exceptions to
-	 * disk.
+	 * If there are exceptions in flight and we have not yet
+	 * filled this metadata area there's nothing more to do.
 	 */
-	if (atomic_dec_and_test(&ps->pending_count) ||
-	    (ps->current_committed == ps->exceptions_per_area)) {
-		r = area_io(ps, ps->current_area, WRITE);
-		if (r)
-			ps->valid = 0;
+	if (!atomic_dec_and_test(&ps->pending_count) &&
+	    (ps->current_committed != ps->exceptions_per_area))
+		return;
 
-		/*
-		 * Have we completely filled the current area ?
-		 */
-		if (ps->current_committed == ps->exceptions_per_area) {
-			ps->current_committed = 0;
-			r = zero_area(ps, ps->current_area + 1);
-			if (r)
-				ps->valid = 0;
-		}
+	/*
+	 * If we completely filled the current area, then wipe the next one.
+	 */
+	if ((ps->current_committed == ps->exceptions_per_area) &&
+	     zero_disk_area(ps, ps->current_area + 1))
+		ps->valid = 0;
 
-		for (i = 0; i < ps->callback_count; i++) {
-			cb = ps->callbacks + i;
-			cb->callback(cb->context, r == 0 ? 1 : 0);
-		}
+	/*
+	 * Commit exceptions to disk.
+	 */
+	if (ps->valid && area_io(ps, WRITE))
+		ps->valid = 0;
 
-		ps->callback_count = 0;
+	/*
+	 * Advance to the next area if this one is full.
+	 */
+	if (ps->current_committed == ps->exceptions_per_area) {
+		ps->current_committed = 0;
+		ps->current_area++;
+		zero_memory_area(ps);
 	}
+
+	for (i = 0; i < ps->callback_count; i++) {
+		cb = ps->callbacks + i;
+		cb->callback(cb->context, ps->valid);
+	}
+
+	ps->callback_count = 0;
 }
 
 static void persistent_drop(struct exception_store *store)
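
The dm-exception-store changes above keep two buffers per store: ps->area caches
the metadata area currently being filled, while ps->zero_area is only ever a
source buffer for wiping the next area on disk, which the new commit path does
before the current area has been written out, so the cached copy must not be
disturbed. A hypothetical helper, built only from the functions added in this
patch, summarizing the order of operations once an area fills up (the real
sequencing lives in the commit callback shown above):

static int advance_to_next_area(struct pstore *ps)
{
	int r;

	/* Wipe the *next* on-disk area from zero_area; ps->area is untouched. */
	r = zero_disk_area(ps, ps->current_area + 1);
	if (r)
		return r;

	/* Persist the area we have just filled. */
	r = area_io(ps, WRITE);
	if (r)
		return r;

	/* Start filling the next area in memory. */
	ps->current_area++;
	ps->current_committed = 0;
	zero_memory_area(ps);

	return 0;
}
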
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4789c42..2fd6d44 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #include <linux/bio.h>
 #include <linux/mempool.h>
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 996802b..3073618 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/device-mapper.h>
 #include <linux/dm-kcopyd.h>
 
 #include "dm.h"
@@ -268,6 +269,17 @@
 	spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
+
+static void push_head(struct list_head *jobs, struct kcopyd_job *job)
+{
+	unsigned long flags;
+	struct dm_kcopyd_client *kc = job->kc;
+
+	spin_lock_irqsave(&kc->job_lock, flags);
+	list_add(&job->list, jobs);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
+}
+
 /*
  * These three functions process 1 item from the corresponding
  * job list.
@@ -398,7 +410,7 @@
 			 * We couldn't service this job ATM, so
 			 * push this job back onto the list.
 			 */
-			push(jobs, job);
+			push_head(jobs, job);
 			break;
 		}
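
push_head() above is the counterpart of kcopyd's existing push(): a job that
cannot be serviced yet goes back to the head of its list instead of the tail, so
it keeps its place ahead of newer work. A small sketch of the two queueing
directions with a hypothetical job/queue pair; the real code takes kc->job_lock
around the same list operations, and push() is assumed to append with
list_add_tail():

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_job {
	struct list_head list;
};

static DEFINE_SPINLOCK(queue_lock);

/* Normal submission: append, preserving FIFO order. */
static void queue_job(struct list_head *jobs, struct my_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&queue_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&queue_lock, flags);
}

/* Requeue after a temporary failure: put the job back at the front so it is
 * retried first and newer jobs do not overtake it. */
static void requeue_job(struct list_head *jobs, struct my_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&queue_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&queue_lock, flags);
}
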
 
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 6449bcd..1b29e91 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -5,12 +5,12 @@
  */
 
 #include "dm.h"
-
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/slab.h>
+#include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "linear"
 
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 5b48478..a8c0fc7 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,7 +12,7 @@
 #include <linux/dm-io.h>
 #include <linux/dm-dirty-log.h>
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "dirty region log"
 
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9bf3460c..abf6e8c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -5,7 +5,8 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
+
 #include "dm-path-selector.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ca1bb63..96ea226 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -9,7 +9,8 @@
  * Path selector registration.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
+
 #include "dm-path-selector.h"
 
 #include <linux/slab.h>
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 29913e4..92dcc06 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,30 +1,30 @@
 /*
  * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
 
-#include "dm.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
 
-#include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
 #include <linux/workqueue.h>
-#include <linux/log2.h>
-#include <linux/hardirq.h>
+#include <linux/device-mapper.h>
 #include <linux/dm-io.h>
 #include <linux/dm-dirty-log.h>
 #include <linux/dm-kcopyd.h>
+#include <linux/dm-region-hash.h>
 
 #define DM_MSG_PREFIX "raid1"
+
+#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
 #define DM_IO_PAGES 64
+#define DM_KCOPYD_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -32,87 +32,6 @@
 static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
 
 /*-----------------------------------------------------------------
- * Region hash
- *
- * The mirror splits itself up into discrete regions.  Each
- * region can be in one of three states: clean, dirty,
- * nosync.  There is no need to put clean regions in the hash.
- *
- * In addition to being present in the hash table a region _may_
- * be present on one of three lists.
- *
- *   clean_regions: Regions on this list have no io pending to
- *   them, they are in sync, we are no longer interested in them,
- *   they are dull.  rh_update_states() will remove them from the
- *   hash table.
- *
- *   quiesced_regions: These regions have been spun down, ready
- *   for recovery.  rh_recovery_start() will remove regions from
- *   this list and hand them to kmirrord, which will schedule the
- *   recovery io with kcopyd.
- *
- *   recovered_regions: Regions that kcopyd has successfully
- *   recovered.  rh_update_states() will now schedule any delayed
- *   io, up the recovery_count, and remove the region from the
- *   hash.
- *
- * There are 2 locks:
- *   A rw spin lock 'hash_lock' protects just the hash table,
- *   this is never held in write mode from interrupt context,
- *   which I believe means that we only have to disable irqs when
- *   doing a write lock.
- *
- *   An ordinary spin lock 'region_lock' that protects the three
- *   lists in the region_hash, with the 'state', 'list' and
- *   'bhs_delayed' fields of the regions.  This is used from irq
- *   context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
-struct mirror_set;
-struct region_hash {
-	struct mirror_set *ms;
-	uint32_t region_size;
-	unsigned region_shift;
-
-	/* holds persistent region state */
-	struct dm_dirty_log *log;
-
-	/* hash table */
-	rwlock_t hash_lock;
-	mempool_t *region_pool;
-	unsigned int mask;
-	unsigned int nr_buckets;
-	struct list_head *buckets;
-
-	spinlock_t region_lock;
-	atomic_t recovery_in_flight;
-	struct semaphore recovery_count;
-	struct list_head clean_regions;
-	struct list_head quiesced_regions;
-	struct list_head recovered_regions;
-	struct list_head failed_recovered_regions;
-};
-
-enum {
-	RH_CLEAN,
-	RH_DIRTY,
-	RH_NOSYNC,
-	RH_RECOVERING
-};
-
-struct region {
-	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
-	region_t key;
-	int state;
-
-	struct list_head hash_list;
-	struct list_head list;
-
-	atomic_t pending;
-	struct bio_list delayed_bios;
-};
-
-
-/*-----------------------------------------------------------------
  * Mirror set structures.
  *---------------------------------------------------------------*/
 enum dm_raid1_error {
@@ -132,8 +51,7 @@
 struct mirror_set {
 	struct dm_target *ti;
 	struct list_head list;
-	struct region_hash rh;
-	struct dm_kcopyd_client *kcopyd_client;
+
 	uint64_t features;
 
 	spinlock_t lock;	/* protects the lists */
@@ -141,6 +59,8 @@
 	struct bio_list writes;
 	struct bio_list failures;
 
+	struct dm_region_hash *rh;
+	struct dm_kcopyd_client *kcopyd_client;
 	struct dm_io_client *io_client;
 	mempool_t *read_record_pool;
 
@@ -159,25 +79,14 @@
 
 	struct work_struct trigger_event;
 
-	unsigned int nr_mirrors;
+	unsigned nr_mirrors;
 	struct mirror mirror[0];
 };
 
-/*
- * Conversion fns
- */
-static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
+static void wakeup_mirrord(void *context)
 {
-	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
-}
+	struct mirror_set *ms = context;
 
-static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
-{
-	return region << rh->region_shift;
-}
-
-static void wake(struct mirror_set *ms)
-{
 	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
 }
 
@@ -186,7 +95,7 @@
 	struct mirror_set *ms = (struct mirror_set *) data;
 
 	clear_bit(0, &ms->timer_pending);
-	wake(ms);
+	wakeup_mirrord(ms);
 }
 
 static void delayed_wake(struct mirror_set *ms)
@@ -200,473 +109,34 @@
 	add_timer(&ms->timer);
 }
 
-/* FIXME move this */
-static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-
-#define MIN_REGIONS 64
-#define MAX_RECOVERY 1
-static int rh_init(struct region_hash *rh, struct mirror_set *ms,
-		   struct dm_dirty_log *log, uint32_t region_size,
-		   region_t nr_regions)
+static void wakeup_all_recovery_waiters(void *context)
 {
-	unsigned int nr_buckets, max_buckets;
-	size_t i;
-
-	/*
-	 * Calculate a suitable number of buckets for our hash
-	 * table.
-	 */
-	max_buckets = nr_regions >> 6;
-	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
-		;
-	nr_buckets >>= 1;
-
-	rh->ms = ms;
-	rh->log = log;
-	rh->region_size = region_size;
-	rh->region_shift = ffs(region_size) - 1;
-	rwlock_init(&rh->hash_lock);
-	rh->mask = nr_buckets - 1;
-	rh->nr_buckets = nr_buckets;
-
-	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
-	if (!rh->buckets) {
-		DMERR("unable to allocate region hash memory");
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < nr_buckets; i++)
-		INIT_LIST_HEAD(rh->buckets + i);
-
-	spin_lock_init(&rh->region_lock);
-	sema_init(&rh->recovery_count, 0);
-	atomic_set(&rh->recovery_in_flight, 0);
-	INIT_LIST_HEAD(&rh->clean_regions);
-	INIT_LIST_HEAD(&rh->quiesced_regions);
-	INIT_LIST_HEAD(&rh->recovered_regions);
-	INIT_LIST_HEAD(&rh->failed_recovered_regions);
-
-	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
-						      sizeof(struct region));
-	if (!rh->region_pool) {
-		vfree(rh->buckets);
-		rh->buckets = NULL;
-		return -ENOMEM;
-	}
-
-	return 0;
+	wake_up_all(&_kmirrord_recovery_stopped);
 }
 
-static void rh_exit(struct region_hash *rh)
-{
-	unsigned int h;
-	struct region *reg, *nreg;
-
-	BUG_ON(!list_empty(&rh->quiesced_regions));
-	for (h = 0; h < rh->nr_buckets; h++) {
-		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
-			BUG_ON(atomic_read(&reg->pending));
-			mempool_free(reg, rh->region_pool);
-		}
-	}
-
-	if (rh->log)
-		dm_dirty_log_destroy(rh->log);
-	if (rh->region_pool)
-		mempool_destroy(rh->region_pool);
-	vfree(rh->buckets);
-}
-
-#define RH_HASH_MULT 2654435387U
-
-static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
-{
-	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
-}
-
-static struct region *__rh_lookup(struct region_hash *rh, region_t region)
-{
-	struct region *reg;
-
-	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
-		if (reg->key == region)
-			return reg;
-
-	return NULL;
-}
-
-static void __rh_insert(struct region_hash *rh, struct region *reg)
-{
-	unsigned int h = rh_hash(rh, reg->key);
-	list_add(&reg->hash_list, rh->buckets + h);
-}
-
-static struct region *__rh_alloc(struct region_hash *rh, region_t region)
-{
-	struct region *reg, *nreg;
-
-	read_unlock(&rh->hash_lock);
-	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
-	if (unlikely(!nreg))
-		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
-	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
-		RH_CLEAN : RH_NOSYNC;
-	nreg->rh = rh;
-	nreg->key = region;
-
-	INIT_LIST_HEAD(&nreg->list);
-
-	atomic_set(&nreg->pending, 0);
-	bio_list_init(&nreg->delayed_bios);
-	write_lock_irq(&rh->hash_lock);
-
-	reg = __rh_lookup(rh, region);
-	if (reg)
-		/* we lost the race */
-		mempool_free(nreg, rh->region_pool);
-
-	else {
-		__rh_insert(rh, nreg);
-		if (nreg->state == RH_CLEAN) {
-			spin_lock(&rh->region_lock);
-			list_add(&nreg->list, &rh->clean_regions);
-			spin_unlock(&rh->region_lock);
-		}
-		reg = nreg;
-	}
-	write_unlock_irq(&rh->hash_lock);
-	read_lock(&rh->hash_lock);
-
-	return reg;
-}
-
-static inline struct region *__rh_find(struct region_hash *rh, region_t region)
-{
-	struct region *reg;
-
-	reg = __rh_lookup(rh, region);
-	if (!reg)
-		reg = __rh_alloc(rh, region);
-
-	return reg;
-}
-
-static int rh_state(struct region_hash *rh, region_t region, int may_block)
-{
-	int r;
-	struct region *reg;
-
-	read_lock(&rh->hash_lock);
-	reg = __rh_lookup(rh, region);
-	read_unlock(&rh->hash_lock);
-
-	if (reg)
-		return reg->state;
-
-	/*
-	 * The region wasn't in the hash, so we fall back to the
-	 * dirty log.
-	 */
-	r = rh->log->type->in_sync(rh->log, region, may_block);
-
-	/*
-	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
-	 * taken as a RH_NOSYNC
-	 */
-	return r == 1 ? RH_CLEAN : RH_NOSYNC;
-}
-
-static inline int rh_in_sync(struct region_hash *rh,
-			     region_t region, int may_block)
-{
-	int state = rh_state(rh, region, may_block);
-	return state == RH_CLEAN || state == RH_DIRTY;
-}
-
-static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
-{
-	struct bio *bio;
-
-	while ((bio = bio_list_pop(bio_list))) {
-		queue_bio(ms, bio, WRITE);
-	}
-}
-
-static void complete_resync_work(struct region *reg, int success)
-{
-	struct region_hash *rh = reg->rh;
-
-	rh->log->type->set_region_sync(rh->log, reg->key, success);
-
-	/*
-	 * Dispatch the bios before we call 'wake_up_all'.
-	 * This is important because if we are suspending,
-	 * we want to know that recovery is complete and
-	 * the work queue is flushed.  If we wake_up_all
-	 * before we dispatch_bios (queue bios and call wake()),
-	 * then we risk suspending before the work queue
-	 * has been properly flushed.
-	 */
-	dispatch_bios(rh->ms, &reg->delayed_bios);
-	if (atomic_dec_and_test(&rh->recovery_in_flight))
-		wake_up_all(&_kmirrord_recovery_stopped);
-	up(&rh->recovery_count);
-}
-
-static void rh_update_states(struct region_hash *rh)
-{
-	struct region *reg, *next;
-
-	LIST_HEAD(clean);
-	LIST_HEAD(recovered);
-	LIST_HEAD(failed_recovered);
-
-	/*
-	 * Quickly grab the lists.
-	 */
-	write_lock_irq(&rh->hash_lock);
-	spin_lock(&rh->region_lock);
-	if (!list_empty(&rh->clean_regions)) {
-		list_splice_init(&rh->clean_regions, &clean);
-
-		list_for_each_entry(reg, &clean, list)
-			list_del(&reg->hash_list);
-	}
-
-	if (!list_empty(&rh->recovered_regions)) {
-		list_splice_init(&rh->recovered_regions, &recovered);
-
-		list_for_each_entry (reg, &recovered, list)
-			list_del(&reg->hash_list);
-	}
-
-	if (!list_empty(&rh->failed_recovered_regions)) {
-		list_splice_init(&rh->failed_recovered_regions,
-				 &failed_recovered);
-
-		list_for_each_entry(reg, &failed_recovered, list)
-			list_del(&reg->hash_list);
-	}
-
-	spin_unlock(&rh->region_lock);
-	write_unlock_irq(&rh->hash_lock);
-
-	/*
-	 * All the regions on the recovered and clean lists have
-	 * now been pulled out of the system, so no need to do
-	 * any more locking.
-	 */
-	list_for_each_entry_safe (reg, next, &recovered, list) {
-		rh->log->type->clear_region(rh->log, reg->key);
-		complete_resync_work(reg, 1);
-		mempool_free(reg, rh->region_pool);
-	}
-
-	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
-		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
-		mempool_free(reg, rh->region_pool);
-	}
-
-	list_for_each_entry_safe(reg, next, &clean, list) {
-		rh->log->type->clear_region(rh->log, reg->key);
-		mempool_free(reg, rh->region_pool);
-	}
-
-	rh->log->type->flush(rh->log);
-}
-
-static void rh_inc(struct region_hash *rh, region_t region)
-{
-	struct region *reg;
-
-	read_lock(&rh->hash_lock);
-	reg = __rh_find(rh, region);
-
-	spin_lock_irq(&rh->region_lock);
-	atomic_inc(&reg->pending);
-
-	if (reg->state == RH_CLEAN) {
-		reg->state = RH_DIRTY;
-		list_del_init(&reg->list);	/* take off the clean list */
-		spin_unlock_irq(&rh->region_lock);
-
-		rh->log->type->mark_region(rh->log, reg->key);
-	} else
-		spin_unlock_irq(&rh->region_lock);
-
-
-	read_unlock(&rh->hash_lock);
-}
-
-static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
-{
-	struct bio *bio;
-
-	for (bio = bios->head; bio; bio = bio->bi_next)
-		rh_inc(rh, bio_to_region(rh, bio));
-}
-
-static void rh_dec(struct region_hash *rh, region_t region)
+static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
 {
 	unsigned long flags;
-	struct region *reg;
 	int should_wake = 0;
+	struct bio_list *bl;
 
-	read_lock(&rh->hash_lock);
-	reg = __rh_lookup(rh, region);
-	read_unlock(&rh->hash_lock);
-
-	spin_lock_irqsave(&rh->region_lock, flags);
-	if (atomic_dec_and_test(&reg->pending)) {
-		/*
-		 * There is no pending I/O for this region.
-		 * We can move the region to corresponding list for next action.
-		 * At this point, the region is not yet connected to any list.
-		 *
-		 * If the state is RH_NOSYNC, the region should be kept off
-		 * from clean list.
-		 * The hash entry for RH_NOSYNC will remain in memory
-		 * until the region is recovered or the map is reloaded.
-		 */
-
-		/* do nothing for RH_NOSYNC */
-		if (reg->state == RH_RECOVERING) {
-			list_add_tail(&reg->list, &rh->quiesced_regions);
-		} else if (reg->state == RH_DIRTY) {
-			reg->state = RH_CLEAN;
-			list_add(&reg->list, &rh->clean_regions);
-		}
-		should_wake = 1;
-	}
-	spin_unlock_irqrestore(&rh->region_lock, flags);
+	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
+	spin_lock_irqsave(&ms->lock, flags);
+	should_wake = !(bl->head);
+	bio_list_add(bl, bio);
+	spin_unlock_irqrestore(&ms->lock, flags);
 
 	if (should_wake)
-		wake(rh->ms);
+		wakeup_mirrord(ms);
 }
 
-/*
- * Starts quiescing a region in preparation for recovery.
- */
-static int __rh_recovery_prepare(struct region_hash *rh)
+static void dispatch_bios(void *context, struct bio_list *bio_list)
 {
-	int r;
-	struct region *reg;
-	region_t region;
+	struct mirror_set *ms = context;
+	struct bio *bio;
 
-	/*
-	 * Ask the dirty log what's next.
-	 */
-	r = rh->log->type->get_resync_work(rh->log, &region);
-	if (r <= 0)
-		return r;
-
-	/*
-	 * Get this region, and start it quiescing by setting the
-	 * recovering flag.
-	 */
-	read_lock(&rh->hash_lock);
-	reg = __rh_find(rh, region);
-	read_unlock(&rh->hash_lock);
-
-	spin_lock_irq(&rh->region_lock);
-	reg->state = RH_RECOVERING;
-
-	/* Already quiesced ? */
-	if (atomic_read(&reg->pending))
-		list_del_init(&reg->list);
-	else
-		list_move(&reg->list, &rh->quiesced_regions);
-
-	spin_unlock_irq(&rh->region_lock);
-
-	return 1;
-}
-
-static void rh_recovery_prepare(struct region_hash *rh)
-{
-	/* Extra reference to avoid race with rh_stop_recovery */
-	atomic_inc(&rh->recovery_in_flight);
-
-	while (!down_trylock(&rh->recovery_count)) {
-		atomic_inc(&rh->recovery_in_flight);
-		if (__rh_recovery_prepare(rh) <= 0) {
-			atomic_dec(&rh->recovery_in_flight);
-			up(&rh->recovery_count);
-			break;
-		}
-	}
-
-	/* Drop the extra reference */
-	if (atomic_dec_and_test(&rh->recovery_in_flight))
-		wake_up_all(&_kmirrord_recovery_stopped);
-}
-
-/*
- * Returns any quiesced regions.
- */
-static struct region *rh_recovery_start(struct region_hash *rh)
-{
-	struct region *reg = NULL;
-
-	spin_lock_irq(&rh->region_lock);
-	if (!list_empty(&rh->quiesced_regions)) {
-		reg = list_entry(rh->quiesced_regions.next,
-				 struct region, list);
-		list_del_init(&reg->list);	/* remove from the quiesced list */
-	}
-	spin_unlock_irq(&rh->region_lock);
-
-	return reg;
-}
-
-static void rh_recovery_end(struct region *reg, int success)
-{
-	struct region_hash *rh = reg->rh;
-
-	spin_lock_irq(&rh->region_lock);
-	if (success)
-		list_add(&reg->list, &reg->rh->recovered_regions);
-	else {
-		reg->state = RH_NOSYNC;
-		list_add(&reg->list, &reg->rh->failed_recovered_regions);
-	}
-	spin_unlock_irq(&rh->region_lock);
-
-	wake(rh->ms);
-}
-
-static int rh_flush(struct region_hash *rh)
-{
-	return rh->log->type->flush(rh->log);
-}
-
-static void rh_delay(struct region_hash *rh, struct bio *bio)
-{
-	struct region *reg;
-
-	read_lock(&rh->hash_lock);
-	reg = __rh_find(rh, bio_to_region(rh, bio));
-	bio_list_add(&reg->delayed_bios, bio);
-	read_unlock(&rh->hash_lock);
-}
-
-static void rh_stop_recovery(struct region_hash *rh)
-{
-	int i;
-
-	/* wait for any recovering regions */
-	for (i = 0; i < MAX_RECOVERY; i++)
-		down(&rh->recovery_count);
-}
-
-static void rh_start_recovery(struct region_hash *rh)
-{
-	int i;
-
-	for (i = 0; i < MAX_RECOVERY; i++)
-		up(&rh->recovery_count);
-
-	wake(rh->ms);
+	while ((bio = bio_list_pop(bio_list)))
+		queue_bio(ms, bio, WRITE);
 }
 
 #define MIN_READ_RECORDS 20
@@ -776,8 +246,8 @@
 static void recovery_complete(int read_err, unsigned long write_err,
 			      void *context)
 {
-	struct region *reg = (struct region *)context;
-	struct mirror_set *ms = reg->rh->ms;
+	struct dm_region *reg = context;
+	struct mirror_set *ms = dm_rh_region_context(reg);
 	int m, bit = 0;
 
 	if (read_err) {
@@ -803,31 +273,33 @@
 		}
 	}
 
-	rh_recovery_end(reg, !(read_err || write_err));
+	dm_rh_recovery_end(reg, !(read_err || write_err));
 }
 
-static int recover(struct mirror_set *ms, struct region *reg)
+static int recover(struct mirror_set *ms, struct dm_region *reg)
 {
 	int r;
-	unsigned int i;
+	unsigned i;
 	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
 	struct mirror *m;
 	unsigned long flags = 0;
+	region_t key = dm_rh_get_region_key(reg);
+	sector_t region_size = dm_rh_get_region_size(ms->rh);
 
 	/* fill in the source */
 	m = get_default_mirror(ms);
 	from.bdev = m->dev->bdev;
-	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
-	if (reg->key == (ms->nr_regions - 1)) {
+	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
+	if (key == (ms->nr_regions - 1)) {
 		/*
 		 * The final region may be smaller than
 		 * region_size.
 		 */
-		from.count = ms->ti->len & (reg->rh->region_size - 1);
+		from.count = ms->ti->len & (region_size - 1);
 		if (!from.count)
-			from.count = reg->rh->region_size;
+			from.count = region_size;
 	} else
-		from.count = reg->rh->region_size;
+		from.count = region_size;
 
 	/* fill in the destinations */
 	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
@@ -836,7 +308,7 @@
 
 		m = ms->mirror + i;
 		dest->bdev = m->dev->bdev;
-		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
+		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
 		dest->count = from.count;
 		dest++;
 	}
@@ -853,22 +325,22 @@
 
 static void do_recovery(struct mirror_set *ms)
 {
+	struct dm_region *reg;
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 	int r;
-	struct region *reg;
-	struct dm_dirty_log *log = ms->rh.log;
 
 	/*
 	 * Start quiescing some regions.
 	 */
-	rh_recovery_prepare(&ms->rh);
+	dm_rh_recovery_prepare(ms->rh);
 
 	/*
 	 * Copy any already quiesced regions.
 	 */
-	while ((reg = rh_recovery_start(&ms->rh))) {
+	while ((reg = dm_rh_recovery_start(ms->rh))) {
 		r = recover(ms, reg);
 		if (r)
-			rh_recovery_end(reg, 0);
+			dm_rh_recovery_end(reg, 0);
 	}
 
 	/*
@@ -909,9 +381,10 @@
 
 static int mirror_available(struct mirror_set *ms, struct bio *bio)
 {
-	region_t region = bio_to_region(&ms->rh, bio);
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+	region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
-	if (ms->rh.log->type->in_sync(ms->rh.log, region, 0))
+	if (log->type->in_sync(log, region, 0))
 		return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;
 
 	return 0;
@@ -985,7 +458,14 @@
 
 	map_region(&io, m, bio);
 	bio_set_m(bio, m);
-	(void) dm_io(&io_req, 1, &io, NULL);
+	BUG_ON(dm_io(&io_req, 1, &io, NULL));
+}
+
+static inline int region_in_sync(struct mirror_set *ms, region_t region,
+				 int may_block)
+{
+	int state = dm_rh_get_state(ms->rh, region, may_block);
+	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
 }
 
 static void do_reads(struct mirror_set *ms, struct bio_list *reads)
@@ -995,13 +475,13 @@
 	struct mirror *m;
 
 	while ((bio = bio_list_pop(reads))) {
-		region = bio_to_region(&ms->rh, bio);
+		region = dm_rh_bio_to_region(ms->rh, bio);
 		m = get_default_mirror(ms);
 
 		/*
 		 * We can only read balance if the region is in sync.
 		 */
-		if (likely(rh_in_sync(&ms->rh, region, 1)))
+		if (likely(region_in_sync(ms, region, 1)))
 			m = choose_mirror(ms, bio->bi_sector);
 		else if (m && atomic_read(&m->error_count))
 			m = NULL;
@@ -1024,57 +504,6 @@
  * NOSYNC:	increment pending, just write to the default mirror
  *---------------------------------------------------------------*/
 
-/* __bio_mark_nosync
- * @ms
- * @bio
- * @done
- * @error
- *
- * The bio was written on some mirror(s) but failed on other mirror(s).
- * We can successfully endio the bio but should avoid the region being
- * marked clean by setting the state RH_NOSYNC.
- *
- * This function is _not_ safe in interrupt context!
- */
-static void __bio_mark_nosync(struct mirror_set *ms,
-			      struct bio *bio, unsigned done, int error)
-{
-	unsigned long flags;
-	struct region_hash *rh = &ms->rh;
-	struct dm_dirty_log *log = ms->rh.log;
-	struct region *reg;
-	region_t region = bio_to_region(rh, bio);
-	int recovering = 0;
-
-	/* We must inform the log that the sync count has changed. */
-	log->type->set_region_sync(log, region, 0);
-	ms->in_sync = 0;
-
-	read_lock(&rh->hash_lock);
-	reg = __rh_find(rh, region);
-	read_unlock(&rh->hash_lock);
-
-	/* region hash entry should exist because write was in-flight */
-	BUG_ON(!reg);
-	BUG_ON(!list_empty(&reg->list));
-
-	spin_lock_irqsave(&rh->region_lock, flags);
-	/*
-	 * Possible cases:
-	 *   1) RH_DIRTY
-	 *   2) RH_NOSYNC: was dirty, other preceeding writes failed
-	 *   3) RH_RECOVERING: flushing pending writes
-	 * Either case, the region should have not been connected to list.
-	 */
-	recovering = (reg->state == RH_RECOVERING);
-	reg->state = RH_NOSYNC;
-	BUG_ON(!list_empty(&reg->list));
-	spin_unlock_irqrestore(&rh->region_lock, flags);
-
-	bio_endio(bio, error);
-	if (recovering)
-		complete_resync_work(reg, 0);
-}
 
 static void write_callback(unsigned long error, void *context)
 {
@@ -1119,7 +548,7 @@
 		bio_list_add(&ms->failures, bio);
 		spin_unlock_irqrestore(&ms->lock, flags);
 		if (should_wake)
-			wake(ms);
+			wakeup_mirrord(ms);
 		return;
 	}
 out:
@@ -1149,7 +578,7 @@
 	 */
 	bio_set_m(bio, get_default_mirror(ms));
 
-	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
+	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
 }
 
 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -1169,18 +598,19 @@
 	bio_list_init(&recover);
 
 	while ((bio = bio_list_pop(writes))) {
-		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
+		state = dm_rh_get_state(ms->rh,
+					dm_rh_bio_to_region(ms->rh, bio), 1);
 		switch (state) {
-		case RH_CLEAN:
-		case RH_DIRTY:
+		case DM_RH_CLEAN:
+		case DM_RH_DIRTY:
 			this_list = &sync;
 			break;
 
-		case RH_NOSYNC:
+		case DM_RH_NOSYNC:
 			this_list = &nosync;
 			break;
 
-		case RH_RECOVERING:
+		case DM_RH_RECOVERING:
 			this_list = &recover;
 			break;
 		}
@@ -1193,9 +623,9 @@
 	 * be written to (writes to recover regions are going to
 	 * be delayed).
 	 */
-	rh_inc_pending(&ms->rh, &sync);
-	rh_inc_pending(&ms->rh, &nosync);
-	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;
+	dm_rh_inc_pending(ms->rh, &sync);
+	dm_rh_inc_pending(ms->rh, &nosync);
+	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
 
 	/*
 	 * Dispatch io.
@@ -1204,13 +634,13 @@
 		spin_lock_irq(&ms->lock);
 		bio_list_merge(&ms->failures, &sync);
 		spin_unlock_irq(&ms->lock);
-		wake(ms);
+		wakeup_mirrord(ms);
 	} else
 		while ((bio = bio_list_pop(&sync)))
 			do_write(ms, bio);
 
 	while ((bio = bio_list_pop(&recover)))
-		rh_delay(&ms->rh, bio);
+		dm_rh_delay(ms->rh, bio);
 
 	while ((bio = bio_list_pop(&nosync))) {
 		map_bio(get_default_mirror(ms), bio);
@@ -1227,7 +657,9 @@
 
 	if (!ms->log_failure) {
-		while ((bio = bio_list_pop(failures)))
-			__bio_mark_nosync(ms, bio, bio->bi_size, 0);
+		while ((bio = bio_list_pop(failures))) {
+			ms->in_sync = 0;
+			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
+		}
 		return;
 	}
 
@@ -1280,8 +711,8 @@
  *---------------------------------------------------------------*/
 static void do_mirror(struct work_struct *work)
 {
-	struct mirror_set *ms =container_of(work, struct mirror_set,
-					    kmirrord_work);
+	struct mirror_set *ms = container_of(work, struct mirror_set,
+					     kmirrord_work);
 	struct bio_list reads, writes, failures;
 	unsigned long flags;
 
@@ -1294,7 +725,7 @@
 	bio_list_init(&ms->failures);
 	spin_unlock_irqrestore(&ms->lock, flags);
 
-	rh_update_states(&ms->rh);
+	dm_rh_update_states(ms->rh, errors_handled(ms));
 	do_recovery(ms);
 	do_reads(ms, &reads);
 	do_writes(ms, &writes);
@@ -1303,7 +734,6 @@
 	dm_table_unplug_all(ms->ti->table);
 }
 
-
 /*-----------------------------------------------------------------
  * Target functions
  *---------------------------------------------------------------*/
@@ -1315,9 +745,6 @@
 	size_t len;
 	struct mirror_set *ms = NULL;
 
-	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
-		return NULL;
-
 	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
 
 	ms = kzalloc(len, GFP_KERNEL);
@@ -1353,7 +780,11 @@
  		return NULL;
 	}
 
-	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
+	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
+				       wakeup_all_recovery_waiters,
+				       ms->ti->begin, MAX_RECOVERY,
+				       dl, region_size, ms->nr_regions);
+	if (IS_ERR(ms->rh)) {
 		ti->error = "Error creating dirty region hash";
 		dm_io_client_destroy(ms->io_client);
 		mempool_destroy(ms->read_record_pool);
@@ -1371,7 +802,7 @@
 		dm_put_device(ti, ms->mirror[m].dev);
 
 	dm_io_client_destroy(ms->io_client);
-	rh_exit(&ms->rh);
+	dm_region_hash_destroy(ms->rh);
 	mempool_destroy(ms->read_record_pool);
 	kfree(ms);
 }
@@ -1411,10 +842,10 @@
  * Create dirty log: log_type #log_params <log_params>
  */
 static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
-					  unsigned int argc, char **argv,
-					  unsigned int *args_used)
+					     unsigned argc, char **argv,
+					     unsigned *args_used)
 {
-	unsigned int param_count;
+	unsigned param_count;
 	struct dm_dirty_log *dl;
 
 	if (argc < 2) {
@@ -1545,7 +976,7 @@
 	}
 
 	ti->private = ms;
- 	ti->split_io = ms->rh.region_size;
+	ti->split_io = dm_rh_get_region_size(ms->rh);
 
 	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
 	if (!ms->kmirrord_wq) {
@@ -1580,11 +1011,11 @@
 		goto err_destroy_wq;
 	}
 
-	r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
 	if (r)
 		goto err_destroy_wq;
 
-	wake(ms);
+	wakeup_mirrord(ms);
 	return 0;
 
 err_destroy_wq:
@@ -1605,22 +1036,6 @@
 	free_context(ms, ti, ms->nr_mirrors);
 }
 
-static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
-{
-	unsigned long flags;
-	int should_wake = 0;
-	struct bio_list *bl;
-
-	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
-	spin_lock_irqsave(&ms->lock, flags);
-	should_wake = !(bl->head);
-	bio_list_add(bl, bio);
-	spin_unlock_irqrestore(&ms->lock, flags);
-
-	if (should_wake)
-		wake(ms);
-}
-
 /*
  * Mirror mapping function
  */
@@ -1631,16 +1046,16 @@
 	struct mirror *m;
 	struct mirror_set *ms = ti->private;
 	struct dm_raid1_read_record *read_record = NULL;
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
-		map_context->ll = bio_to_region(&ms->rh, bio);
+		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
 		queue_bio(ms, bio, rw);
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	r = ms->rh.log->type->in_sync(ms->rh.log,
-				      bio_to_region(&ms->rh, bio), 0);
+	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
 	if (r < 0 && r != -EWOULDBLOCK)
 		return r;
 
@@ -1688,7 +1103,7 @@
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		rh_dec(&ms->rh, map_context->ll);
+		dm_rh_dec(ms->rh, map_context->ll);
 		return error;
 	}
 
@@ -1744,7 +1159,7 @@
 static void mirror_presuspend(struct dm_target *ti)
 {
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
-	struct dm_dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 
 	atomic_set(&ms->suspend, 1);
 
@@ -1752,10 +1167,10 @@
 	 * We must finish up all the work that we've
 	 * generated (i.e. recovery work).
 	 */
-	rh_stop_recovery(&ms->rh);
+	dm_rh_stop_recovery(ms->rh);
 
 	wait_event(_kmirrord_recovery_stopped,
-		   !atomic_read(&ms->rh.recovery_in_flight));
+		   !dm_rh_recovery_in_flight(ms->rh));
 
 	if (log->type->presuspend && log->type->presuspend(log))
 		/* FIXME: need better error handling */
@@ -1773,7 +1188,7 @@
 static void mirror_postsuspend(struct dm_target *ti)
 {
 	struct mirror_set *ms = ti->private;
-	struct dm_dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 
 	if (log->type->postsuspend && log->type->postsuspend(log))
 		/* FIXME: need better error handling */
@@ -1783,13 +1198,13 @@
 static void mirror_resume(struct dm_target *ti)
 {
 	struct mirror_set *ms = ti->private;
-	struct dm_dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 
 	atomic_set(&ms->suspend, 0);
 	if (log->type->resume && log->type->resume(log))
 		/* FIXME: need better error handling */
 		DMWARN("log resume failed");
-	rh_start_recovery(&ms->rh);
+	dm_rh_start_recovery(ms->rh);
 }
 
 /*
@@ -1821,7 +1236,7 @@
 {
 	unsigned int m, sz = 0;
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
-	struct dm_dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 	char buffer[ms->nr_mirrors + 1];
 
 	switch (type) {
@@ -1834,15 +1249,15 @@
 		buffer[m] = '\0';
 
 		DMEMIT("%llu/%llu 1 %s ",
-		      (unsigned long long)log->type->get_sync_count(ms->rh.log),
+		      (unsigned long long)log->type->get_sync_count(log),
 		      (unsigned long long)ms->nr_regions, buffer);
 
-		sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
+		sz += log->type->status(log, type, result+sz, maxlen-sz);
 
 		break;
 
 	case STATUSTYPE_TABLE:
-		sz = log->type->status(ms->rh.log, type, result, maxlen);
+		sz = log->type->status(log, type, result, maxlen);
 
 		DMEMIT("%d", ms->nr_mirrors);
 		for (m = 0; m < ms->nr_mirrors; m++)
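For reference, dm_region_hash_create() as called above takes three callbacks from its client. A minimal sketch of how such a client wires them up, assuming a hypothetical my_set type and my_queue_bio helper (the real implementations are the dispatch_bios, wakeup_mirrord and wakeup_all_recovery_waiters functions in dm-raid1.c):

/* Sketch only: hypothetical client callbacks for dm_region_hash_create(). */
static void my_dispatch_bios(void *context, struct bio_list *bios)
{
	struct my_set *ms = context;
	struct bio *bio;

	/* Requeue writes that were delayed while their region recovered. */
	while ((bio = bio_list_pop(bios)))
		my_queue_bio(ms, bio, WRITE);
}

static void my_wakeup_worker(void *context)
{
	struct my_set *ms = context;

	queue_work(ms->wq, &ms->work);		/* kick the per-set worker */
}

static void my_wakeup_recovery_waiters(void *context)
{
	struct my_set *ms = context;

	wake_up_all(&ms->recovery_stopped);	/* presuspend waits on this */
}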
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
new file mode 100644
index 0000000..59f8d9d
--- /dev/null
+++ b/drivers/md/dm-region-hash.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-region-hash.h>
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include "dm.h"
+#include "dm-bio-list.h"
+
+#define	DM_MSG_PREFIX	"region hash"
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *
+ * The mirror splits itself up into discrete regions.  Each
+ * region can be in one of three states: clean, dirty,
+ * nosync.  There is no need to put clean regions in the hash.
+ *
+ * In addition to being present in the hash table a region _may_
+ * be present on one of three lists.
+ *
+ *   clean_regions: Regions on this list have no io pending to
+ *   them, they are in sync, we are no longer interested in them,
+ *   they are dull.  dm_rh_update_states() will remove them from the
+ *   hash table.
+ *
+ *   quiesced_regions: These regions have been spun down, ready
+ *   for recovery.  dm_rh_recovery_start() will remove regions from
+ *   this list and hand them to kmirrord, which will schedule the
+ *   recovery io with kcopyd.
+ *
+ *   recovered_regions: Regions that kcopyd has successfully
+ *   recovered.  dm_rh_update_states() will now schedule any delayed
+ *   io, up the recovery_count, and remove the region from the
+ *   hash.
+ *
+ * There are 2 locks:
+ *   A rw spin lock 'hash_lock' protects just the hash table,
+ *   this is never held in write mode from interrupt context,
+ *   which I believe means that we only have to disable irqs when
+ *   doing a write lock.
+ *
+ *   An ordinary spin lock 'region_lock' that protects the three
+ *   lists in the region_hash, with the 'state', 'list' and
+ *   'delayed_bios' fields of the regions.  This is used from irq
+ *   context, so all other uses will have to suspend local irqs.
+ *---------------------------------------------------------------*/
+struct dm_region_hash {
+	uint32_t region_size;
+	unsigned region_shift;
+
+	/* holds persistent region state */
+	struct dm_dirty_log *log;
+
+	/* hash table */
+	rwlock_t hash_lock;
+	mempool_t *region_pool;
+	unsigned mask;
+	unsigned nr_buckets;
+	unsigned prime;
+	unsigned shift;
+	struct list_head *buckets;
+
+	unsigned max_recovery; /* Max # of regions to recover in parallel */
+
+	spinlock_t region_lock;
+	atomic_t recovery_in_flight;
+	struct semaphore recovery_count;
+	struct list_head clean_regions;
+	struct list_head quiesced_regions;
+	struct list_head recovered_regions;
+	struct list_head failed_recovered_regions;
+
+	void *context;
+	sector_t target_begin;
+
+	/* Callback function to schedule bios writes */
+	void (*dispatch_bios)(void *context, struct bio_list *bios);
+
+	/* Callback function to wakeup callers worker thread. */
+	void (*wakeup_workers)(void *context);
+
+	/* Callback function to wakeup callers recovery waiters. */
+	void (*wakeup_all_recovery_waiters)(void *context);
+};
+
+struct dm_region {
+	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
+	region_t key;
+	int state;
+
+	struct list_head hash_list;
+	struct list_head list;
+
+	atomic_t pending;
+	struct bio_list delayed_bios;
+};
+
+/*
+ * Conversion fns
+ */
+static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
+{
+	return sector >> rh->region_shift;
+}
+
+sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
+{
+	return region << rh->region_shift;
+}
+EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
+
+region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
+{
+	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+}
+EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
+
+void *dm_rh_region_context(struct dm_region *reg)
+{
+	return reg->rh->context;
+}
+EXPORT_SYMBOL_GPL(dm_rh_region_context);
+
+region_t dm_rh_get_region_key(struct dm_region *reg)
+{
+	return reg->key;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
+
+sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
+{
+	return rh->region_size;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
+
+/*
+ * FIXME: shall we pass in a structure instead of all these args to
+ * dm_region_hash_create()????
+ */
+#define RH_HASH_MULT 2654435387U
+#define RH_HASH_SHIFT 12
+
+#define MIN_REGIONS 64
+struct dm_region_hash *dm_region_hash_create(
+		void *context, void (*dispatch_bios)(void *context,
+						     struct bio_list *bios),
+		void (*wakeup_workers)(void *context),
+		void (*wakeup_all_recovery_waiters)(void *context),
+		sector_t target_begin, unsigned max_recovery,
+		struct dm_dirty_log *log, uint32_t region_size,
+		region_t nr_regions)
+{
+	struct dm_region_hash *rh;
+	unsigned nr_buckets, max_buckets;
+	size_t i;
+
+	/*
+	 * Calculate a suitable number of buckets for our hash
+	 * table.
+	 */
+	max_buckets = nr_regions >> 6;
+	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
+		;
+	nr_buckets >>= 1;
+
+	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
+	if (!rh) {
+		DMERR("unable to allocate region hash memory");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rh->context = context;
+	rh->dispatch_bios = dispatch_bios;
+	rh->wakeup_workers = wakeup_workers;
+	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
+	rh->target_begin = target_begin;
+	rh->max_recovery = max_recovery;
+	rh->log = log;
+	rh->region_size = region_size;
+	rh->region_shift = ffs(region_size) - 1;
+	rwlock_init(&rh->hash_lock);
+	rh->mask = nr_buckets - 1;
+	rh->nr_buckets = nr_buckets;
+
+	rh->shift = RH_HASH_SHIFT;
+	rh->prime = RH_HASH_MULT;
+
+	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+	if (!rh->buckets) {
+		DMERR("unable to allocate region hash bucket memory");
+		kfree(rh);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (i = 0; i < nr_buckets; i++)
+		INIT_LIST_HEAD(rh->buckets + i);
+
+	spin_lock_init(&rh->region_lock);
+	sema_init(&rh->recovery_count, 0);
+	atomic_set(&rh->recovery_in_flight, 0);
+	INIT_LIST_HEAD(&rh->clean_regions);
+	INIT_LIST_HEAD(&rh->quiesced_regions);
+	INIT_LIST_HEAD(&rh->recovered_regions);
+	INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+						      sizeof(struct dm_region));
+	if (!rh->region_pool) {
+		vfree(rh->buckets);
+		kfree(rh);
+		rh = ERR_PTR(-ENOMEM);
+	}
+
+	return rh;
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_create);
+
+void dm_region_hash_destroy(struct dm_region_hash *rh)
+{
+	unsigned h;
+	struct dm_region *reg, *nreg;
+
+	BUG_ON(!list_empty(&rh->quiesced_regions));
+	for (h = 0; h < rh->nr_buckets; h++) {
+		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
+					 hash_list) {
+			BUG_ON(atomic_read(&reg->pending));
+			mempool_free(reg, rh->region_pool);
+		}
+	}
+
+	if (rh->log)
+		dm_dirty_log_destroy(rh->log);
+
+	if (rh->region_pool)
+		mempool_destroy(rh->region_pool);
+
+	vfree(rh->buckets);
+	kfree(rh);
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
+{
+	return rh->log;
+}
+EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
+
+static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+{
+	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+}
+
+static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg;
+	struct list_head *bucket = rh->buckets + rh_hash(rh, region);
+
+	list_for_each_entry(reg, bucket, hash_list)
+		if (reg->key == region)
+			return reg;
+
+	return NULL;
+}
+
+static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
+{
+	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
+}
+
+static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg, *nreg;
+
+	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+	if (unlikely(!nreg))
+		nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+
+	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
+		      DM_RH_CLEAN : DM_RH_NOSYNC;
+	nreg->rh = rh;
+	nreg->key = region;
+	INIT_LIST_HEAD(&nreg->list);
+	atomic_set(&nreg->pending, 0);
+	bio_list_init(&nreg->delayed_bios);
+
+	write_lock_irq(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	if (reg)
+		/* We lost the race. */
+		mempool_free(nreg, rh->region_pool);
+	else {
+		__rh_insert(rh, nreg);
+		if (nreg->state == DM_RH_CLEAN) {
+			spin_lock(&rh->region_lock);
+			list_add(&nreg->list, &rh->clean_regions);
+			spin_unlock(&rh->region_lock);
+		}
+
+		reg = nreg;
+	}
+	write_unlock_irq(&rh->hash_lock);
+
+	return reg;
+}
+
+static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg;
+
+	reg = __rh_lookup(rh, region);
+	if (!reg) {
+		read_unlock(&rh->hash_lock);
+		reg = __rh_alloc(rh, region);
+		read_lock(&rh->hash_lock);
+	}
+
+	return reg;
+}
+
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
+{
+	int r;
+	struct dm_region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	if (reg)
+		return reg->state;
+
+	/*
+	 * The region wasn't in the hash, so we fall back to the
+	 * dirty log.
+	 */
+	r = rh->log->type->in_sync(rh->log, region, may_block);
+
+	/*
+	 * Any error from the dirty log (e.g. -EWOULDBLOCK) gets
+	 * taken as DM_RH_NOSYNC.
+	 */
+	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_state);
+
+static void complete_resync_work(struct dm_region *reg, int success)
+{
+	struct dm_region_hash *rh = reg->rh;
+
+	rh->log->type->set_region_sync(rh->log, reg->key, success);
+
+	/*
+	 * Dispatch the bios before we call 'wake_up_all'.
+	 * This is important because if we are suspending,
+	 * we want to know that recovery is complete and
+	 * the work queue is flushed.  If we wake_up_all
+	 * before we dispatch_bios (queue bios and call wake()),
+	 * then we risk suspending before the work queue
+	 * has been properly flushed.
+	 */
+	rh->dispatch_bios(rh->context, &reg->delayed_bios);
+	if (atomic_dec_and_test(&rh->recovery_in_flight))
+		rh->wakeup_all_recovery_waiters(rh->context);
+	up(&rh->recovery_count);
+}
+
+/* dm_rh_mark_nosync
+ * @ms
+ * @bio
+ * @done
+ * @error
+ *
+ * The bio was written on some mirror(s) but failed on other mirror(s).
+ * We can successfully endio the bio but should avoid the region being
+ * marked clean by setting the state DM_RH_NOSYNC.
+ *
+ * This function is _not_ safe in interrupt context!
+ */
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+		       struct bio *bio, unsigned done, int error)
+{
+	unsigned long flags;
+	struct dm_dirty_log *log = rh->log;
+	struct dm_region *reg;
+	region_t region = dm_rh_bio_to_region(rh, bio);
+	int recovering = 0;
+
+	/* We must inform the log that the sync count has changed. */
+	log->type->set_region_sync(log, region, 0);
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	/* region hash entry should exist because write was in-flight */
+	BUG_ON(!reg);
+	BUG_ON(!list_empty(&reg->list));
+
+	spin_lock_irqsave(&rh->region_lock, flags);
+	/*
+	 * Possible cases:
+	 *   1) DM_RH_DIRTY
+	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
+	 *   3) DM_RH_RECOVERING: flushing pending writes
+	 * In any case, the region should not have been connected to the list.
+	 */
+	recovering = (reg->state == DM_RH_RECOVERING);
+	reg->state = DM_RH_NOSYNC;
+	BUG_ON(!list_empty(&reg->list));
+	spin_unlock_irqrestore(&rh->region_lock, flags);
+
+	bio_endio(bio, error);
+	if (recovering)
+		complete_resync_work(reg, 0);
+}
+EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
+
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
+{
+	struct dm_region *reg, *next;
+
+	LIST_HEAD(clean);
+	LIST_HEAD(recovered);
+	LIST_HEAD(failed_recovered);
+
+	/*
+	 * Quickly grab the lists.
+	 */
+	write_lock_irq(&rh->hash_lock);
+	spin_lock(&rh->region_lock);
+	if (!list_empty(&rh->clean_regions)) {
+		list_splice_init(&rh->clean_regions, &clean);
+
+		list_for_each_entry(reg, &clean, list)
+			list_del(&reg->hash_list);
+	}
+
+	if (!list_empty(&rh->recovered_regions)) {
+		list_splice_init(&rh->recovered_regions, &recovered);
+
+		list_for_each_entry(reg, &recovered, list)
+			list_del(&reg->hash_list);
+	}
+
+	if (!list_empty(&rh->failed_recovered_regions)) {
+		list_splice_init(&rh->failed_recovered_regions,
+				 &failed_recovered);
+
+		list_for_each_entry(reg, &failed_recovered, list)
+			list_del(&reg->hash_list);
+	}
+
+	spin_unlock(&rh->region_lock);
+	write_unlock_irq(&rh->hash_lock);
+
+	/*
+	 * All the regions on the recovered and clean lists have
+	 * now been pulled out of the system, so no need to do
+	 * any more locking.
+	 */
+	list_for_each_entry_safe(reg, next, &recovered, list) {
+		rh->log->type->clear_region(rh->log, reg->key);
+		complete_resync_work(reg, 1);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+		complete_resync_work(reg, errors_handled ? 0 : 1);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	list_for_each_entry_safe(reg, next, &clean, list) {
+		rh->log->type->clear_region(rh->log, reg->key);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_update_states);
+
+static void rh_inc(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+
+	spin_lock_irq(&rh->region_lock);
+	atomic_inc(&reg->pending);
+
+	if (reg->state == DM_RH_CLEAN) {
+		reg->state = DM_RH_DIRTY;
+		list_del_init(&reg->list);	/* take off the clean list */
+		spin_unlock_irq(&rh->region_lock);
+
+		rh->log->type->mark_region(rh->log, reg->key);
+	} else
+		spin_unlock_irq(&rh->region_lock);
+
+	read_unlock(&rh->hash_lock);
+}
+
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+{
+	struct bio *bio;
+
+	for (bio = bios->head; bio; bio = bio->bi_next)
+		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+}
+EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
+
+void dm_rh_dec(struct dm_region_hash *rh, region_t region)
+{
+	unsigned long flags;
+	struct dm_region *reg;
+	int should_wake = 0;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	spin_lock_irqsave(&rh->region_lock, flags);
+	if (atomic_dec_and_test(&reg->pending)) {
+		/*
+		 * There is no pending I/O for this region.
+		 * We can move the region to the corresponding list for its next action.
+		 * At this point, the region is not yet connected to any list.
+		 *
+		 * If the state is DM_RH_NOSYNC, the region should be kept off
+		 * the clean list.
+		 * The hash entry for DM_RH_NOSYNC will remain in memory
+		 * until the region is recovered or the map is reloaded.
+		 */
+
+		/* do nothing for DM_RH_NOSYNC */
+		if (reg->state == DM_RH_RECOVERING) {
+			list_add_tail(&reg->list, &rh->quiesced_regions);
+		} else if (reg->state == DM_RH_DIRTY) {
+			reg->state = DM_RH_CLEAN;
+			list_add(&reg->list, &rh->clean_regions);
+		}
+		should_wake = 1;
+	}
+	spin_unlock_irqrestore(&rh->region_lock, flags);
+
+	if (should_wake)
+		rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_dec);
+
+/*
+ * Starts quiescing a region in preparation for recovery.
+ */
+static int __rh_recovery_prepare(struct dm_region_hash *rh)
+{
+	int r;
+	region_t region;
+	struct dm_region *reg;
+
+	/*
+	 * Ask the dirty log what's next.
+	 */
+	r = rh->log->type->get_resync_work(rh->log, &region);
+	if (r <= 0)
+		return r;
+
+	/*
+	 * Get this region, and start it quiescing by setting the
+	 * recovering flag.
+	 */
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	spin_lock_irq(&rh->region_lock);
+	reg->state = DM_RH_RECOVERING;
+
+	/* Already quiesced ? */
+	if (atomic_read(&reg->pending))
+		list_del_init(&reg->list);
+	else
+		list_move(&reg->list, &rh->quiesced_regions);
+
+	spin_unlock_irq(&rh->region_lock);
+
+	return 1;
+}
+
+void dm_rh_recovery_prepare(struct dm_region_hash *rh)
+{
+	/* Extra reference to avoid race with dm_rh_stop_recovery */
+	atomic_inc(&rh->recovery_in_flight);
+
+	while (!down_trylock(&rh->recovery_count)) {
+		atomic_inc(&rh->recovery_in_flight);
+		if (__rh_recovery_prepare(rh) <= 0) {
+			atomic_dec(&rh->recovery_in_flight);
+			up(&rh->recovery_count);
+			break;
+		}
+	}
+
+	/* Drop the extra reference */
+	if (atomic_dec_and_test(&rh->recovery_in_flight))
+		rh->wakeup_all_recovery_waiters(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
+
+/*
+ * Returns any quiesced regions.
+ */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
+{
+	struct dm_region *reg = NULL;
+
+	spin_lock_irq(&rh->region_lock);
+	if (!list_empty(&rh->quiesced_regions)) {
+		reg = list_entry(rh->quiesced_regions.next,
+				 struct dm_region, list);
+		list_del_init(&reg->list);  /* remove from the quiesced list */
+	}
+	spin_unlock_irq(&rh->region_lock);
+
+	return reg;
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
+
+void dm_rh_recovery_end(struct dm_region *reg, int success)
+{
+	struct dm_region_hash *rh = reg->rh;
+
+	spin_lock_irq(&rh->region_lock);
+	if (success)
+		list_add(&reg->list, &reg->rh->recovered_regions);
+	else {
+		reg->state = DM_RH_NOSYNC;
+		list_add(&reg->list, &reg->rh->failed_recovered_regions);
+	}
+	spin_unlock_irq(&rh->region_lock);
+
+	rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
+
+/* Return recovery in flight count. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
+{
+	return atomic_read(&rh->recovery_in_flight);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
+
+int dm_rh_flush(struct dm_region_hash *rh)
+{
+	return rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_flush);
+
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
+{
+	struct dm_region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
+	bio_list_add(&reg->delayed_bios, bio);
+	read_unlock(&rh->hash_lock);
+}
+EXPORT_SYMBOL_GPL(dm_rh_delay);
+
+void dm_rh_stop_recovery(struct dm_region_hash *rh)
+{
+	int i;
+
+	/* wait for any recovering regions */
+	for (i = 0; i < rh->max_recovery; i++)
+		down(&rh->recovery_count);
+}
+EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
+
+void dm_rh_start_recovery(struct dm_region_hash *rh)
+{
+	int i;
+
+	for (i = 0; i < rh->max_recovery; i++)
+		up(&rh->recovery_count);
+
+	rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
+
+MODULE_DESCRIPTION(DM_NAME " region hash");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 391dfa2..cdfbf65 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -9,7 +9,8 @@
  * Round-robin path selector.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
+
 #include "dm-path-selector.h"
 
 #include <linux/slab.h>
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6e5528a..b2d9d1a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -600,7 +600,6 @@
 
 	s->valid = 1;
 	s->active = 0;
-	s->last_percent = 0;
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
 	s->ti = ti;
@@ -824,8 +823,10 @@
 	 * the bios for the original write to the origin.
 	 */
 	if (primary_pe &&
-	    atomic_dec_and_test(&primary_pe->ref_count))
+	    atomic_dec_and_test(&primary_pe->ref_count)) {
 		origin_bios = bio_list_get(&primary_pe->origin_bios);
+		free_pending_exception(primary_pe);
+	}
 
 	/*
 	 * Free the pe if it's not linked to an origin write or if
@@ -834,12 +835,6 @@
 	if (!primary_pe || primary_pe != pe)
 		free_pending_exception(pe);
 
-	/*
-	 * Free the primary pe if nothing references it.
-	 */
-	if (primary_pe && !atomic_read(&primary_pe->ref_count))
-		free_pending_exception(primary_pe);
-
 	return origin_bios;
 }
 
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 292c156..f07315f 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -9,7 +9,7 @@
 #ifndef DM_SNAPSHOT_H
 #define DM_SNAPSHOT_H
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 #include "dm-bio-list.h"
 #include <linux/blkdev.h>
 #include <linux/workqueue.h>
@@ -158,9 +158,6 @@
 	/* Used for display of table */
 	char type;
 
-	/* The last percentage we notified */
-	int last_percent;
-
 	mempool_t *pending_pool;
 
 	struct exception_table pending;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b745d8a..a2d068d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,7 +4,7 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #include <linux/module.h>
 #include <linux/init.h>
@@ -60,8 +60,8 @@
 {
 	size_t len;
 
-	if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
-			  stripes))
+	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
+			     stripes))
 		return NULL;
 
 	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bdec206..cdbf126 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -4,7 +4,7 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #include <linux/module.h>
 #include <linux/init.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 327de03..d1d0cd0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -76,7 +76,6 @@
  */
 struct dm_wq_req {
 	enum {
-		DM_WQ_FLUSH_ALL,
 		DM_WQ_FLUSH_DEFERRED,
 	} type;
 	struct work_struct work;
@@ -151,40 +150,40 @@
 
 static int __init local_init(void)
 {
-	int r;
+	int r = -ENOMEM;
 
 	/* allocate a slab for the dm_ios */
 	_io_cache = KMEM_CACHE(dm_io, 0);
 	if (!_io_cache)
-		return -ENOMEM;
+		return r;
 
 	/* allocate a slab for the target ios */
 	_tio_cache = KMEM_CACHE(dm_target_io, 0);
-	if (!_tio_cache) {
-		kmem_cache_destroy(_io_cache);
-		return -ENOMEM;
-	}
+	if (!_tio_cache)
+		goto out_free_io_cache;
 
 	r = dm_uevent_init();
-	if (r) {
-		kmem_cache_destroy(_tio_cache);
-		kmem_cache_destroy(_io_cache);
-		return r;
-	}
+	if (r)
+		goto out_free_tio_cache;
 
 	_major = major;
 	r = register_blkdev(_major, _name);
-	if (r < 0) {
-		kmem_cache_destroy(_tio_cache);
-		kmem_cache_destroy(_io_cache);
-		dm_uevent_exit();
-		return r;
-	}
+	if (r < 0)
+		goto out_uevent_exit;
 
 	if (!_major)
 		_major = r;
 
 	return 0;
+
+out_uevent_exit:
+	dm_uevent_exit();
+out_free_tio_cache:
+	kmem_cache_destroy(_tio_cache);
+out_free_io_cache:
+	kmem_cache_destroy(_io_cache);
+
+	return r;
 }
 
 static void local_exit(void)
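The local_init() rework above switches to the kernel's usual reverse-order goto unwind for error handling. A generic sketch of the pattern, with hypothetical alloc/free/register helpers:

/* Sketch only: cascading-unwind error handling. */
static int example_init(void)
{
	int r = -ENOMEM;
	struct foo *a;
	struct bar *b;

	a = alloc_foo();
	if (!a)
		return r;		/* nothing to undo yet */

	b = alloc_bar();
	if (!b)
		goto out_free_foo;

	r = register_baz();
	if (r)
		goto out_free_bar;

	return 0;

out_free_bar:				/* unwind in reverse order */
	free_bar(b);
out_free_foo:
	free_foo(a);
	return r;
}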
@@ -669,6 +668,7 @@
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
 	clone->bi_io_vec->bv_len = clone->bi_size;
+	clone->bi_flags |= 1 << BIO_CLONED;
 
 	return clone;
 }
@@ -1394,9 +1394,6 @@
 
 	down_write(&md->io_lock);
 	switch (req->type) {
-	case DM_WQ_FLUSH_ALL:
-		__merge_pushback_list(md);
-		/* pass through */
 	case DM_WQ_FLUSH_DEFERRED:
 		__flush_deferred_io(md);
 		break;
@@ -1526,7 +1523,7 @@
 		if (!md->suspended_bdev) {
 			DMWARN("bdget failed in dm_suspend");
 			r = -ENOMEM;
-			goto flush_and_out;
+			goto out;
 		}
 
 		/*
@@ -1577,14 +1574,6 @@
 
 	set_bit(DMF_SUSPENDED, &md->flags);
 
-flush_and_out:
-	if (r && noflush)
-		/*
-		 * Because there may be already I/Os in the pushback list,
-		 * flush them before return.
-		 */
-		dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
-
 out:
 	if (r && md->suspended_bdev) {
 		bdput(md->suspended_bdev);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cd189da..0ade60c 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -62,15 +62,6 @@
 int dm_target_iterate(void (*iter_func)(struct target_type *tt,
 					void *param), void *param);
 
-/*-----------------------------------------------------------------
- * Useful inlines.
- *---------------------------------------------------------------*/
-static inline int array_too_big(unsigned long fixed, unsigned long obj,
-				unsigned long num)
-{
-	return (num > (ULONG_MAX - fixed) / obj);
-}
-
 int dm_split_args(int *argc, char ***argvp, char *input);
 
 /*
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 5b34c13..127b052 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -545,11 +545,11 @@
 	if( VFL_TYPE_GRABBER == type ) {
 		vv->video_minor = vfd->minor;
 		INFO(("%s: registered device video%d [v4l2]\n",
-			dev->name, vfd->minor & 0x1f));
+			dev->name, vfd->num));
 	} else {
 		vv->vbi_minor = vfd->minor;
 		INFO(("%s: registered device vbi%d [v4l2]\n",
-			dev->name, vfd->minor & 0x1f));
+			dev->name, vfd->num));
 	}
 
 	*vid = vfd;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 99be9e5..fe0bd55 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -834,7 +834,7 @@
  * copying is done already, arg is a kernel pointer.
  */
 
-int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int cmd, void *arg)
+static int __saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
 {
 	struct saa7146_fh *fh  = file->private_data;
 	struct saa7146_dev *dev = fh->dev;
@@ -1215,12 +1215,18 @@
 	}
 #endif
 	default:
-		return v4l_compat_translate_ioctl(inode,file,cmd,arg,
-						  saa7146_video_do_ioctl);
+		return v4l_compat_translate_ioctl(file, cmd, arg,
+						  __saa7146_video_do_ioctl);
 	}
 	return 0;
 }
 
+int saa7146_video_do_ioctl(struct inode *inode, struct file *file,
+				    unsigned int cmd, void *arg)
+{
+	return __saa7146_video_do_ioctl(file, cmd, arg);
+}
+
 /*********************************************************************************/
 /* buffer handling functions                                                  */
 
diff --git a/drivers/media/common/tuners/mxl5005s.c b/drivers/media/common/tuners/mxl5005s.c
index 227642b..a887824 100644
--- a/drivers/media/common/tuners/mxl5005s.c
+++ b/drivers/media/common/tuners/mxl5005s.c
@@ -3481,7 +3481,9 @@
 					}
 					ctrlVal = 0;
 					for (k = 0; k < state->MXL_Ctrl[i].size; k++)
-						ctrlVal += state->MXL_Ctrl[i].val[k] * (1 << k);
+						ctrlVal += state->
+							MXL_Ctrl[i].val[k] *
+							(1 << k);
 				} else
 					return -1;
 			}
@@ -3581,7 +3583,7 @@
 
 static u32 MXL_Ceiling(u32 value, u32 resolution)
 {
-	return (value/resolution + (value % resolution > 0 ? 1 : 0));
+	return value / resolution + (value % resolution > 0 ? 1 : 0);
 }
 
 /* Retrieve the Initialzation Registers */
@@ -3910,7 +3912,10 @@
 
 static int mxl5005s_init(struct dvb_frontend *fe)
 {
+	struct mxl5005s_state *state = fe->tuner_priv;
+
 	dprintk(1, "%s()\n", __func__);
+	state->current_mode = MXL_QAM;
 	return mxl5005s_reconfigure(fe, MXL_QAM, MXL5005S_BANDWIDTH_6MHZ);
 }
 
@@ -4092,7 +4097,6 @@
 	state->frontend = fe;
 	state->config = config;
 	state->i2c = i2c;
-	state->current_mode = MXL_QAM;
 
 	printk(KERN_INFO "MXL5005S: Attached at address 0x%02x\n",
 		config->i2c_address);
diff --git a/drivers/media/common/tuners/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c
index 2a1aac1..fb3f3b3 100644
--- a/drivers/media/common/tuners/tuner-simple.c
+++ b/drivers/media/common/tuners/tuner-simple.c
@@ -493,6 +493,7 @@
 	case TUNER_PHILIPS_FM1216ME_MK3:
 	case TUNER_PHILIPS_FM1236_MK3:
 	case TUNER_PHILIPS_FMD1216ME_MK3:
+	case TUNER_PHILIPS_FMD1216MEX_MK3:
 	case TUNER_LG_NTSC_TAPE:
 	case TUNER_PHILIPS_FM1256_IH3:
 	case TUNER_TCL_MF02GIP_5N:
@@ -767,6 +768,7 @@
 
 	switch (priv->type) {
 	case TUNER_PHILIPS_FMD1216ME_MK3:
+	case TUNER_PHILIPS_FMD1216MEX_MK3:
 		if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ &&
 		    params->frequency >= 158870000)
 			buf[3] |= 0x08;
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index 04961a1..7c0bc06 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -946,7 +946,7 @@
 	},
 };
 
-/* ------------ TUNER_PHILIPS_FMD1216ME_MK3 - Philips PAL ------------ */
+/* ------------ TUNER_PHILIPS_FMD1216ME(X)_MK3 - Philips PAL ------------ */
 
 static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
 	{ 16 * 160.00 /*MHz*/, 0x86, 0x51, },
@@ -984,6 +984,27 @@
 	},
 };
 
+static struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
+	{
+		.type   = TUNER_PARAM_TYPE_PAL,
+		.ranges = tuner_philips_fmd1216me_mk3_pal_ranges,
+		.count  = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_pal_ranges),
+		.has_tda9887 = 1,
+		.port1_active = 1,
+		.port2_active = 1,
+		.port2_fm_high_sensitivity = 1,
+		.port2_invert_for_secam_lc = 1,
+		.port1_set_for_fm_mono = 1,
+		.radio_if = 1,
+		.fm_gain_normal = 1,
+	},
+	{
+		.type   = TUNER_PARAM_TYPE_DIGITAL,
+		.ranges = tuner_philips_fmd1216me_mk3_dvb_ranges,
+		.count  = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_dvb_ranges),
+		.iffreq = 16 * 36.125, /*MHz*/
+	},
+};
 
 /* ------ TUNER_LG_TDVS_H06XF - LG INNOTEK / INFINEON ATSC ----- */
 
@@ -1663,6 +1684,16 @@
 		.params = tuner_tcl_mf02gip_5n_params,
 		.count  = ARRAY_SIZE(tuner_tcl_mf02gip_5n_params),
 	},
+	[TUNER_PHILIPS_FMD1216MEX_MK3] = { /* Philips PAL */
+		.name   = "Philips FMD1216MEX MK3 Hybrid Tuner",
+		.params = tuner_philips_fmd1216mex_mk3_params,
+		.count  = ARRAY_SIZE(tuner_philips_fmd1216mex_mk3_params),
+		.min = 16 *  50.87,
+		.max = 16 * 858.00,
+		.stepsize = 166667,
+		.initdata = tua603x_agc112,
+		.sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 },
+	},
 };
 EXPORT_SYMBOL(tuners);
 
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index f9c2bb9..e12d13e 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -43,7 +43,7 @@
 static DEFINE_MUTEX(xc5000_list_mutex);
 static LIST_HEAD(hybrid_tuner_instance_list);
 
-#define dprintk(level,fmt, arg...) if (debug >= level) \
+#define dprintk(level, fmt, arg...) if (debug >= level) \
 	printk(KERN_INFO "%s: " fmt, "xc5000", ## arg)
 
 #define XC5000_DEFAULT_FIRMWARE "dvb-fe-xc5000-1.1.fw"
@@ -138,11 +138,11 @@
    immediately the length of the following transaction.
 
 */
-typedef struct {
+struct XC_TV_STANDARD {
 	char *Name;
 	u16 AudioMode;
 	u16 VideoMode;
-} XC_TV_STANDARD;
+};
 
 /* Tuner standards */
 #define MN_NTSC_PAL_BTSC	0
@@ -169,7 +169,7 @@
 #define FM_Radio_INPUT2 	21
 #define FM_Radio_INPUT1 	22
 
-static XC_TV_STANDARD XC5000_Standard[MAX_TV_STANDARD] = {
+static struct XC_TV_STANDARD XC5000_Standard[MAX_TV_STANDARD] = {
 	{"M/N-NTSC/PAL-BTSC", 0x0400, 0x8020},
 	{"M/N-NTSC/PAL-A2",   0x0600, 0x8020},
 	{"M/N-NTSC/PAL-EIAJ", 0x0440, 0x8020},
@@ -183,7 +183,7 @@
 	{"D/K-PAL-NICAM",     0x0E80, 0x8009},
 	{"D/K-PAL-MONO",      0x1478, 0x8009},
 	{"D/K-SECAM-A2 DK1",  0x1200, 0x8009},
-	{"D/K-SECAM-A2 L/DK3",0x0E00, 0x8009},
+	{"D/K-SECAM-A2 L/DK3", 0x0E00, 0x8009},
 	{"D/K-SECAM-A2 MONO", 0x1478, 0x8009},
 	{"L-SECAM-NICAM",     0x8E82, 0x0009},
 	{"L'-SECAM-NICAM",    0x8E82, 0x4009},
@@ -307,9 +307,10 @@
 	unsigned int len, pos, index;
 	u8 buf[XC_MAX_I2C_WRITE_LENGTH];
 
-	index=0;
-	while ((i2c_sequence[index]!=0xFF) || (i2c_sequence[index+1]!=0xFF)) {
-		len = i2c_sequence[index]* 256 + i2c_sequence[index+1];
+	index = 0;
+	while ((i2c_sequence[index] != 0xFF) ||
+		(i2c_sequence[index + 1] != 0xFF)) {
+		len = i2c_sequence[index] * 256 + i2c_sequence[index+1];
 		if (len == 0x0000) {
 			/* RESET command */
 			result = xc_reset(fe);
@@ -329,15 +330,17 @@
 			buf[1] = i2c_sequence[index + 1];
 			pos = 2;
 			while (pos < len) {
-				if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2) {
-					nbytes_to_send = XC_MAX_I2C_WRITE_LENGTH;
-				} else {
+				if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2)
+					nbytes_to_send =
+						XC_MAX_I2C_WRITE_LENGTH;
+				else
 					nbytes_to_send = (len - pos + 2);
+				for (i = 2; i < nbytes_to_send; i++) {
+					buf[i] = i2c_sequence[index + pos +
+						i - 2];
 				}
-				for (i=2; i<nbytes_to_send; i++) {
-					buf[i] = i2c_sequence[index + pos + i - 2];
-				}
-				result = xc_send_i2c_data(priv, buf, nbytes_to_send);
+				result = xc_send_i2c_data(priv, buf,
+					nbytes_to_send);
 
 				if (result != XC_RESULT_SUCCESS)
 					return result;
@@ -386,8 +389,7 @@
 	dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode,
 		rf_mode == XC_RF_MODE_AIR ? "ANTENNA" : "CABLE");
 
-	if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE))
-	{
+	if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE)) {
 		rf_mode = XC_RF_MODE_CABLE;
 		printk(KERN_ERR
 			"%s(), Invalid mode, defaulting to CABLE",
@@ -560,13 +562,13 @@
 		.flags = I2C_M_RD, .buf = buf, .len = len };
 
 	if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
-		printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n",(int)len);
+		printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n", (int)len);
 		return -EREMOTEIO;
 	}
 	return 0;
 }
 
-static int xc5000_fwupload(struct dvb_frontend* fe)
+static int xc5000_fwupload(struct dvb_frontend *fe)
 {
 	struct xc5000_priv *priv = fe->tuner_priv;
 	const struct firmware *fw;
@@ -576,7 +578,8 @@
 	printk(KERN_INFO "xc5000: waiting for firmware upload (%s)...\n",
 		XC5000_DEFAULT_FIRMWARE);
 
-	ret = request_firmware(&fw, XC5000_DEFAULT_FIRMWARE, &priv->i2c_props.adap->dev);
+	ret = request_firmware(&fw, XC5000_DEFAULT_FIRMWARE,
+		&priv->i2c_props.adap->dev);
 	if (ret) {
 		printk(KERN_ERR "xc5000: Upload failed. (file not found?)\n");
 		ret = XC_RESULT_RESET_FAILURE;
@@ -592,7 +595,7 @@
 		ret = XC_RESULT_RESET_FAILURE;
 	} else {
 		printk(KERN_INFO "xc5000: firmware upload\n");
-		ret = xc_load_i2c_sequence(fe,  fw->data );
+		ret = xc_load_i2c_sequence(fe,  fw->data);
 	}
 
 out:
@@ -651,7 +654,7 @@
 
 	dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
 
-	switch(params->u.vsb.modulation) {
+	switch (params->u.vsb.modulation) {
 	case VSB_8:
 	case VSB_16:
 		dprintk(1, "%s() VSB modulation\n", __func__);
@@ -748,42 +751,42 @@
 	/* FIX ME: Some video standards may have several possible audio
 		   standards. We simply default to one of them here.
 	 */
-	if(params->std & V4L2_STD_MN) {
+	if (params->std & V4L2_STD_MN) {
 		/* default to BTSC audio standard */
 		priv->video_standard = MN_NTSC_PAL_BTSC;
 		goto tune_channel;
 	}
 
-	if(params->std & V4L2_STD_PAL_BG) {
+	if (params->std & V4L2_STD_PAL_BG) {
 		/* default to NICAM audio standard */
 		priv->video_standard = BG_PAL_NICAM;
 		goto tune_channel;
 	}
 
-	if(params->std & V4L2_STD_PAL_I) {
+	if (params->std & V4L2_STD_PAL_I) {
 		/* default to NICAM audio standard */
 		priv->video_standard = I_PAL_NICAM;
 		goto tune_channel;
 	}
 
-	if(params->std & V4L2_STD_PAL_DK) {
+	if (params->std & V4L2_STD_PAL_DK) {
 		/* default to NICAM audio standard */
 		priv->video_standard = DK_PAL_NICAM;
 		goto tune_channel;
 	}
 
-	if(params->std & V4L2_STD_SECAM_DK) {
+	if (params->std & V4L2_STD_SECAM_DK) {
 		/* default to A2 DK1 audio standard */
 		priv->video_standard = DK_SECAM_A2DK1;
 		goto tune_channel;
 	}
 
-	if(params->std & V4L2_STD_SECAM_L) {
+	if (params->std & V4L2_STD_SECAM_L) {
 		priv->video_standard = L_SECAM_NICAM;
 		goto tune_channel;
 	}
 
-	if(params->std & V4L2_STD_SECAM_LC) {
+	if (params->std & V4L2_STD_SECAM_LC) {
 		priv->video_standard = LC_SECAM_NICAM;
 		goto tune_channel;
 	}
@@ -791,7 +794,7 @@
 tune_channel:
 	ret = xc_SetSignalSource(priv, priv->rf_mode);
 	if (ret != XC_RESULT_SUCCESS) {
-	printk(KERN_ERR
+		printk(KERN_ERR
 			"xc5000: xc_SetSignalSource(%d) failed\n",
 			priv->rf_mode);
 		return -EREMOTEIO;
@@ -863,7 +866,7 @@
 	 * I2C transactions until calibration is complete.  This way we
 	 * don't have to rely on clock stretching working.
 	 */
-	xc_wait( 100 );
+	xc_wait(100);
 
 	/* Default to "CABLE" mode */
 	ret |= xc_write_reg(priv, XREG_SIGNALSOURCE, XC_RF_MODE_CABLE);
@@ -885,15 +888,13 @@
 	 */
 
 	ret = xc_shutdown(priv);
-	if(ret != XC_RESULT_SUCCESS) {
+	if (ret != XC_RESULT_SUCCESS) {
 		printk(KERN_ERR
 			"xc5000: %s() unable to shutdown tuner\n",
 			__func__);
 		return -EREMOTEIO;
-	}
-	else {
+	} else
 		return XC_RESULT_SUCCESS;
-	}
 }
 
 static int xc5000_init(struct dvb_frontend *fe)
@@ -989,7 +990,7 @@
 	if (xc5000_readreg(priv, XREG_PRODUCT_ID, &id) != 0)
 		goto fail;
 
-	switch(id) {
+	switch (id) {
 	case XC_PRODUCT_ID_FW_LOADED:
 		printk(KERN_INFO
 			"xc5000: Successfully identified at address 0x%02x\n",
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index cf1a558..f4c1466 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -45,17 +45,17 @@
 
 #if defined(CONFIG_MEDIA_TUNER_XC5000) || \
     (defined(CONFIG_MEDIA_TUNER_XC5000_MODULE) && defined(MODULE))
-extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
+extern struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
 					  struct i2c_adapter *i2c,
 					  struct xc5000_config *cfg);
 #else
-static inline struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
+static inline struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
 						 struct i2c_adapter *i2c,
 						 struct xc5000_config *cfg)
 {
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
-#endif // CONFIG_MEDIA_TUNER_XC5000
+#endif
 
-#endif // __XC5000_H__
+#endif
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index f732144..14e627e 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -595,6 +595,18 @@
 	dm1105dvb_dma_unmap(dm1105dvb);
 }
 
+static struct stv0299_config sharp_z0194a_config = {
+	.demod_address = 0x68,
+	.inittab = sharp_z0194a_inittab,
+	.mclk = 88000000UL,
+	.invert = 1,
+	.skip_reinit = 0,
+	.lock_output = STV0299_LOCKOUTPUT_1,
+	.volt13_op0_op1 = STV0299_VOLT13_OP1,
+	.min_delay_ms = 100,
+	.set_symbol_rate = sharp_z0194a_set_symbol_rate,
+};
+
 static struct stv0288_config earda_config = {
 	.demod_address = 0x68,
 	.min_delay_ms = 100,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index f170e82..5689d1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -47,6 +47,7 @@
 static int dvb_force_auto_inversion;
 static int dvb_override_tune_delay;
 static int dvb_powerdown_on_sleep = 1;
+static int dvb_mfe_wait_time = 5;
 
 module_param_named(frontend_debug, dvb_frontend_debug, int, 0644);
 MODULE_PARM_DESC(frontend_debug, "Turn on/off frontend core debugging (default:off).");
@@ -58,6 +59,8 @@
 MODULE_PARM_DESC(dvb_override_tune_delay, "0: normal (default), >0 => delay in milliseconds to wait for lock after a tune attempt");
 module_param(dvb_powerdown_on_sleep, int, 0644);
 MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB voltage off on sleep (default)");
+module_param(dvb_mfe_wait_time, int, 0644);
+MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)");
 
 #define dprintk if (dvb_frontend_debug) printk
 
@@ -212,8 +215,9 @@
 
 static void dvb_frontend_init(struct dvb_frontend *fe)
 {
-	dprintk ("DVB: initialising frontend %i (%s)...\n",
+	dprintk ("DVB: initialising adapter %i frontend %i (%s)...\n",
 		 fe->dvb->num,
+		 fe->id,
 		 fe->ops.info.name);
 
 	if (fe->ops.init)
@@ -686,7 +690,7 @@
 	mb();
 
 	fe_thread = kthread_run(dvb_frontend_thread, fe,
-		"kdvb-fe-%i", fe->dvb->num);
+		"kdvb-ad-%i-fe-%i", fe->dvb->num, fe->id);
 	if (IS_ERR(fe_thread)) {
 		ret = PTR_ERR(fe_thread);
 		printk("dvb_frontend_start: failed to start kthread (%d)\n", ret);
@@ -710,8 +714,8 @@
 		*freq_max = min(fe->ops.info.frequency_max, fe->ops.tuner_ops.info.frequency_max);
 
 	if (*freq_min == 0 || *freq_max == 0)
-		printk(KERN_WARNING "DVB: frontend %u frequency limits undefined - fix the driver\n",
-		       fe->dvb->num);
+		printk(KERN_WARNING "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
+		       fe->dvb->num, fe->id);
 }
 
 static int dvb_frontend_check_parameters(struct dvb_frontend *fe,
@@ -724,8 +728,8 @@
 	dvb_frontend_get_frequeny_limits(fe, &freq_min, &freq_max);
 	if ((freq_min && parms->frequency < freq_min) ||
 	    (freq_max && parms->frequency > freq_max)) {
-		printk(KERN_WARNING "DVB: frontend %u frequency %u out of range (%u..%u)\n",
-		       fe->dvb->num, parms->frequency, freq_min, freq_max);
+		printk(KERN_WARNING "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n",
+		       fe->dvb->num, fe->id, parms->frequency, freq_min, freq_max);
 		return -EINVAL;
 	}
 
@@ -735,8 +739,8 @@
 		     parms->u.qpsk.symbol_rate < fe->ops.info.symbol_rate_min) ||
 		    (fe->ops.info.symbol_rate_max &&
 		     parms->u.qpsk.symbol_rate > fe->ops.info.symbol_rate_max)) {
-			printk(KERN_WARNING "DVB: frontend %u symbol rate %u out of range (%u..%u)\n",
-			       fe->dvb->num, parms->u.qpsk.symbol_rate,
+			printk(KERN_WARNING "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n",
+			       fe->dvb->num, fe->id, parms->u.qpsk.symbol_rate,
 			       fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max);
 			return -EINVAL;
 		}
@@ -746,8 +750,8 @@
 		     parms->u.qam.symbol_rate < fe->ops.info.symbol_rate_min) ||
 		    (fe->ops.info.symbol_rate_max &&
 		     parms->u.qam.symbol_rate > fe->ops.info.symbol_rate_max)) {
-			printk(KERN_WARNING "DVB: frontend %u symbol rate %u out of range (%u..%u)\n",
-			       fe->dvb->num, parms->u.qam.symbol_rate,
+			printk(KERN_WARNING "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n",
+			       fe->dvb->num, fe->id, parms->u.qam.symbol_rate,
 			       fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max);
 			return -EINVAL;
 		}
@@ -899,30 +903,30 @@
 	int i;
 
 	if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
-		printk("%s: tvp.cmd = 0x%08x (undefined/unknown/invalid)\n",
+		printk(KERN_WARNING "%s: tvp.cmd = 0x%08x undefined\n",
 			__func__, tvp->cmd);
 		return;
 	}
 
-	printk("%s() tvp.cmd    = 0x%08x (%s)\n"
-		,__FUNCTION__
+	dprintk("%s() tvp.cmd    = 0x%08x (%s)\n"
+		,__func__
 		,tvp->cmd
 		,dtv_cmds[ tvp->cmd ].name);
 
 	if(dtv_cmds[ tvp->cmd ].buffer) {
 
-		printk("%s() tvp.u.buffer.len = 0x%02x\n"
-			,__FUNCTION__
+		dprintk("%s() tvp.u.buffer.len = 0x%02x\n"
+			,__func__
 			,tvp->u.buffer.len);
 
 		for(i = 0; i < tvp->u.buffer.len; i++)
-			printk("%s() tvp.u.buffer.data[0x%02x] = 0x%02x\n"
-				,__FUNCTION__
+			dprintk("%s() tvp.u.buffer.data[0x%02x] = 0x%02x\n"
+				,__func__
 				,i
 				,tvp->u.buffer.data[i]);
 
 	} else
-		printk("%s() tvp.u.data = 0x%08x\n", __FUNCTION__, tvp->u.data);
+		dprintk("%s() tvp.u.data = 0x%08x\n", __func__, tvp->u.data);
 }
 
 int is_legacy_delivery_system(fe_delivery_system_t s)
@@ -942,8 +946,6 @@
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	printk("%s()\n", __FUNCTION__);
-
 	c->frequency = p->frequency;
 	c->inversion = p->inversion;
 
@@ -998,27 +1000,25 @@
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
 	struct dvb_frontend_parameters *p = &fepriv->parameters;
 
-	printk("%s()\n", __FUNCTION__);
-
 	p->frequency = c->frequency;
 	p->inversion = c->inversion;
 
 	switch (fe->ops.info.type) {
 	case FE_QPSK:
-		printk("%s() Preparing QPSK req\n", __FUNCTION__);
+		dprintk("%s() Preparing QPSK req\n", __func__);
 		p->u.qpsk.symbol_rate = c->symbol_rate;
 		p->u.qpsk.fec_inner = c->fec_inner;
 		c->delivery_system = SYS_DVBS;
 		break;
 	case FE_QAM:
-		printk("%s() Preparing QAM req\n", __FUNCTION__);
+		dprintk("%s() Preparing QAM req\n", __func__);
 		p->u.qam.symbol_rate = c->symbol_rate;
 		p->u.qam.fec_inner = c->fec_inner;
 		p->u.qam.modulation = c->modulation;
 		c->delivery_system = SYS_DVBC_ANNEX_AC;
 		break;
 	case FE_OFDM:
-		printk("%s() Preparing OFDM req\n", __FUNCTION__);
+		dprintk("%s() Preparing OFDM req\n", __func__);
 		if (c->bandwidth_hz == 6000000)
 			p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
 		else if (c->bandwidth_hz == 7000000)
@@ -1036,7 +1036,7 @@
 		c->delivery_system = SYS_DVBT;
 		break;
 	case FE_ATSC:
-		printk("%s() Preparing VSB req\n", __FUNCTION__);
+		dprintk("%s() Preparing VSB req\n", __func__);
 		p->u.vsb.modulation = c->modulation;
 		if ((c->modulation == VSB_8) || (c->modulation == VSB_16))
 			c->delivery_system = SYS_ATSC;
@@ -1055,14 +1055,13 @@
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
 	struct dvb_frontend_parameters *p = &fepriv->parameters;
 
-	printk("%s()\n", __FUNCTION__);
-
 	p->frequency = c->frequency;
 	p->inversion = c->inversion;
 
 	switch(c->modulation) {
 	case PSK_8:
 	case APSK_16:
+	case APSK_32:
 	case QPSK:
 		p->u.qpsk.symbol_rate = c->symbol_rate;
 		p->u.qpsk.fec_inner = c->fec_inner;
@@ -1089,19 +1088,17 @@
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	printk("%s()\n", __FUNCTION__);
-
 	/* For legacy delivery systems we don't need the delivery_system to
 	 * be specified, but we populate the older structures from the cache
 	 * so we can call set_frontend on older drivers.
 	 */
 	if(is_legacy_delivery_system(c->delivery_system)) {
 
-		printk("%s() legacy, modulation = %d\n", __FUNCTION__, c->modulation);
+		dprintk("%s() legacy, modulation = %d\n", __func__, c->modulation);
 		dtv_property_legacy_params_sync(fe);
 
 	} else {
-		printk("%s() adv, modulation = %d\n", __FUNCTION__, c->modulation);
+		dprintk("%s() adv, modulation = %d\n", __func__, c->modulation);
 
 		/* For advanced delivery systems / modulation types ...
 		 * we seed the lecacy dvb_frontend_parameters structure
@@ -1123,8 +1120,6 @@
 {
 	int r = 0;
 
-	printk("%s()\n", __FUNCTION__);
-
 	dtv_property_dump(tvp);
 
 	/* Allow the frontend to validate incoming properties */
@@ -1198,7 +1193,6 @@
 {
 	int r = 0;
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
-	printk("%s()\n", __FUNCTION__);
 	dtv_property_dump(tvp);
 
 	/* Allow the frontend to validate incoming properties */
@@ -1213,7 +1207,7 @@
 		/* Reset a cache of data specific to the frontend here. This does
 		 * not effect hardware.
 		 */
-		printk("%s() Flushing property cache\n", __FUNCTION__);
+		dprintk("%s() Flushing property cache\n", __func__);
 		memset(&fe->dtv_property_cache, 0, sizeof(struct dtv_frontend_properties));
 		fe->dtv_property_cache.state = tvp->cmd;
 		fe->dtv_property_cache.delivery_system = SYS_UNDEFINED;
@@ -1224,7 +1218,7 @@
 		 * ioctl.
 		 */
 		fe->dtv_property_cache.state = tvp->cmd;
-		printk("%s() Finalised property cache\n", __FUNCTION__);
+		dprintk("%s() Finalised property cache\n", __func__);
 		dtv_property_cache_submit(fe);
 
 		r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND,
@@ -1335,12 +1329,10 @@
 	dprintk("%s\n", __func__);
 
 	if(cmd == FE_SET_PROPERTY) {
-		printk("%s() FE_SET_PROPERTY\n", __FUNCTION__);
-
 		tvps = (struct dtv_properties __user *)parg;
 
-		printk("%s() properties.num = %d\n", __FUNCTION__, tvps->num);
-		printk("%s() properties.props = %p\n", __FUNCTION__, tvps->props);
+		dprintk("%s() properties.num = %d\n", __func__, tvps->num);
+		dprintk("%s() properties.props = %p\n", __func__, tvps->props);
 
 		/* Put an arbitrary limit on the number of messages that can
 		 * be sent at once */
@@ -1364,18 +1356,16 @@
 			err |= (tvp + i)->result;
 		}
 
-		if(fe->dtv_property_cache.state == DTV_TUNE) {
-			printk("%s() Property cache is full, tuning\n", __FUNCTION__);
-		}
+		if(fe->dtv_property_cache.state == DTV_TUNE)
+			dprintk("%s() Property cache is full, tuning\n", __func__);
 
 	} else
 	if(cmd == FE_GET_PROPERTY) {
-		printk("%s() FE_GET_PROPERTY\n", __FUNCTION__);
 
 		tvps = (struct dtv_properties __user *)parg;
 
-		printk("%s() properties.num = %d\n", __FUNCTION__, tvps->num);
-		printk("%s() properties.props = %p\n", __FUNCTION__, tvps->props);
+		dprintk("%s() properties.num = %d\n", __func__, tvps->num);
+		dprintk("%s() properties.props = %p\n", __func__, tvps->props);
 
 		/* Put an arbitrary limit on the number of messages that can
 		 * be sent at once */
@@ -1704,13 +1694,53 @@
 	struct dvb_device *dvbdev = file->private_data;
 	struct dvb_frontend *fe = dvbdev->priv;
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
+	struct dvb_adapter *adapter = fe->dvb;
 	int ret;
 
 	dprintk ("%s\n", __func__);
 
+	if (adapter->mfe_shared) {
+		mutex_lock (&adapter->mfe_lock);
+
+		if (adapter->mfe_dvbdev == NULL)
+			adapter->mfe_dvbdev = dvbdev;
+
+		else if (adapter->mfe_dvbdev != dvbdev) {
+			struct dvb_device
+				*mfedev = adapter->mfe_dvbdev;
+			struct dvb_frontend
+				*mfe = mfedev->priv;
+			struct dvb_frontend_private
+				*mfepriv = mfe->frontend_priv;
+			int mferetry = (dvb_mfe_wait_time << 1);
+
+			mutex_unlock (&adapter->mfe_lock);
+			while (mferetry-- && (mfedev->users != -1 ||
+					mfepriv->thread != NULL)) {
+				if(msleep_interruptible(500)) {
+					if(signal_pending(current))
+						return -EINTR;
+				}
+			}
+
+			mutex_lock (&adapter->mfe_lock);
+			if(adapter->mfe_dvbdev != dvbdev) {
+				mfedev = adapter->mfe_dvbdev;
+				mfe = mfedev->priv;
+				mfepriv = mfe->frontend_priv;
+				if (mfedev->users != -1 ||
+						mfepriv->thread != NULL) {
+					mutex_unlock (&adapter->mfe_lock);
+					return -EBUSY;
+				}
+				adapter->mfe_dvbdev = dvbdev;
+			}
+		}
+	}
+
 	if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
 		if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
-			return ret;
+			goto err0;
 	}
 
 	if ((ret = dvb_generic_open (inode, file)) < 0)
@@ -1730,6 +1760,8 @@
 		fepriv->events.eventr = fepriv->events.eventw = 0;
 	}
 
+	if (adapter->mfe_shared)
+		mutex_unlock (&adapter->mfe_lock);
 	return ret;
 
 err2:
@@ -1737,6 +1769,9 @@
 err1:
 	if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl)
 		fe->ops.ts_bus_ctrl(fe, 0);
+err0:
+	if (adapter->mfe_shared)
+		mutex_unlock (&adapter->mfe_lock);
 	return ret;
 }
 
@@ -1806,8 +1841,9 @@
 	fe->dvb = dvb;
 	fepriv->inversion = INVERSION_OFF;
 
-	printk ("DVB: registering frontend %i (%s)...\n",
+	printk ("DVB: registering adapter %i frontend %i (%s)...\n",
 		fe->dvb->num,
+		fe->id,
 		fe->ops.info.name);
 
 	dvb_register_device (fe->dvb, &fepriv->dvbdev, &dvbdev_template,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index 3055301..db4a63b 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -222,6 +222,7 @@
 	struct dtv_frontend_properties dtv_property_cache;
 #define DVB_FRONTEND_COMPONENT_TUNER 0
 	int (*callback)(void *adapter_priv, int component, int cmd, int arg);
+	int id;
 };
 
 extern int dvb_register_frontend(struct dvb_adapter *dvb,
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 665776d..a113744 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -326,6 +326,9 @@
 	adap->name = name;
 	adap->module = module;
 	adap->device = device;
+	adap->mfe_shared = 0;
+	adap->mfe_dvbdev = NULL;
+	mutex_init (&adap->mfe_lock);
 
 	list_add_tail (&adap->list_head, &dvb_adapter_list);
 
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 89d12dc..574e336 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -62,6 +62,10 @@
 	struct device *device;
 
 	struct module *module;
+
+	int mfe_shared;			/* indicates mutually exclusive frontends */
+	struct dvb_device *mfe_dvbdev;	/* frontend device in use */
+	struct mutex mfe_lock;		/* access lock for thread creation */
 };
 
 
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index ca53df6..6286fbb 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -422,6 +422,18 @@
 	return 0;
 }
 
+static struct stv0299_config sharp_z0194a_config = {
+	.demod_address = 0x68,
+	.inittab = sharp_z0194a_inittab,
+	.mclk = 88000000UL,
+	.invert = 1,
+	.skip_reinit = 0,
+	.lock_output = STV0299_LOCKOUTPUT_1,
+	.volt13_op0_op1 = STV0299_VOLT13_OP1,
+	.min_delay_ms = 100,
+	.set_symbol_rate = sharp_z0194a_set_symbol_rate,
+};
+
 static struct cx24116_config dw2104_config = {
 	.demod_address = 0x55,
 	.mpg_clk_pos_pol = 0x01,
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 9430e03..5d1abe3 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -34,13 +34,12 @@
 #include "dvb_frontend.h"
 #include "cx22702.h"
 
-
 struct cx22702_state {
 
-	struct i2c_adapter* i2c;
+	struct i2c_adapter *i2c;
 
 	/* configuration settings */
-	const struct cx22702_config* config;
+	const struct cx22702_config *config;
 
 	struct dvb_frontend frontend;
 
@@ -49,10 +48,13 @@
 };
 
 static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable verbose debug messages");
+
 #define dprintk	if (debug) printk
 
 /* Register values to initialise the demod */
-static u8 init_tab [] = {
+static u8 init_tab[] = {
 	0x00, 0x00, /* Stop acquisition */
 	0x0B, 0x06,
 	0x09, 0x01,
@@ -80,65 +82,67 @@
 	0xfd, 0x00,
 };
 
-static int cx22702_writereg (struct cx22702_state* state, u8 reg, u8 data)
+static int cx22702_writereg(struct cx22702_state *state, u8 reg, u8 data)
 {
 	int ret;
-	u8 buf [] = { reg, data };
-	struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
+	u8 buf[] = { reg, data };
+	struct i2c_msg msg = {
+		.addr = state->config->demod_address, .flags = 0,
+			.buf = buf, .len = 2 };
 
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+		printk(KERN_ERR
+			"%s: error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
 			__func__, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
 
-static u8 cx22702_readreg (struct cx22702_state* state, u8 reg)
+static u8 cx22702_readreg(struct cx22702_state *state, u8 reg)
 {
 	int ret;
-	u8 b0 [] = { reg };
-	u8 b1 [] = { 0 };
+	u8 b0[] = { reg };
+	u8 b1[] = { 0 };
 
-	struct i2c_msg msg [] = {
-		{ .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
-		{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
+	struct i2c_msg msg[] = {
+		{ .addr = state->config->demod_address, .flags = 0,
+			.buf = b0, .len = 1 },
+		{ .addr = state->config->demod_address, .flags = I2C_M_RD,
+			.buf = b1, .len = 1 } };
 
 	ret = i2c_transfer(state->i2c, msg, 2);
 
 	if (ret != 2)
-		printk("%s: readreg error (ret == %i)\n", __func__, ret);
+		printk(KERN_ERR "%s: readreg error (ret == %i)\n",
+			__func__, ret);
 
 	return b1[0];
 }
 
-static int cx22702_set_inversion (struct cx22702_state *state, int inversion)
+static int cx22702_set_inversion(struct cx22702_state *state, int inversion)
 {
 	u8 val;
 
 	switch (inversion) {
-
-		case INVERSION_AUTO:
-			return -EOPNOTSUPP;
-
-		case INVERSION_ON:
-			val = cx22702_readreg (state, 0x0C);
-			return cx22702_writereg (state, 0x0C, val | 0x01);
-
-		case INVERSION_OFF:
-			val = cx22702_readreg (state, 0x0C);
-			return cx22702_writereg (state, 0x0C, val & 0xfe);
-
-		default:
-			return -EINVAL;
-
+	case INVERSION_AUTO:
+		return -EOPNOTSUPP;
+	case INVERSION_ON:
+		val = cx22702_readreg(state, 0x0C);
+		return cx22702_writereg(state, 0x0C, val | 0x01);
+	case INVERSION_OFF:
+		val = cx22702_readreg(state, 0x0C);
+		return cx22702_writereg(state, 0x0C, val & 0xfe);
+	default:
+		return -EINVAL;
 	}
 
 }
 
 /* Retrieve the demod settings */
-static int cx22702_get_tps (struct cx22702_state *state, struct dvb_ofdm_parameters *p)
+static int cx22702_get_tps(struct cx22702_state *state,
+	struct dvb_ofdm_parameters *p)
 {
 	u8 val;
 
@@ -146,180 +150,281 @@
 	if (!(cx22702_readreg(state, 0x0A) & 0x20))
 		return -EAGAIN;
 
-	val = cx22702_readreg (state, 0x01);
-	switch( (val&0x18)>>3) {
-		case 0: p->constellation =   QPSK; break;
-		case 1: p->constellation = QAM_16; break;
-		case 2: p->constellation = QAM_64; break;
+	val = cx22702_readreg(state, 0x01);
+	switch ((val & 0x18) >> 3) {
+	case 0:
+		p->constellation = QPSK;
+		break;
+	case 1:
+		p->constellation = QAM_16;
+		break;
+	case 2:
+		p->constellation = QAM_64;
+		break;
 	}
-	switch( val&0x07 ) {
-		case 0: p->hierarchy_information = HIERARCHY_NONE; break;
-		case 1: p->hierarchy_information =    HIERARCHY_1; break;
-		case 2: p->hierarchy_information =    HIERARCHY_2; break;
-		case 3: p->hierarchy_information =    HIERARCHY_4; break;
+	switch (val & 0x07) {
+	case 0:
+		p->hierarchy_information = HIERARCHY_NONE;
+		break;
+	case 1:
+		p->hierarchy_information = HIERARCHY_1;
+		break;
+	case 2:
+		p->hierarchy_information = HIERARCHY_2;
+		break;
+	case 3:
+		p->hierarchy_information = HIERARCHY_4;
+		break;
 	}
 
 
-	val = cx22702_readreg (state, 0x02);
-	switch( (val&0x38)>>3 ) {
-		case 0: p->code_rate_HP = FEC_1_2; break;
-		case 1: p->code_rate_HP = FEC_2_3; break;
-		case 2: p->code_rate_HP = FEC_3_4; break;
-		case 3: p->code_rate_HP = FEC_5_6; break;
-		case 4: p->code_rate_HP = FEC_7_8; break;
+	val = cx22702_readreg(state, 0x02);
+	switch ((val & 0x38) >> 3) {
+	case 0:
+		p->code_rate_HP = FEC_1_2;
+		break;
+	case 1:
+		p->code_rate_HP = FEC_2_3;
+		break;
+	case 2:
+		p->code_rate_HP = FEC_3_4;
+		break;
+	case 3:
+		p->code_rate_HP = FEC_5_6;
+		break;
+	case 4:
+		p->code_rate_HP = FEC_7_8;
+		break;
 	}
-	switch( val&0x07 ) {
-		case 0: p->code_rate_LP = FEC_1_2; break;
-		case 1: p->code_rate_LP = FEC_2_3; break;
-		case 2: p->code_rate_LP = FEC_3_4; break;
-		case 3: p->code_rate_LP = FEC_5_6; break;
-		case 4: p->code_rate_LP = FEC_7_8; break;
+	switch (val & 0x07) {
+	case 0:
+		p->code_rate_LP = FEC_1_2;
+		break;
+	case 1:
+		p->code_rate_LP = FEC_2_3;
+		break;
+	case 2:
+		p->code_rate_LP = FEC_3_4;
+		break;
+	case 3:
+		p->code_rate_LP = FEC_5_6;
+		break;
+	case 4:
+		p->code_rate_LP = FEC_7_8;
+		break;
 	}
 
-
-	val = cx22702_readreg (state, 0x03);
-	switch( (val&0x0c)>>2 ) {
-		case 0: p->guard_interval = GUARD_INTERVAL_1_32; break;
-		case 1: p->guard_interval = GUARD_INTERVAL_1_16; break;
-		case 2: p->guard_interval =  GUARD_INTERVAL_1_8; break;
-		case 3: p->guard_interval =  GUARD_INTERVAL_1_4; break;
+	val = cx22702_readreg(state, 0x03);
+	switch ((val & 0x0c) >> 2) {
+	case 0:
+		p->guard_interval = GUARD_INTERVAL_1_32;
+		break;
+	case 1:
+		p->guard_interval = GUARD_INTERVAL_1_16;
+		break;
+	case 2:
+		p->guard_interval = GUARD_INTERVAL_1_8;
+		break;
+	case 3:
+		p->guard_interval = GUARD_INTERVAL_1_4;
+		break;
 	}
-	switch( val&0x03 ) {
-		case 0: p->transmission_mode = TRANSMISSION_MODE_2K; break;
-		case 1: p->transmission_mode = TRANSMISSION_MODE_8K; break;
+	switch (val & 0x03) {
+	case 0:
+		p->transmission_mode = TRANSMISSION_MODE_2K;
+		break;
+	case 1:
+		p->transmission_mode = TRANSMISSION_MODE_8K;
+		break;
 	}
 
 	return 0;
 }
 
-static int cx22702_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
+static int cx22702_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
-	dprintk ("%s(%d)\n", __func__, enable);
+	struct cx22702_state *state = fe->demodulator_priv;
+	dprintk("%s(%d)\n", __func__, enable);
 	if (enable)
-		return cx22702_writereg (state, 0x0D, cx22702_readreg(state, 0x0D) & 0xfe);
+		return cx22702_writereg(state, 0x0D,
+			cx22702_readreg(state, 0x0D) & 0xfe);
 	else
-		return cx22702_writereg (state, 0x0D, cx22702_readreg(state, 0x0D) | 1);
+		return cx22702_writereg(state, 0x0D,
+			cx22702_readreg(state, 0x0D) | 1);
 }
 
 /* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int cx22702_set_tps (struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx22702_set_tps(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p)
 {
 	u8 val;
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
 	if (fe->ops.tuner_ops.set_params) {
 		fe->ops.tuner_ops.set_params(fe, p);
-		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
+		if (fe->ops.i2c_gate_ctrl)
+			fe->ops.i2c_gate_ctrl(fe, 0);
 	}
 
 	/* set inversion */
-	cx22702_set_inversion (state, p->inversion);
+	cx22702_set_inversion(state, p->inversion);
 
 	/* set bandwidth */
-	switch(p->u.ofdm.bandwidth) {
+	switch (p->u.ofdm.bandwidth) {
 	case BANDWIDTH_6_MHZ:
-		cx22702_writereg(state, 0x0C, (cx22702_readreg(state, 0x0C) & 0xcf) | 0x20 );
+		cx22702_writereg(state, 0x0C,
+			(cx22702_readreg(state, 0x0C) & 0xcf) | 0x20);
 		break;
 	case BANDWIDTH_7_MHZ:
-		cx22702_writereg(state, 0x0C, (cx22702_readreg(state, 0x0C) & 0xcf) | 0x10 );
+		cx22702_writereg(state, 0x0C,
+			(cx22702_readreg(state, 0x0C) & 0xcf) | 0x10);
 		break;
 	case BANDWIDTH_8_MHZ:
-		cx22702_writereg(state, 0x0C, cx22702_readreg(state, 0x0C) &0xcf );
+		cx22702_writereg(state, 0x0C,
+			cx22702_readreg(state, 0x0C) & 0xcf);
 		break;
 	default:
-		dprintk ("%s: invalid bandwidth\n",__func__);
+		dprintk("%s: invalid bandwidth\n", __func__);
 		return -EINVAL;
 	}
 
-
-	p->u.ofdm.code_rate_LP = FEC_AUTO; //temp hack as manual not working
+	p->u.ofdm.code_rate_LP = FEC_AUTO; /* temp hack as manual not working */
 
 	/* use auto configuration? */
-	if((p->u.ofdm.hierarchy_information==HIERARCHY_AUTO) ||
-	   (p->u.ofdm.constellation==QAM_AUTO) ||
-	   (p->u.ofdm.code_rate_HP==FEC_AUTO) ||
-	   (p->u.ofdm.code_rate_LP==FEC_AUTO) ||
-	   (p->u.ofdm.guard_interval==GUARD_INTERVAL_AUTO) ||
-	   (p->u.ofdm.transmission_mode==TRANSMISSION_MODE_AUTO) ) {
+	if ((p->u.ofdm.hierarchy_information == HIERARCHY_AUTO) ||
+	   (p->u.ofdm.constellation == QAM_AUTO) ||
+	   (p->u.ofdm.code_rate_HP == FEC_AUTO) ||
+	   (p->u.ofdm.code_rate_LP == FEC_AUTO) ||
+	   (p->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO) ||
+	   (p->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO)) {
 
 		/* TPS Source - use hardware driven values */
 		cx22702_writereg(state, 0x06, 0x10);
 		cx22702_writereg(state, 0x07, 0x9);
 		cx22702_writereg(state, 0x08, 0xC1);
-		cx22702_writereg(state, 0x0B, cx22702_readreg(state, 0x0B) & 0xfc );
-		cx22702_writereg(state, 0x0C, (cx22702_readreg(state, 0x0C) & 0xBF) | 0x40 );
+		cx22702_writereg(state, 0x0B, cx22702_readreg(state, 0x0B)
+			& 0xfc);
+		cx22702_writereg(state, 0x0C,
+			(cx22702_readreg(state, 0x0C) & 0xBF) | 0x40);
 		cx22702_writereg(state, 0x00, 0x01); /* Begin acquisition */
-		dprintk("%s: Autodetecting\n",__func__);
+		dprintk("%s: Autodetecting\n", __func__);
 		return 0;
 	}
 
 	/* manually programmed values */
-	val=0;
-	switch(p->u.ofdm.constellation) {
-		case   QPSK: val = (val&0xe7); break;
-		case QAM_16: val = (val&0xe7)|0x08; break;
-		case QAM_64: val = (val&0xe7)|0x10; break;
-		default:
-			dprintk ("%s: invalid constellation\n",__func__);
-			return -EINVAL;
+	val = 0;
+	switch (p->u.ofdm.constellation) {
+	case QPSK:
+		val = (val & 0xe7);
+		break;
+	case QAM_16:
+		val = (val & 0xe7) | 0x08;
+		break;
+	case QAM_64:
+		val = (val & 0xe7) | 0x10;
+		break;
+	default:
+		dprintk("%s: invalid constellation\n", __func__);
+		return -EINVAL;
 	}
-	switch(p->u.ofdm.hierarchy_information) {
-		case HIERARCHY_NONE: val = (val&0xf8); break;
-		case    HIERARCHY_1: val = (val&0xf8)|1; break;
-		case    HIERARCHY_2: val = (val&0xf8)|2; break;
-		case    HIERARCHY_4: val = (val&0xf8)|3; break;
-		default:
-			dprintk ("%s: invalid hierarchy\n",__func__);
-			return -EINVAL;
+	switch (p->u.ofdm.hierarchy_information) {
+	case HIERARCHY_NONE:
+		val = (val & 0xf8);
+		break;
+	case HIERARCHY_1:
+		val = (val & 0xf8) | 1;
+		break;
+	case HIERARCHY_2:
+		val = (val & 0xf8) | 2;
+		break;
+	case HIERARCHY_4:
+		val = (val & 0xf8) | 3;
+		break;
+	default:
+		dprintk("%s: invalid hierarchy\n", __func__);
+		return -EINVAL;
 	}
-	cx22702_writereg (state, 0x06, val);
+	cx22702_writereg(state, 0x06, val);
 
-	val=0;
-	switch(p->u.ofdm.code_rate_HP) {
-		case FEC_NONE:
-		case FEC_1_2: val = (val&0xc7); break;
-		case FEC_2_3: val = (val&0xc7)|0x08; break;
-		case FEC_3_4: val = (val&0xc7)|0x10; break;
-		case FEC_5_6: val = (val&0xc7)|0x18; break;
-		case FEC_7_8: val = (val&0xc7)|0x20; break;
-		default:
-			dprintk ("%s: invalid code_rate_HP\n",__func__);
-			return -EINVAL;
+	val = 0;
+	switch (p->u.ofdm.code_rate_HP) {
+	case FEC_NONE:
+	case FEC_1_2:
+		val = (val & 0xc7);
+		break;
+	case FEC_2_3:
+		val = (val & 0xc7) | 0x08;
+		break;
+	case FEC_3_4:
+		val = (val & 0xc7) | 0x10;
+		break;
+	case FEC_5_6:
+		val = (val & 0xc7) | 0x18;
+		break;
+	case FEC_7_8:
+		val = (val & 0xc7) | 0x20;
+		break;
+	default:
+		dprintk("%s: invalid code_rate_HP\n", __func__);
+		return -EINVAL;
 	}
-	switch(p->u.ofdm.code_rate_LP) {
-		case FEC_NONE:
-		case FEC_1_2: val = (val&0xf8); break;
-		case FEC_2_3: val = (val&0xf8)|1; break;
-		case FEC_3_4: val = (val&0xf8)|2; break;
-		case FEC_5_6: val = (val&0xf8)|3; break;
-		case FEC_7_8: val = (val&0xf8)|4; break;
-		default:
-			dprintk ("%s: invalid code_rate_LP\n",__func__);
-			return -EINVAL;
+	switch (p->u.ofdm.code_rate_LP) {
+	case FEC_NONE:
+	case FEC_1_2:
+		val = (val & 0xf8);
+		break;
+	case FEC_2_3:
+		val = (val & 0xf8) | 1;
+		break;
+	case FEC_3_4:
+		val = (val & 0xf8) | 2;
+		break;
+	case FEC_5_6:
+		val = (val & 0xf8) | 3;
+		break;
+	case FEC_7_8:
+		val = (val & 0xf8) | 4;
+		break;
+	default:
+		dprintk("%s: invalid code_rate_LP\n", __func__);
+		return -EINVAL;
 	}
-	cx22702_writereg (state, 0x07, val);
+	cx22702_writereg(state, 0x07, val);
 
-	val=0;
-	switch(p->u.ofdm.guard_interval) {
-		case GUARD_INTERVAL_1_32: val = (val&0xf3); break;
-		case GUARD_INTERVAL_1_16: val = (val&0xf3)|0x04; break;
-		case  GUARD_INTERVAL_1_8: val = (val&0xf3)|0x08; break;
-		case  GUARD_INTERVAL_1_4: val = (val&0xf3)|0x0c; break;
-		default:
-			dprintk ("%s: invalid guard_interval\n",__func__);
-			return -EINVAL;
+	val = 0;
+	switch (p->u.ofdm.guard_interval) {
+	case GUARD_INTERVAL_1_32:
+		val = (val & 0xf3);
+		break;
+	case GUARD_INTERVAL_1_16:
+		val = (val & 0xf3) | 0x04;
+		break;
+	case GUARD_INTERVAL_1_8:
+		val = (val & 0xf3) | 0x08;
+		break;
+	case GUARD_INTERVAL_1_4:
+		val = (val & 0xf3) | 0x0c;
+		break;
+	default:
+		dprintk("%s: invalid guard_interval\n", __func__);
+		return -EINVAL;
 	}
-	switch(p->u.ofdm.transmission_mode) {
-		case TRANSMISSION_MODE_2K: val = (val&0xfc); break;
-		case TRANSMISSION_MODE_8K: val = (val&0xfc)|1; break;
-		default:
-			dprintk ("%s: invalid transmission_mode\n",__func__);
-			return -EINVAL;
+	switch (p->u.ofdm.transmission_mode) {
+	case TRANSMISSION_MODE_2K:
+		val = (val & 0xfc);
+		break;
+	case TRANSMISSION_MODE_8K:
+		val = (val & 0xfc) | 1;
+		break;
+	default:
+		dprintk("%s: invalid transmission_mode\n", __func__);
+		return -EINVAL;
 	}
 	cx22702_writereg(state, 0x08, val);
-	cx22702_writereg(state, 0x0B, (cx22702_readreg(state, 0x0B) & 0xfc) | 0x02 );
-	cx22702_writereg(state, 0x0C, (cx22702_readreg(state, 0x0C) & 0xBF) | 0x40 );
+	cx22702_writereg(state, 0x0B,
+		(cx22702_readreg(state, 0x0B) & 0xfc) | 0x02);
+	cx22702_writereg(state, 0x0C,
+		(cx22702_readreg(state, 0x0C) & 0xBF) | 0x40);
 
 	/* Begin channel acquisition */
 	cx22702_writereg(state, 0x00, 0x01);
@@ -329,109 +434,111 @@
 
 /* Reset the demod hardware and reset all of the configuration registers
    to a default state. */
-static int cx22702_init (struct dvb_frontend* fe)
+static int cx22702_init(struct dvb_frontend *fe)
 {
 	int i;
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
-	cx22702_writereg (state, 0x00, 0x02);
+	cx22702_writereg(state, 0x00, 0x02);
 
 	msleep(10);
 
-	for (i=0; i<sizeof(init_tab); i+=2)
-		cx22702_writereg (state, init_tab[i], init_tab[i+1]);
+	for (i = 0; i < ARRAY_SIZE(init_tab); i += 2)
+		cx22702_writereg(state, init_tab[i], init_tab[i + 1]);
 
-	cx22702_writereg (state, 0xf8, (state->config->output_mode << 1) & 0x02);
+	cx22702_writereg(state, 0xf8, (state->config->output_mode << 1)
+		& 0x02);
 
 	cx22702_i2c_gate_ctrl(fe, 0);
 
 	return 0;
 }
 
-static int cx22702_read_status(struct dvb_frontend* fe, fe_status_t* status)
+static int cx22702_read_status(struct dvb_frontend *fe, fe_status_t *status)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 	u8 reg0A;
 	u8 reg23;
 
 	*status = 0;
 
-	reg0A = cx22702_readreg (state, 0x0A);
-	reg23 = cx22702_readreg (state, 0x23);
+	reg0A = cx22702_readreg(state, 0x0A);
+	reg23 = cx22702_readreg(state, 0x23);
 
-	dprintk ("%s: status demod=0x%02x agc=0x%02x\n"
-		,__func__,reg0A,reg23);
+	dprintk("%s: status demod=0x%02x agc=0x%02x\n"
+		, __func__, reg0A, reg23);
 
-	if(reg0A & 0x10) {
+	if (reg0A & 0x10) {
 		*status |= FE_HAS_LOCK;
 		*status |= FE_HAS_VITERBI;
 		*status |= FE_HAS_SYNC;
 	}
 
-	if(reg0A & 0x20)
+	if (reg0A & 0x20)
 		*status |= FE_HAS_CARRIER;
 
-	if(reg23 < 0xf0)
+	if (reg23 < 0xf0)
 		*status |= FE_HAS_SIGNAL;
 
 	return 0;
 }
 
-static int cx22702_read_ber(struct dvb_frontend* fe, u32* ber)
+static int cx22702_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
-	if(cx22702_readreg (state, 0xE4) & 0x02) {
+	if (cx22702_readreg(state, 0xE4) & 0x02) {
 		/* Realtime statistics */
-		*ber = (cx22702_readreg (state, 0xDE) & 0x7F) << 7
-			| (cx22702_readreg (state, 0xDF)&0x7F);
+		*ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7
+			| (cx22702_readreg(state, 0xDF) & 0x7F);
 	} else {
 		/* Averaging statistics */
-		*ber = (cx22702_readreg (state, 0xDE) & 0x7F) << 7
-			| cx22702_readreg (state, 0xDF);
+		*ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7
+			| cx22702_readreg(state, 0xDF);
 	}
 
 	return 0;
 }
 
-static int cx22702_read_signal_strength(struct dvb_frontend* fe, u16* signal_strength)
+static int cx22702_read_signal_strength(struct dvb_frontend *fe,
+	u16 *signal_strength)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
 	u16 rs_ber = 0;
-	rs_ber = cx22702_readreg (state, 0x23);
+	rs_ber = cx22702_readreg(state, 0x23);
 	*signal_strength = (rs_ber << 8) | rs_ber;
 
 	return 0;
 }
 
-static int cx22702_read_snr(struct dvb_frontend* fe, u16* snr)
+static int cx22702_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
-	u16 rs_ber=0;
-	if(cx22702_readreg (state, 0xE4) & 0x02) {
+	u16 rs_ber = 0;
+	if (cx22702_readreg(state, 0xE4) & 0x02) {
 		/* Realtime statistics */
-		rs_ber = (cx22702_readreg (state, 0xDE) & 0x7F) << 7
-			| (cx22702_readreg (state, 0xDF)& 0x7F);
+		rs_ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7
+			| (cx22702_readreg(state, 0xDF) & 0x7F);
 	} else {
 		/* Averaging statistics */
-		rs_ber = (cx22702_readreg (state, 0xDE) & 0x7F) << 8
-			| cx22702_readreg (state, 0xDF);
+		rs_ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 8
+			| cx22702_readreg(state, 0xDF);
 	}
 	*snr = ~rs_ber;
 
 	return 0;
 }
 
-static int cx22702_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+static int cx22702_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
 	u8 _ucblocks;
 
 	/* RS Uncorrectable Packet Count then reset */
-	_ucblocks = cx22702_readreg (state, 0xE3);
+	_ucblocks = cx22702_readreg(state, 0xE3);
 	if (state->prevUCBlocks < _ucblocks)
 		*ucblocks = (_ucblocks - state->prevUCBlocks);
 	else
@@ -441,34 +548,36 @@
 	return 0;
 }
 
-static int cx22702_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx22702_get_frontend(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 
-	u8 reg0C = cx22702_readreg (state, 0x0C);
+	u8 reg0C = cx22702_readreg(state, 0x0C);
 
 	p->inversion = reg0C & 0x1 ? INVERSION_ON : INVERSION_OFF;
-	return cx22702_get_tps (state, &p->u.ofdm);
+	return cx22702_get_tps(state, &p->u.ofdm);
 }
 
-static int cx22702_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
+static int cx22702_get_tune_settings(struct dvb_frontend *fe,
+	struct dvb_frontend_tune_settings *tune)
 {
 	tune->min_delay_ms = 1000;
 	return 0;
 }
 
-static void cx22702_release(struct dvb_frontend* fe)
+static void cx22702_release(struct dvb_frontend *fe)
 {
-	struct cx22702_state* state = fe->demodulator_priv;
+	struct cx22702_state *state = fe->demodulator_priv;
 	kfree(state);
 }
 
 static struct dvb_frontend_ops cx22702_ops;
 
-struct dvb_frontend* cx22702_attach(const struct cx22702_config* config,
-				    struct i2c_adapter* i2c)
+struct dvb_frontend *cx22702_attach(const struct cx22702_config *config,
+	struct i2c_adapter *i2c)
 {
-	struct cx22702_state* state = NULL;
+	struct cx22702_state *state = NULL;
 
 	/* allocate memory for the internal state */
 	state = kmalloc(sizeof(struct cx22702_state), GFP_KERNEL);
@@ -485,7 +594,8 @@
 		goto error;
 
 	/* create dvb_frontend */
-	memcpy(&state->frontend.ops, &cx22702_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->frontend.ops, &cx22702_ops,
+		sizeof(struct dvb_frontend_ops));
 	state->frontend.demodulator_priv = state;
 	return &state->frontend;
 
@@ -493,6 +603,7 @@
 	kfree(state);
 	return NULL;
 }
+EXPORT_SYMBOL(cx22702_attach);
 
 static struct dvb_frontend_ops cx22702_ops = {
 
@@ -525,11 +636,6 @@
 	.read_ucblocks = cx22702_read_ucblocks,
 };
 
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Enable verbose debug messages");
-
 MODULE_DESCRIPTION("Conexant CX22702 DVB-T Demodulator driver");
 MODULE_AUTHOR("Steven Toth");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(cx22702_attach);
diff --git a/drivers/media/dvb/frontends/cx22702.h b/drivers/media/dvb/frontends/cx22702.h
index b1e465c..f154e1f 100644
--- a/drivers/media/dvb/frontends/cx22702.h
+++ b/drivers/media/dvb/frontends/cx22702.h
@@ -30,8 +30,7 @@
 
 #include <linux/dvb/frontend.h>
 
-struct cx22702_config
-{
+struct cx22702_config {
 	/* the demodulator's i2c address */
 	u8 demod_address;
 
@@ -41,16 +40,19 @@
 	u8 output_mode;
 };
 
-#if defined(CONFIG_DVB_CX22702) || (defined(CONFIG_DVB_CX22702_MODULE) && defined(MODULE))
-extern struct dvb_frontend* cx22702_attach(const struct cx22702_config* config,
-					   struct i2c_adapter* i2c);
+#if defined(CONFIG_DVB_CX22702) || (defined(CONFIG_DVB_CX22702_MODULE) \
+	&& defined(MODULE))
+extern struct dvb_frontend *cx22702_attach(
+	const struct cx22702_config *config,
+	struct i2c_adapter *i2c);
 #else
-static inline struct dvb_frontend* cx22702_attach(const struct cx22702_config* config,
-					   struct i2c_adapter* i2c)
+static inline struct dvb_frontend *cx22702_attach(
+	const struct cx22702_config *config,
+	struct i2c_adapter *i2c)
 {
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
-#endif // CONFIG_DVB_CX22702
+#endif
 
-#endif // CX22702_H
+#endif
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index deb36f4..b144b30 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -41,10 +41,14 @@
 #include "dvb_frontend.h"
 #include "cx24116.h"
 
-static int debug = 0;
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
+
 #define dprintk(args...) \
 	do { \
-		if (debug) printk ("cx24116: " args); \
+		if (debug) \
+			printk(KERN_INFO "cx24116: " args); \
 	} while (0)
 
 #define CX24116_DEFAULT_FIRMWARE "dvb-fe-cx24116.fw"
@@ -68,13 +72,20 @@
 #define CX24116_REG_UCB8    (0xca)
 #define CX24116_REG_CLKDIV  (0xf3)
 #define CX24116_REG_RATEDIV (0xf9)
-#define CX24116_REG_FECSTATUS (0x9c)    /* configured fec (not tuned) or actual FEC (tuned) 1=1/2 2=2/3 etc */
+
+/* configured fec (not tuned) or actual FEC (tuned) 1=1/2 2=2/3 etc */
+#define CX24116_REG_FECSTATUS (0x9c)
 
 /* FECSTATUS bits */
-#define CX24116_FEC_FECMASK   (0x1f)    /* mask to determine configured fec (not tuned) or actual fec (tuned) */
-#define CX24116_FEC_DVBS      (0x20)    /* Select DVB-S demodulator, else DVB-S2 */
+/* mask to determine configured fec (not tuned) or actual fec (tuned) */
+#define CX24116_FEC_FECMASK   (0x1f)
+
+/* Select DVB-S demodulator, else DVB-S2 */
+#define CX24116_FEC_DVBS      (0x20)
 #define CX24116_FEC_UNKNOWN   (0x40)    /* Unknown/unused */
-#define CX24116_FEC_PILOT     (0x80)    /* Pilot mode requested when tuning else always reset when tuned */
+
+/* Pilot mode requested when tuning else always reset when tuned */
+#define CX24116_FEC_PILOT     (0x80)
 
 /* arg buffer size */
 #define CX24116_ARGLEN (0x1e)
@@ -116,12 +127,17 @@
 
 /* DiSEqC tone burst */
 static int toneburst = 1;
+module_param(toneburst, int, 0644);
+MODULE_PARM_DESC(toneburst, "DiSEqC toneburst 0=OFF, 1=TONE CACHE, "\
+	"2=MESSAGE CACHE (default:1)");
 
 /* SNR measurements */
-static int esno_snr = 0;
+static int esno_snr;
+module_param(esno_snr, int, 0644);
+MODULE_PARM_DESC(debug, "SNR return units, 0=PERCENTAGE 0-100, "\
+	"1=ESNO(db * 10) (default:0)");
 
-enum cmds
-{
+enum cmds {
 	CMD_SET_VCO     = 0x10,
 	CMD_TUNEREQUEST = 0x11,
 	CMD_MPEGCONFIG  = 0x13,
@@ -138,8 +154,7 @@
 };
 
 /* The Demod/Tuner can't easily provide these, we cache them */
-struct cx24116_tuning
-{
+struct cx24116_tuning {
 	u32 frequency;
 	u32 symbol_rate;
 	fe_spectral_inversion_t inversion;
@@ -158,16 +173,14 @@
 };
 
 /* Basic commands that are sent to the firmware */
-struct cx24116_cmd
-{
+struct cx24116_cmd {
 	u8 len;
 	u8 args[CX24116_ARGLEN];
 };
 
-struct cx24116_state
-{
-	struct i2c_adapter* i2c;
-	const struct cx24116_config* config;
+struct cx24116_state {
+	struct i2c_adapter *i2c;
+	const struct cx24116_config *config;
 
 	struct dvb_frontend frontend;
 
@@ -179,19 +192,20 @@
 	struct cx24116_cmd dsec_cmd;
 };
 
-static int cx24116_writereg(struct cx24116_state* state, int reg, int data)
+static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
 {
 	u8 buf[] = { reg, data };
 	struct i2c_msg msg = { .addr = state->config->demod_address,
 		.flags = 0, .buf = buf, .len = 2 };
 	int err;
 
-	if (debug>1)
+	if (debug > 1)
 		printk("cx24116: %s: write reg 0x%02x, value 0x%02x\n",
-						__func__,reg, data);
+			__func__, reg, data);
 
-	if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-		printk("%s: writereg error(err == %i, reg == 0x%02x,"
+	err = i2c_transfer(state->i2c, &msg, 1);
+	if (err != 1) {
+		printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
 			 " value == 0x%02x)\n", __func__, err, reg, data);
 		return -EREMOTEIO;
 	}
@@ -200,7 +214,8 @@
 }
 
 /* Bulk byte writes to a single I2C address, for 32k firmware load */
-static int cx24116_writeregN(struct cx24116_state* state, int reg, u8 *data, u16 len)
+static int cx24116_writeregN(struct cx24116_state *state, int reg,
+			     const u8 *data, u16 len)
 {
 	int ret = -EREMOTEIO;
 	struct i2c_msg msg;
@@ -221,12 +236,13 @@
 	msg.buf = buf;
 	msg.len = len + 1;
 
-	if (debug>1)
-		printk("cx24116: %s:  write regN 0x%02x, len = %d\n",
-						__func__,reg, len);
+	if (debug > 1)
+		printk(KERN_INFO "cx24116: %s:  write regN 0x%02x, len = %d\n",
+			__func__, reg, len);
 
-	if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-		printk("%s: writereg error(err == %i, reg == 0x%02x\n",
+	ret = i2c_transfer(state->i2c, &msg, 1);
+	if (ret != 1) {
+		printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x\n",
 			 __func__, ret, reg);
 		ret = -EREMOTEIO;
 	}
@@ -237,30 +253,35 @@
 	return ret;
 }
 
-static int cx24116_readreg(struct cx24116_state* state, u8 reg)
+static int cx24116_readreg(struct cx24116_state *state, u8 reg)
 {
 	int ret;
 	u8 b0[] = { reg };
 	u8 b1[] = { 0 };
 	struct i2c_msg msg[] = {
-		{ .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
-		{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 }
+		{ .addr = state->config->demod_address, .flags = 0,
+			.buf = b0, .len = 1 },
+		{ .addr = state->config->demod_address, .flags = I2C_M_RD,
+			.buf = b1, .len = 1 }
 	};
 
 	ret = i2c_transfer(state->i2c, msg, 2);
 
 	if (ret != 2) {
-		printk("%s: reg=0x%x (error=%d)\n", __func__, reg, ret);
+		printk(KERN_ERR "%s: reg=0x%x (error=%d)\n",
+			__func__, reg, ret);
 		return ret;
 	}
 
-	if (debug>1)
-		printk("cx24116: read reg 0x%02x, value 0x%02x\n",reg, b1[0]);
+	if (debug > 1)
+		printk(KERN_INFO "cx24116: read reg 0x%02x, value 0x%02x\n",
+			reg, b1[0]);
 
 	return b1[0];
 }
 
-static int cx24116_set_inversion(struct cx24116_state* state, fe_spectral_inversion_t inversion)
+static int cx24116_set_inversion(struct cx24116_state *state,
+	fe_spectral_inversion_t inversion)
 {
 	dprintk("%s(%d)\n", __func__, inversion);
 
@@ -308,10 +329,10 @@
  * Eg.(2/3) szap "Zone Horror"
  *
  * mask/val = 0x04, 0x20
- * status 1f | signal c3c0 | snr a333 | ber 00000098 | unc 00000000 | FE_HAS_LOCK
+ * status 1f | signal c3c0 | snr a333 | ber 00000098 | unc 0 | FE_HAS_LOCK
  *
  * mask/val = 0x04, 0x30
- * status 1f | signal c3c0 | snr a333 | ber 00000000 | unc 00000000 | FE_HAS_LOCK
+ * status 1f | signal c3c0 | snr a333 | ber 00000000 | unc 0 | FE_HAS_LOCK
  *
  * After tuning FECSTATUS contains actual FEC
  * in use numbered 1 through to 8 for 1/2 .. 2/3 etc
@@ -389,18 +410,16 @@
   */
 };
 
-static int cx24116_lookup_fecmod(struct cx24116_state* state,
+static int cx24116_lookup_fecmod(struct cx24116_state *state,
 	fe_modulation_t m, fe_code_rate_t f)
 {
 	int i, ret = -EOPNOTSUPP;
 
 	dprintk("%s(0x%02x,0x%02x)\n", __func__, m, f);
 
-	for(i=0 ; i < sizeof(CX24116_MODFEC_MODES) / sizeof(struct cx24116_modfec) ; i++)
-	{
-		if( (m == CX24116_MODFEC_MODES[i].modulation) &&
-			(f == CX24116_MODFEC_MODES[i].fec) )
-			{
+	for (i = 0; i < ARRAY_SIZE(CX24116_MODFEC_MODES); i++) {
+		if ((m == CX24116_MODFEC_MODES[i].modulation) &&
+			(f == CX24116_MODFEC_MODES[i].fec)) {
 				ret = i;
 				break;
 			}
@@ -409,7 +428,8 @@
 	return ret;
 }
 
-static int cx24116_set_fec(struct cx24116_state* state, fe_modulation_t mod, fe_code_rate_t fec)
+static int cx24116_set_fec(struct cx24116_state *state,
+	fe_modulation_t mod, fe_code_rate_t fec)
 {
 	int ret = 0;
 
@@ -417,7 +437,7 @@
 
 	ret = cx24116_lookup_fecmod(state, mod, fec);
 
-	if(ret < 0)
+	if (ret < 0)
 		return ret;
 
 	state->dnxt.fec = fec;
@@ -429,7 +449,7 @@
 	return 0;
 }
 
-static int cx24116_set_symbolrate(struct cx24116_state* state, u32 rate)
+static int cx24116_set_symbolrate(struct cx24116_state *state, u32 rate)
 {
 	dprintk("%s(%d)\n", __func__, rate);
 
@@ -446,42 +466,49 @@
 	return 0;
 }
 
-static int cx24116_load_firmware (struct dvb_frontend* fe, const struct firmware *fw);
+static int cx24116_load_firmware(struct dvb_frontend *fe,
+	const struct firmware *fw);
 
-static int cx24116_firmware_ondemand(struct dvb_frontend* fe)
+static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	const struct firmware *fw;
 	int ret = 0;
 
-	dprintk("%s()\n",__func__);
+	dprintk("%s()\n", __func__);
 
-	if (cx24116_readreg(state, 0x20) > 0)
-	{
+	if (cx24116_readreg(state, 0x20) > 0) {
 
 		if (state->skip_fw_load)
 			return 0;
 
 		/* Load firmware */
-		/* request the firmware, this will block until someone uploads it */
-		printk("%s: Waiting for firmware upload (%s)...\n", __func__, CX24116_DEFAULT_FIRMWARE);
-		ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE, &state->i2c->dev);
-		printk("%s: Waiting for firmware upload(2)...\n", __func__);
+		/* request the firmware, this will block until loaded */
+		printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n",
+			__func__, CX24116_DEFAULT_FIRMWARE);
+		ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE,
+			&state->i2c->dev);
+		printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
+			__func__);
 		if (ret) {
-			printk("%s: No firmware uploaded (timeout or file not found?)\n", __func__);
+			printk(KERN_ERR "%s: No firmware uploaded "
+				"(timeout or file not found?)\n", __func__);
 			return ret;
 		}
 
-		/* Make sure we don't recurse back through here during loading */
+		/* Make sure we don't recurse back through here
+		 * during loading */
 		state->skip_fw_load = 1;
 
 		ret = cx24116_load_firmware(fe, fw);
 		if (ret)
-			printk("%s: Writing firmware to device failed\n", __func__);
+			printk(KERN_ERR "%s: Writing firmware to device failed\n",
+				__func__);
 
 		release_firmware(fw);
 
-		printk("%s: Firmware upload %s\n", __func__, ret == 0 ? "complete" : "failed");
+		printk(KERN_INFO "%s: Firmware upload %s\n", __func__,
+			ret == 0 ? "complete" : "failed");
 
 		/* Ensure firmware is always loaded if required */
 		state->skip_fw_load = 0;
@@ -490,8 +517,10 @@
 	return ret;
 }
 
-/* Take a basic firmware command structure, format it and forward it for processing */
-static int cx24116_cmd_execute(struct dvb_frontend* fe, struct cx24116_cmd *cmd)
+/* Take a basic firmware command structure, format it
+ * and forward it for processing
+ */
+static int cx24116_cmd_execute(struct dvb_frontend *fe, struct cx24116_cmd *cmd)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	int i, ret;
@@ -499,49 +528,49 @@
 	dprintk("%s()\n", __func__);
 
 	/* Load the firmware if required */
-	if ( (ret = cx24116_firmware_ondemand(fe)) != 0)
-	{
-		printk("%s(): Unable initialise the firmware\n", __func__);
+	ret = cx24116_firmware_ondemand(fe);
+	if (ret != 0) {
+		printk(KERN_ERR "%s(): Unable initialise the firmware\n",
+			__func__);
 		return ret;
 	}
 
 	/* Write the command */
-	for(i = 0; i < cmd->len ; i++)
-	{
+	for (i = 0; i < cmd->len ; i++) {
 		dprintk("%s: 0x%02x == 0x%02x\n", __func__, i, cmd->args[i]);
 		cx24116_writereg(state, i, cmd->args[i]);
 	}
 
 	/* Start execution and wait for cmd to terminate */
 	cx24116_writereg(state, CX24116_REG_EXECUTE, 0x01);
-	while( cx24116_readreg(state, CX24116_REG_EXECUTE) )
-	{
+	while (cx24116_readreg(state, CX24116_REG_EXECUTE)) {
 		msleep(10);
-		if(i++ > 64)
-		{
-			/* Avoid looping forever if the firmware does no respond */
-			printk("%s() Firmware not responding\n", __func__);
+		if (i++ > 64) {
+			/* Avoid looping forever if the firmware does
+				not respond */
+			printk(KERN_WARNING "%s() Firmware not responding\n",
+				__func__);
 			return -EREMOTEIO;
 		}
 	}
 	return 0;
 }
 
-static int cx24116_load_firmware (struct dvb_frontend* fe, const struct firmware *fw)
+static int cx24116_load_firmware(struct dvb_frontend *fe,
+	const struct firmware *fw)
 {
-	struct cx24116_state* state = fe->demodulator_priv;
+	struct cx24116_state *state = fe->demodulator_priv;
 	struct cx24116_cmd cmd;
 	int i, ret;
 	unsigned char vers[4];
 
 	dprintk("%s\n", __func__);
-	dprintk("Firmware is %zu bytes (%02x %02x .. %02x %02x)\n"
-			,fw->size
-			,fw->data[0]
-			,fw->data[1]
-			,fw->data[ fw->size-2 ]
-			,fw->data[ fw->size-1 ]
-			);
+	dprintk("Firmware is %zu bytes (%02x %02x .. %02x %02x)\n",
+			fw->size,
+			fw->data[0],
+			fw->data[1],
+			fw->data[fw->size-2],
+			fw->data[fw->size-1]);
 
 	/* Toggle 88x SRST pin to reset demod */
 	if (state->config->reset_device)
@@ -587,7 +616,7 @@
 	cmd.args[0x07] = 0x9d;
 	cmd.args[0x08] = 0xfc;
 	cmd.args[0x09] = 0x06;
-	cmd.len= 0x0a;
+	cmd.len = 0x0a;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
@@ -598,7 +627,7 @@
 	cmd.args[0x00] = CMD_TUNERINIT;
 	cmd.args[0x01] = 0x00;
 	cmd.args[0x02] = 0x00;
-	cmd.len= 0x03;
+	cmd.len = 0x03;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
@@ -615,36 +644,38 @@
 	else
 		cmd.args[0x04] = 0x02;
 	cmd.args[0x05] = 0x00;
-	cmd.len= 0x06;
+	cmd.len = 0x06;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
 
 	/* Firmware CMD 35: Get firmware version */
 	cmd.args[0x00] = CMD_UPDFWVERS;
-	cmd.len= 0x02;
-	for(i=0; i<4; i++) {
+	cmd.len = 0x02;
+	for (i = 0; i < 4; i++) {
 		cmd.args[0x01] = i;
 		ret = cx24116_cmd_execute(fe, &cmd);
 		if (ret != 0)
 			return ret;
-		vers[i]= cx24116_readreg(state, CX24116_REG_MAILBOX);
+		vers[i] = cx24116_readreg(state, CX24116_REG_MAILBOX);
 	}
-	printk("%s: FW version %i.%i.%i.%i\n", __func__,
+	printk(KERN_INFO "%s: FW version %i.%i.%i.%i\n", __func__,
 		vers[0], vers[1], vers[2], vers[3]);
 
 	return 0;
 }
 
-static int cx24116_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
+static int cx24116_set_voltage(struct dvb_frontend *fe,
+	fe_sec_voltage_t voltage)
 {
 	/* The isl6421 module will override this function in the fops. */
-	dprintk("%s() This should never appear if the isl6421 module is loaded correctly\n",__func__);
+	dprintk("%s() This should never appear if the isl6421 module "
+		"is loaded correctly\n", __func__);
 
 	return -EOPNOTSUPP;
 }
 
-static int cx24116_read_status(struct dvb_frontend* fe, fe_status_t* status)
+static int cx24116_read_status(struct dvb_frontend *fe, fe_status_t *status)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 
@@ -666,22 +697,23 @@
 	return 0;
 }
 
-static int cx24116_read_ber(struct dvb_frontend* fe, u32* ber)
+static int cx24116_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 
 	dprintk("%s()\n", __func__);
 
-	*ber =  ( cx24116_readreg(state, CX24116_REG_BER24) << 24 ) |
-		( cx24116_readreg(state, CX24116_REG_BER16) << 16 ) |
-		( cx24116_readreg(state, CX24116_REG_BER8 ) << 8  ) |
-		  cx24116_readreg(state, CX24116_REG_BER0 );
+	*ber =  (cx24116_readreg(state, CX24116_REG_BER24) << 24) |
+		(cx24116_readreg(state, CX24116_REG_BER16) << 16) |
+		(cx24116_readreg(state, CX24116_REG_BER8)  << 8)  |
+		 cx24116_readreg(state, CX24116_REG_BER0);
 
 	return 0;
 }
 
 /* TODO Determine function and scale appropriately */
-static int cx24116_read_signal_strength(struct dvb_frontend* fe, u16* signal_strength)
+static int cx24116_read_signal_strength(struct dvb_frontend *fe,
+	u16 *signal_strength)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	struct cx24116_cmd cmd;
@@ -692,39 +724,43 @@
 
 	/* Firmware CMD 19: Get AGC */
 	cmd.args[0x00] = CMD_GETAGC;
-	cmd.len= 0x01;
+	cmd.len = 0x01;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
 
-	sig_reading = ( cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK ) |
-		( cx24116_readreg(state, CX24116_REG_SIGNAL) << 6 );
-	*signal_strength= 0 - sig_reading;
+	sig_reading =
+		(cx24116_readreg(state,
+			CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK) |
+		(cx24116_readreg(state, CX24116_REG_SIGNAL) << 6);
+	*signal_strength = 0 - sig_reading;
 
-	dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__, sig_reading, *signal_strength);
+	dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n",
+		__func__, sig_reading, *signal_strength);
 
 	return 0;
 }
 
 /* SNR (0..100)% = (sig & 0xf0) * 10 + (sig & 0x0f) * 10 / 16 */
-static int cx24116_read_snr_pct(struct dvb_frontend* fe, u16* snr)
+static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	u8 snr_reading;
 	static const u32 snr_tab[] = { /* 10 x Table (rounded up) */
-		0x00000,0x0199A,0x03333,0x04ccD,0x06667,
-			0x08000,0x0999A,0x0b333,0x0cccD,0x0e667,
-		0x10000,0x1199A,0x13333,0x14ccD,0x16667,0x18000 };
+		0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667,
+		0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667,
+		0x10000, 0x1199A, 0x13333, 0x14ccD, 0x16667,
+		0x18000 };
 
 	dprintk("%s()\n", __func__);
 
 	snr_reading = cx24116_readreg(state, CX24116_REG_QUALITY0);
 
-	if(snr_reading >= 0xa0 /* 100% */)
+	if (snr_reading >= 0xa0 /* 100% */)
 		*snr = 0xffff;
 	else
-		*snr = snr_tab [ ( snr_reading & 0xf0 )   >> 4 ] +
-			( snr_tab [ ( snr_reading & 0x0f ) ] >> 4 );
+		*snr = snr_tab[(snr_reading & 0xf0) >> 4] +
+			(snr_tab[(snr_reading & 0x0f)] >> 4);
 
 	dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__,
 		snr_reading, *snr);
@@ -736,7 +772,7 @@
  * ESNO, from 0->30db (values 0->300). We provide this value by
  * default.
  */
-static int cx24116_read_snr_esno(struct dvb_frontend* fe, u16* snr)
+static int cx24116_read_snr_esno(struct dvb_frontend *fe, u16 *snr)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 
@@ -750,7 +786,7 @@
 	return 0;
 }
 
-static int cx24116_read_snr(struct dvb_frontend* fe, u16* snr)
+static int cx24116_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
 	if (esno_snr == 1)
 		return cx24116_read_snr_esno(fe, snr);
@@ -758,27 +794,27 @@
 		return cx24116_read_snr_pct(fe, snr);
 }
 
-static int cx24116_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+static int cx24116_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 
 	dprintk("%s()\n", __func__);
 
-	*ucblocks = ( cx24116_readreg(state, CX24116_REG_UCB8) << 8 ) |
+	*ucblocks = (cx24116_readreg(state, CX24116_REG_UCB8) << 8) |
 		cx24116_readreg(state, CX24116_REG_UCB0);
 
 	return 0;
 }
 
 /* Overwrite the current tuning params, we are about to tune */
-static void cx24116_clone_params(struct dvb_frontend* fe)
+static void cx24116_clone_params(struct dvb_frontend *fe)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	memcpy(&state->dcur, &state->dnxt, sizeof(state->dcur));
 }
 
 /* Wait for LNB */
-static int cx24116_wait_for_lnb(struct dvb_frontend* fe)
+static int cx24116_wait_for_lnb(struct dvb_frontend *fe)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	int i;
@@ -787,7 +823,7 @@
 		cx24116_readreg(state, CX24116_REG_QSTATUS));
 
 	/* Wait for up to 300 ms */
-	for(i = 0; i < 30 ; i++) {
+	for (i = 0; i < 30 ; i++) {
 		if (cx24116_readreg(state, CX24116_REG_QSTATUS) & 0x20)
 			return 0;
 		msleep(10);
@@ -798,20 +834,21 @@
 	return -ETIMEDOUT; /* -EBUSY ? */
 }
 
-static int cx24116_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
+static int cx24116_set_tone(struct dvb_frontend *fe,
+	fe_sec_tone_mode_t tone)
 {
 	struct cx24116_cmd cmd;
 	int ret;
 
 	dprintk("%s(%d)\n", __func__, tone);
-	if ( (tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF) ) {
-		printk("%s: Invalid, tone=%d\n", __func__, tone);
+	if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
+		printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone);
 		return -EINVAL;
 	}
 
 	/* Wait for LNB ready */
 	ret = cx24116_wait_for_lnb(fe);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 
 	/* Min delay time after DiSEqC send */
@@ -820,7 +857,7 @@
 	/* This is always done before the tone is set */
 	cmd.args[0x00] = CMD_SET_TONEPRE;
 	cmd.args[0x01] = 0x00;
-	cmd.len= 0x02;
+	cmd.len = 0x02;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
@@ -836,11 +873,11 @@
 		cmd.args[0x03] = 0x01;
 		break;
 	case SEC_TONE_OFF:
-		dprintk("%s: setting tone off\n",__func__);
+		dprintk("%s: setting tone off\n", __func__);
 		cmd.args[0x03] = 0x00;
 		break;
 	}
-	cmd.len= 0x04;
+	cmd.len = 0x04;
 
 	/* Min delay time before DiSEqC send */
 	msleep(15); /* XXX determine if FW does this, see send_diseqc/burst */
@@ -849,7 +886,7 @@
 }
 
 /* Initialise DiSEqC */
-static int cx24116_diseqc_init(struct dvb_frontend* fe)
+static int cx24116_diseqc_init(struct dvb_frontend *fe)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	struct cx24116_cmd cmd;
@@ -864,7 +901,7 @@
 	cmd.args[0x05] = 0x28;
 	cmd.args[0x06] = (toneburst == CX24116_DISEQC_TONEOFF) ? 0x00 : 0x01;
 	cmd.args[0x07] = 0x01;
-	cmd.len= 0x08;
+	cmd.len = 0x08;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
@@ -878,36 +915,38 @@
 	/* Unknown */
 	state->dsec_cmd.args[CX24116_DISEQC_ARG2_2] = 0x02;
 	state->dsec_cmd.args[CX24116_DISEQC_ARG3_0] = 0x00;
-	state->dsec_cmd.args[CX24116_DISEQC_ARG4_0] = 0x00; /* Continuation flag? */
+	/* Continuation flag? */
+	state->dsec_cmd.args[CX24116_DISEQC_ARG4_0] = 0x00;
 
 	/* DiSEqC message length */
 	state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = 0x00;
 
 	/* Command length */
-	state->dsec_cmd.len= CX24116_DISEQC_MSGOFS;
+	state->dsec_cmd.len = CX24116_DISEQC_MSGOFS;
 
 	return 0;
 }
 
 /* Send DiSEqC message with derived burst (hack) || previous burst */
-static int cx24116_send_diseqc_msg(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *d)
+static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+	struct dvb_diseqc_master_cmd *d)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	int i, ret;
 
 	/* Dump DiSEqC message */
 	if (debug) {
-		printk("cx24116: %s(", __func__);
-		for(i = 0 ; i < d->msg_len ;) {
-			printk("0x%02x", d->msg[i]);
-			if(++i < d->msg_len)
-				printk(", ");
-			}
+		printk(KERN_INFO "cx24116: %s(", __func__);
+		for (i = 0 ; i < d->msg_len ;) {
+			printk(KERN_INFO "0x%02x", d->msg[i]);
+			if (++i < d->msg_len)
+				printk(KERN_INFO ", ");
+		}
 		printk(") toneburst=%d\n", toneburst);
 	}
 
 	/* Validate length */
-	if(d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
+	if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
 		return -EINVAL;
 
 	/* DiSEqC message */
@@ -918,18 +957,19 @@
 	state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len;
 
 	/* Command length */
-	state->dsec_cmd.len= CX24116_DISEQC_MSGOFS + state->dsec_cmd.args[CX24116_DISEQC_MSGLEN];
+	state->dsec_cmd.len = CX24116_DISEQC_MSGOFS +
+		state->dsec_cmd.args[CX24116_DISEQC_MSGLEN];
 
 	/* DiSEqC toneburst */
-	if(toneburst == CX24116_DISEQC_MESGCACHE)
+	if (toneburst == CX24116_DISEQC_MESGCACHE)
 		/* Message is cached */
 		return 0;
 
-	else if(toneburst == CX24116_DISEQC_TONEOFF)
+	else if (toneburst == CX24116_DISEQC_TONEOFF)
 		/* Message is sent without burst */
 		state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0;
 
-	else if(toneburst == CX24116_DISEQC_TONECACHE) {
+	else if (toneburst == CX24116_DISEQC_TONECACHE) {
 		/*
 		 * Message is sent with derived else cached burst
 		 *
@@ -948,15 +988,17 @@
 		 *              Y = VOLTAGE             (0=13V, 1=18V)
 		 *              Z = BAND                (0=LOW, 1=HIGH(22K))
 		 */
-		if(d->msg_len >= 4 && d->msg[2] == 0x38)
-			state->dsec_cmd.args[CX24116_DISEQC_BURST] = ((d->msg[3] & 4) >> 2);
-		if(debug)
-			dprintk("%s burst=%d\n", __func__, state->dsec_cmd.args[CX24116_DISEQC_BURST]);
+		if (d->msg_len >= 4 && d->msg[2] == 0x38)
+			state->dsec_cmd.args[CX24116_DISEQC_BURST] =
+				((d->msg[3] & 4) >> 2);
+		if (debug)
+			dprintk("%s burst=%d\n", __func__,
+				state->dsec_cmd.args[CX24116_DISEQC_BURST]);
 	}
 
 	/* Wait for LNB ready */
 	ret = cx24116_wait_for_lnb(fe);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 
 	/* Wait for voltage/min repeat delay */
@@ -964,7 +1006,7 @@
 
 	/* Command */
 	ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 	/*
 	 * Wait for send
@@ -976,29 +1018,33 @@
 	 *  12.5ms burst        +
 	 * >15ms delay            (XXX determine if FW does this, see set_tone)
 	 */
-	msleep( (state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + ((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60) );
+	msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) +
+		((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60));
 
 	return 0;
 }
 
 /* Send DiSEqC burst */
-static int cx24116_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst)
+static int cx24116_diseqc_send_burst(struct dvb_frontend *fe,
+	fe_sec_mini_cmd_t burst)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	int ret;
 
-	dprintk("%s(%d) toneburst=%d\n",__func__, burst, toneburst);
+	dprintk("%s(%d) toneburst=%d\n", __func__, burst, toneburst);
 
 	/* DiSEqC burst */
 	if (burst == SEC_MINI_A)
-		state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_A;
-	else if(burst == SEC_MINI_B)
-		state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_B;
+		state->dsec_cmd.args[CX24116_DISEQC_BURST] =
+			CX24116_DISEQC_MINI_A;
+	else if (burst == SEC_MINI_B)
+		state->dsec_cmd.args[CX24116_DISEQC_BURST] =
+			CX24116_DISEQC_MINI_B;
 	else
 		return -EINVAL;
 
 	/* DiSEqC toneburst */
-	if(toneburst != CX24116_DISEQC_MESGCACHE)
+	if (toneburst != CX24116_DISEQC_MESGCACHE)
 		/* Burst is cached */
 		return 0;
 
@@ -1006,7 +1052,7 @@
 
 	/* Wait for LNB ready */
 	ret = cx24116_wait_for_lnb(fe);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 
 	/* Wait for voltage/min repeat delay */
@@ -1014,7 +1060,7 @@
 
 	/* Command */
 	ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 
 	/*
@@ -1027,34 +1073,32 @@
 	 *  12.5ms burst        +
 	 * >15ms delay            (XXX determine if FW does this, see set_tone)
 	 */
-	msleep( (state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + 60 );
+	msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + 60);
 
 	return 0;
 }
 
-static void cx24116_release(struct dvb_frontend* fe)
+static void cx24116_release(struct dvb_frontend *fe)
 {
-	struct cx24116_state* state = fe->demodulator_priv;
-	dprintk("%s\n",__func__);
+	struct cx24116_state *state = fe->demodulator_priv;
+	dprintk("%s\n", __func__);
 	kfree(state);
 }
 
 static struct dvb_frontend_ops cx24116_ops;
 
-struct dvb_frontend* cx24116_attach(const struct cx24116_config* config,
-				    struct i2c_adapter* i2c)
+struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
+	struct i2c_adapter *i2c)
 {
-	struct cx24116_state* state = NULL;
+	struct cx24116_state *state = NULL;
 	int ret;
 
-	dprintk("%s\n",__func__);
+	dprintk("%s\n", __func__);
 
 	/* allocate memory for the internal state */
 	state = kmalloc(sizeof(struct cx24116_state), GFP_KERNEL);
-	if (state == NULL) {
-		printk("Unable to kmalloc\n");
+	if (state == NULL)
 		goto error1;
-	}
 
 	/* setup the state */
 	memset(state, 0, sizeof(struct cx24116_state));
@@ -1063,32 +1107,36 @@
 	state->i2c = i2c;
 
 	/* check if the demod is present */
-	ret = (cx24116_readreg(state, 0xFF) << 8) | cx24116_readreg(state, 0xFE);
+	ret = (cx24116_readreg(state, 0xFF) << 8) |
+		cx24116_readreg(state, 0xFE);
 	if (ret != 0x0501) {
-		printk("Invalid probe, probably not a CX24116 device\n");
+		printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n");
 		goto error2;
 	}
 
 	/* create dvb_frontend */
-	memcpy(&state->frontend.ops, &cx24116_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->frontend.ops, &cx24116_ops,
+		sizeof(struct dvb_frontend_ops));
 	state->frontend.demodulator_priv = state;
 	return &state->frontend;
 
 error2: kfree(state);
 error1: return NULL;
 }
+EXPORT_SYMBOL(cx24116_attach);
+
 /*
  * Initialise or wake up device
  *
  * Power config will reset and load initial firmware if required
  */
-static int cx24116_initfe(struct dvb_frontend* fe)
+static int cx24116_initfe(struct dvb_frontend *fe)
 {
-	struct cx24116_state* state = fe->demodulator_priv;
+	struct cx24116_state *state = fe->demodulator_priv;
 	struct cx24116_cmd cmd;
 	int ret;
 
-	dprintk("%s()\n",__func__);
+	dprintk("%s()\n", __func__);
 
 	/* Power on */
 	cx24116_writereg(state, 0xe0, 0);
@@ -1098,9 +1146,9 @@
 	/* Firmware CMD 36: Power config */
 	cmd.args[0x00] = CMD_TUNERSLEEP;
 	cmd.args[0x01] = 0;
-	cmd.len= 0x02;
+	cmd.len = 0x02;
 	ret = cx24116_cmd_execute(fe, &cmd);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 
 	return cx24116_diseqc_init(fe);
@@ -1109,20 +1157,20 @@
 /*
  * Put device to sleep
  */
-static int cx24116_sleep(struct dvb_frontend* fe)
+static int cx24116_sleep(struct dvb_frontend *fe)
 {
-	struct cx24116_state* state = fe->demodulator_priv;
+	struct cx24116_state *state = fe->demodulator_priv;
 	struct cx24116_cmd cmd;
 	int ret;
 
-	dprintk("%s()\n",__func__);
+	dprintk("%s()\n", __func__);
 
 	/* Firmware CMD 36: Power config */
 	cmd.args[0x00] = CMD_TUNERSLEEP;
 	cmd.args[0x01] = 1;
-	cmd.len= 0x02;
+	cmd.len = 0x02;
 	ret = cx24116_cmd_execute(fe, &cmd);
-	if(ret != 0)
+	if (ret != 0)
 		return ret;
 
 	/* Power off (Shutdown clocks) */
@@ -1133,13 +1181,15 @@
 	return 0;
 }
 
-static int cx24116_set_property(struct dvb_frontend *fe, struct dtv_property* tvp)
+static int cx24116_set_property(struct dvb_frontend *fe,
+	struct dtv_property *tvp)
 {
 	dprintk("%s(..)\n", __func__);
 	return 0;
 }
 
-static int cx24116_get_property(struct dvb_frontend *fe, struct dtv_property* tvp)
+static int cx24116_get_property(struct dvb_frontend *fe,
+	struct dtv_property *tvp)
 {
 	dprintk("%s(..)\n", __func__);
 	return 0;
@@ -1148,7 +1198,8 @@
 /* dvb-core told us to tune, the tv property cache will be complete,
  * it's safe for us to pull values and use them for tuning purposes.
  */
-static int cx24116_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx24116_set_frontend(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p)
 {
 	struct cx24116_state *state = fe->demodulator_priv;
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -1156,96 +1207,102 @@
 	fe_status_t tunerstat;
 	int i, status, ret, retune;
 
-	dprintk("%s()\n",__func__);
+	dprintk("%s()\n", __func__);
 
-	switch(c->delivery_system) {
-		case SYS_DVBS:
-			dprintk("%s: DVB-S delivery system selected\n",__func__);
+	switch (c->delivery_system) {
+	case SYS_DVBS:
+		dprintk("%s: DVB-S delivery system selected\n", __func__);
 
-			/* Only QPSK is supported for DVB-S */
-			if(c->modulation != QPSK) {
-				dprintk("%s: unsupported modulation selected (%d)\n",
-					__func__, c->modulation);
-				return -EOPNOTSUPP;
-			}
+		/* Only QPSK is supported for DVB-S */
+		if (c->modulation != QPSK) {
+			dprintk("%s: unsupported modulation selected (%d)\n",
+				__func__, c->modulation);
+			return -EOPNOTSUPP;
+		}
 
-			/* Pilot doesn't exist in DVB-S, turn bit off */
+		/* Pilot doesn't exist in DVB-S, turn bit off */
+		state->dnxt.pilot_val = CX24116_PILOT_OFF;
+		retune = 1;
+
+		/* DVB-S only supports 0.35 */
+		if (c->rolloff != ROLLOFF_35) {
+			dprintk("%s: unsupported rolloff selected (%d)\n",
+				__func__, c->rolloff);
+			return -EOPNOTSUPP;
+		}
+		state->dnxt.rolloff_val = CX24116_ROLLOFF_035;
+		break;
+
+	case SYS_DVBS2:
+		dprintk("%s: DVB-S2 delivery system selected\n", __func__);
+
+		/*
+		 * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2,
+		 * but not hardware auto detection
+		 */
+		if (c->modulation != PSK_8 && c->modulation != QPSK) {
+			dprintk("%s: unsupported modulation selected (%d)\n",
+				__func__, c->modulation);
+			return -EOPNOTSUPP;
+		}
+
+		switch (c->pilot) {
+		case PILOT_AUTO:	/* Not supported but emulated */
+			state->dnxt.pilot_val = (c->modulation == QPSK)
+				? CX24116_PILOT_OFF : CX24116_PILOT_ON;
+			retune = 2;
+			break;
+		case PILOT_OFF:
 			state->dnxt.pilot_val = CX24116_PILOT_OFF;
-			retune = 1;
+			break;
+		case PILOT_ON:
+			state->dnxt.pilot_val = CX24116_PILOT_ON;
+			break;
+		default:
+			dprintk("%s: unsupported pilot mode selected (%d)\n",
+				__func__, c->pilot);
+			return -EOPNOTSUPP;
+		}
 
-			/* DVB-S only supports 0.35 */
-			if(c->rolloff != ROLLOFF_35) {
-				dprintk("%s: unsupported rolloff selected (%d)\n",
-					__func__, c->rolloff);
-				return -EOPNOTSUPP;
-			}
+		switch (c->rolloff) {
+		case ROLLOFF_20:
+			state->dnxt.rolloff_val = CX24116_ROLLOFF_020;
+			break;
+		case ROLLOFF_25:
+			state->dnxt.rolloff_val = CX24116_ROLLOFF_025;
+			break;
+		case ROLLOFF_35:
 			state->dnxt.rolloff_val = CX24116_ROLLOFF_035;
 			break;
-
-		case SYS_DVBS2:
-			dprintk("%s: DVB-S2 delivery system selected\n",__func__);
-
-			/*
-			 * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2,
-			 * but not hardware auto detection
-			 */
-			if(c->modulation != PSK_8 && c->modulation != QPSK) {
-				dprintk("%s: unsupported modulation selected (%d)\n",
-					__func__, c->modulation);
-				return -EOPNOTSUPP;
-			}
-
-			switch(c->pilot) {
-				case PILOT_AUTO:	/* Not supported but emulated */
-					retune = 2;	/* Fall-through */
-				case PILOT_OFF:
-					state->dnxt.pilot_val = CX24116_PILOT_OFF;
-					break;
-				case PILOT_ON:
-					state->dnxt.pilot_val = CX24116_PILOT_ON;
-					break;
-				default:
-					dprintk("%s: unsupported pilot mode selected (%d)\n",
-						__func__, c->pilot);
-					return -EOPNOTSUPP;
-			}
-
-			switch(c->rolloff) {
-				case ROLLOFF_20:
-					state->dnxt.rolloff_val= CX24116_ROLLOFF_020;
-					break;
-				case ROLLOFF_25:
-					state->dnxt.rolloff_val= CX24116_ROLLOFF_025;
-					break;
-				case ROLLOFF_35:
-					state->dnxt.rolloff_val= CX24116_ROLLOFF_035;
-					break;
-				case ROLLOFF_AUTO:	/* Rolloff must be explicit */
-				default:
-					dprintk("%s: unsupported rolloff selected (%d)\n",
-						__func__, c->rolloff);
-					return -EOPNOTSUPP;
-			}
-			break;
-
+		case ROLLOFF_AUTO:	/* Rolloff must be explicit */
 		default:
-			dprintk("%s: unsupported delivery system selected (%d)\n",
-				__func__, c->delivery_system);
+			dprintk("%s: unsupported rolloff selected (%d)\n",
+				__func__, c->rolloff);
 			return -EOPNOTSUPP;
+		}
+		break;
+
+	default:
+		dprintk("%s: unsupported delivery system selected (%d)\n",
+			__func__, c->delivery_system);
+		return -EOPNOTSUPP;
 	}
 	state->dnxt.modulation = c->modulation;
 	state->dnxt.frequency = c->frequency;
 	state->dnxt.pilot = c->pilot;
 	state->dnxt.rolloff = c->rolloff;
 
-	if ((ret = cx24116_set_inversion(state, c->inversion)) !=  0)
+	ret = cx24116_set_inversion(state, c->inversion);
+	if (ret !=  0)
 		return ret;
 
 	/* FEC_NONE/AUTO for DVB-S2 is not supported and detected here */
-	if ((ret = cx24116_set_fec(state, c->modulation, c->fec_inner)) !=  0)
+	ret = cx24116_set_fec(state, c->modulation, c->fec_inner);
+	if (ret !=  0)
 		return ret;
 
-	if ((ret = cx24116_set_symbolrate(state, c->symbol_rate)) !=  0)
+	ret = cx24116_set_symbolrate(state, c->symbol_rate);
+	if (ret !=  0)
 		return ret;
 
 	/* discard the 'current' tuning parameters and prepare to tune */
@@ -1271,7 +1328,7 @@
 	/* Set/Reset B/W */
 	cmd.args[0x00] = CMD_BANDWIDTH;
 	cmd.args[0x01] = 0x01;
-	cmd.len= 0x02;
+	cmd.len = 0x02;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
@@ -1319,7 +1376,7 @@
 		cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00);
 	}
 
-	cmd.len= 0x13;
+	cmd.len = 0x13;
 
 	/* We need to support pilot and non-pilot tuning in the
 	 * driver automatically. This is a workaround for because
@@ -1327,12 +1384,13 @@
 	 */
 	do {
 		/* Reset status register */
-		status = cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK;
+		status = cx24116_readreg(state, CX24116_REG_SSTATUS)
+			& CX24116_SIGNAL_MASK;
 		cx24116_writereg(state, CX24116_REG_SSTATUS, status);
 
 		/* Tune */
 		ret = cx24116_cmd_execute(fe, &cmd);
-		if( ret != 0 )
+		if (ret != 0)
 			break;
 
 		/*
@@ -1341,28 +1399,27 @@
 		 * If we are able to tune then generally it occurs within 100ms.
 		 * If it takes longer, try a different toneburst setting.
 		 */
-		for(i = 0; i < 50 ; i++) {
+		for (i = 0; i < 50 ; i++) {
 			cx24116_read_status(fe, &tunerstat);
 			status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC);
-			if(status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) {
-				dprintk("%s: Tuned\n",__func__);
+			if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) {
+				dprintk("%s: Tuned\n", __func__);
 				goto tuned;
 			}
 			msleep(10);
 		}
 
-		dprintk("%s: Not tuned\n",__func__);
+		dprintk("%s: Not tuned\n", __func__);
 
 		/* Toggle pilot bit when in auto-pilot */
-		if(state->dcur.pilot == PILOT_AUTO)
+		if (state->dcur.pilot == PILOT_AUTO)
 			cmd.args[0x07] ^= CX24116_PILOT_ON;
-	}
-	while(--retune);
+	} while (--retune);
 
 tuned:  /* Set/Reset B/W */
 	cmd.args[0x00] = CMD_BANDWIDTH;
 	cmd.args[0x01] = 0x00;
-	cmd.len= 0x02;
+	cmd.len = 0x02;
 	ret = cx24116_cmd_execute(fe, &cmd);
 	if (ret != 0)
 		return ret;
@@ -1407,17 +1464,7 @@
 	.set_frontend = cx24116_set_frontend,
 };
 
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-
-module_param(toneburst, int, 0644);
-MODULE_PARM_DESC(toneburst, "DiSEqC toneburst 0=OFF, 1=TONE CACHE, 2=MESSAGE CACHE (default:1)");
-
-module_param(esno_snr, int, 0644);
-MODULE_PARM_DESC(debug, "SNR return units, 0=PERCENTAGE 0-100, 1=ESNO(db * 10) (default:0)");
-
 MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24116/cx24118 hardware");
 MODULE_AUTHOR("Steven Toth");
 MODULE_LICENSE("GPL");
 
-EXPORT_SYMBOL(cx24116_attach);
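The reworked cx24116_set_frontend() above turns the old PILOT_AUTO fall-through into an explicit two-pass loop: the first pass guesses pilot off for QPSK and on for 8PSK, and if no lock is seen within roughly 500 ms the pilot bit in the tune command is toggled and the command re-issued. A minimal userspace sketch of that retry shape follows; tune_once(), PILOT_ON and the lock check are stand-ins invented for illustration, not the driver's real interfaces.

#include <stdio.h>
#include <stdbool.h>

#define PILOT_ON 0x40			/* stand-in for CX24116_PILOT_ON */

/* Pretend hardware: locks only when the pilot guess matches the mux. */
static bool tune_once(unsigned char pilot_bit, bool mux_uses_pilot)
{
	return (pilot_bit != 0) == mux_uses_pilot;
}

static int tune_with_pilot_retry(bool auto_pilot, bool guess_pilot_on,
				 bool mux_uses_pilot)
{
	unsigned char arg = guess_pilot_on ? PILOT_ON : 0;
	int retune = auto_pilot ? 2 : 1;	/* two passes only in auto mode */

	do {
		if (tune_once(arg, mux_uses_pilot)) {
			printf("locked with pilot %s\n", arg ? "on" : "off");
			return 0;
		}
		if (auto_pilot)
			arg ^= PILOT_ON;	/* flip the guess, try again */
	} while (--retune);

	printf("no lock\n");
	return -1;
}

int main(void)
{
	/* Wrong first guess (pilot off), but auto mode recovers on pass two. */
	return tune_with_pilot_retry(true, false, true);
}

The wrong first guess still converges on the second pass, which is exactly what the retune counter in the driver buys.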
diff --git a/drivers/media/dvb/frontends/cx24116.h b/drivers/media/dvb/frontends/cx24116.h
index 8dbcec2..4cb3ddd 100644
--- a/drivers/media/dvb/frontends/cx24116.h
+++ b/drivers/media/dvb/frontends/cx24116.h
@@ -23,31 +23,32 @@
 
 #include <linux/dvb/frontend.h>
 
-struct cx24116_config
-{
+struct cx24116_config {
 	/* the demodulator's i2c address */
 	u8 demod_address;
 
 	/* Need to set device param for start_dma */
-	int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
+	int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
 
 	/* Need to reset device during firmware loading */
-	int (*reset_device)(struct dvb_frontend* fe);
+	int (*reset_device)(struct dvb_frontend *fe);
 
 	/* Need to set MPEG parameters */
 	u8 mpg_clk_pos_pol:0x02;
 };
 
 #if defined(CONFIG_DVB_CX24116) || defined(CONFIG_DVB_CX24116_MODULE)
-extern struct dvb_frontend* cx24116_attach(const struct cx24116_config* config,
-					   struct i2c_adapter* i2c);
+extern struct dvb_frontend *cx24116_attach(
+	const struct cx24116_config *config,
+	struct i2c_adapter *i2c);
 #else
-static inline struct dvb_frontend* cx24116_attach(const struct cx24116_config* config,
-						  struct i2c_adapter* i2c)
+static inline struct dvb_frontend *cx24116_attach(
+	const struct cx24116_config *config,
+	struct i2c_adapter *i2c)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
-#endif // CONFIG_DVB_CX24116
+#endif
 
 #endif /* CX24116_H */
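cx24116.h keeps the usual Kconfig guard around the attach prototype: with the driver enabled (built in or as a module) callers see the real cx24116_attach(), otherwise they get a static inline stub that warns and returns NULL so board code still links. The standalone sketch below mimics that shape with an invented CONFIG_DEMO_FRONTEND symbol so both branches can be exercised outside the kernel.

/* Build with -DCONFIG_DEMO_FRONTEND to take the "enabled" branch. */
#include <stdio.h>
#include <stddef.h>

struct demo_frontend { const char *name; };

#if defined(CONFIG_DEMO_FRONTEND) || defined(CONFIG_DEMO_FRONTEND_MODULE)
static struct demo_frontend real_fe = { .name = "demo" };

static struct demo_frontend *demo_attach(void)
{
	return &real_fe;	/* driver compiled in: hand back the frontend */
}
#else
static inline struct demo_frontend *demo_attach(void)
{
	/* Mirrors the kernel stub: warn and report "no frontend". */
	fprintf(stderr, "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif

int main(void)
{
	struct demo_frontend *fe = demo_attach();

	printf("attach %s\n", fe ? "succeeded" : "failed");
	return 0;
}

The kernel version differs only in using printk(KERN_WARNING ...) and the real config symbols.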
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index 7156157..1a8c36f 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -33,7 +33,13 @@
 #define XTAL 10111000
 
 static int force_band;
+module_param(force_band, int, 0644);
+MODULE_PARM_DESC(force_band, "Force a specific band select "\
+	"(1-9, default:off).");
+
 static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
 
 #define info(args...) do { printk(KERN_INFO "CX24123: " args); } while (0)
 #define err(args...)  do { printk(KERN_ERR  "CX24123: " args); } while (0)
@@ -46,10 +52,9 @@
 		} \
 	} while (0)
 
-struct cx24123_state
-{
-	struct i2c_adapter* i2c;
-	const struct cx24123_config* config;
+struct cx24123_state {
+	struct i2c_adapter *i2c;
+	const struct cx24123_config *config;
 
 	struct dvb_frontend frontend;
 
@@ -70,8 +75,7 @@
 };
 
 /* Various tuner defaults need to be established for a given symbol rate Sps */
-static struct
-{
+static struct cx24123_AGC_val {
 	u32 symbolrate_low;
 	u32 symbolrate_high;
 	u32 VCAprogdata;
@@ -109,8 +113,7 @@
  * fixme: The bounds on the bands do not match the doc in real life.
  * fixme: Some of them have been moved, other might need adjustment.
  */
-static struct
-{
+static struct cx24123_bandselect_val {
 	u32 freq_low;
 	u32 freq_high;
 	u32 VCOdivider;
@@ -249,7 +252,8 @@
 
 	/* printk(KERN_DEBUG "wr(%02x): %02x %02x\n", i2c_addr, reg, data); */
 
-	if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
+	err = i2c_transfer(state->i2c, &msg, 1);
+	if (err != 1) {
 		printk("%s: writereg error(err == %i, reg == 0x%02x,"
 			 " data == 0x%02x)\n", __func__, err, reg, data);
 		return err;
@@ -284,7 +288,8 @@
 #define cx24123_writereg(state, reg, val) \
 	cx24123_i2c_writereg(state, state->config->demod_address, reg, val)
 
-static int cx24123_set_inversion(struct cx24123_state* state, fe_spectral_inversion_t inversion)
+static int cx24123_set_inversion(struct cx24123_state *state,
+	fe_spectral_inversion_t inversion)
 {
 	u8 nom_reg = cx24123_readreg(state, 0x0e);
 	u8 auto_reg = cx24123_readreg(state, 0x10);
@@ -311,7 +316,8 @@
 	return 0;
 }
 
-static int cx24123_get_inversion(struct cx24123_state* state, fe_spectral_inversion_t *inversion)
+static int cx24123_get_inversion(struct cx24123_state *state,
+	fe_spectral_inversion_t *inversion)
 {
 	u8 val;
 
@@ -328,18 +334,20 @@
 	return 0;
 }
 
-static int cx24123_set_fec(struct cx24123_state* state, fe_code_rate_t fec)
+static int cx24123_set_fec(struct cx24123_state *state, fe_code_rate_t fec)
 {
 	u8 nom_reg = cx24123_readreg(state, 0x0e) & ~0x07;
 
-	if ( (fec < FEC_NONE) || (fec > FEC_AUTO) )
+	if ((fec < FEC_NONE) || (fec > FEC_AUTO))
 		fec = FEC_AUTO;
 
 	/* Set the soft decision threshold */
-	if(fec == FEC_1_2)
-		cx24123_writereg(state, 0x43, cx24123_readreg(state, 0x43) | 0x01);
+	if (fec == FEC_1_2)
+		cx24123_writereg(state, 0x43,
+			cx24123_readreg(state, 0x43) | 0x01);
 	else
-		cx24123_writereg(state, 0x43, cx24123_readreg(state, 0x43) & ~0x01);
+		cx24123_writereg(state, 0x43,
+			cx24123_readreg(state, 0x43) & ~0x01);
 
 	switch (fec) {
 	case FEC_1_2:
@@ -388,11 +396,11 @@
 	return 0;
 }
 
-static int cx24123_get_fec(struct cx24123_state* state, fe_code_rate_t *fec)
+static int cx24123_get_fec(struct cx24123_state *state, fe_code_rate_t *fec)
 {
 	int ret;
 
-	ret = cx24123_readreg (state, 0x1b);
+	ret = cx24123_readreg(state, 0x1b);
 	if (ret < 0)
 		return ret;
 	ret = ret & 0x07;
@@ -433,16 +441,16 @@
 {
 	u32 exp, nearest = 0;
 	u32 div = a / b;
-	if(a % b >= b / 2) ++div;
-	if(div < (1 << 31))
-	{
-		for(exp = 1; div > exp; nearest++)
+	if (a % b >= b / 2)
+		++div;
+	if (div < (1 << 31)) {
+		for (exp = 1; div > exp; nearest++)
 			exp += exp;
 	}
 	return nearest;
 }
 
-static int cx24123_set_symbolrate(struct cx24123_state* state, u32 srate)
+static int cx24123_set_symbolrate(struct cx24123_state *state, u32 srate)
 {
 	u32 tmp, sample_rate, ratio, sample_gain;
 	u8 pll_mult;
@@ -498,9 +506,9 @@
 
 	cx24123_writereg(state, 0x01, pll_mult * 6);
 
-	cx24123_writereg(state, 0x08, (ratio >> 16) & 0x3f );
-	cx24123_writereg(state, 0x09, (ratio >>  8) & 0xff );
-	cx24123_writereg(state, 0x0a, (ratio      ) & 0xff );
+	cx24123_writereg(state, 0x08, (ratio >> 16) & 0x3f);
+	cx24123_writereg(state, 0x09, (ratio >> 8) & 0xff);
+	cx24123_writereg(state, 0x0a, ratio & 0xff);
 
 	/* also set the demodulator sample gain */
 	sample_gain = cx24123_int_log2(sample_rate, srate);
@@ -514,10 +522,12 @@
 }
 
 /*
- * Based on the required frequency and symbolrate, the tuner AGC has to be configured
- * and the correct band selected. Calculate those values
+ * Based on the required frequency and symbolrate, the tuner AGC has
+ * to be configured and the correct band selected.
+ * Calculate those values.
  */
-static int cx24123_pll_calculate(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx24123_pll_calculate(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	u32 ndiv = 0, adiv = 0, vco_div = 0;
@@ -525,6 +535,8 @@
 	int pump = 2;
 	int band = 0;
 	int num_bands = ARRAY_SIZE(cx24123_bandselect_vals);
+	struct cx24123_bandselect_val *bsv = NULL;
+	struct cx24123_AGC_val *agcv = NULL;
 
 	/* Defaults for low freq, low rate */
 	state->VCAarg = cx24123_AGC_vals[0].VCAprogdata;
@@ -532,58 +544,65 @@
 	state->bandselectarg = cx24123_bandselect_vals[0].progdata;
 	vco_div = cx24123_bandselect_vals[0].VCOdivider;
 
-	/* For the given symbol rate, determine the VCA, VGA and FILTUNE programming bits */
-	for (i = 0; i < ARRAY_SIZE(cx24123_AGC_vals); i++)
-	{
-		if ((cx24123_AGC_vals[i].symbolrate_low <= p->u.qpsk.symbol_rate) &&
-		    (cx24123_AGC_vals[i].symbolrate_high >= p->u.qpsk.symbol_rate) ) {
-			state->VCAarg = cx24123_AGC_vals[i].VCAprogdata;
-			state->VGAarg = cx24123_AGC_vals[i].VGAprogdata;
-			state->FILTune = cx24123_AGC_vals[i].FILTune;
+	/* For the given symbol rate, determine the VCA, VGA and
+	 * FILTUNE programming bits */
+	for (i = 0; i < ARRAY_SIZE(cx24123_AGC_vals); i++) {
+		agcv = &cx24123_AGC_vals[i];
+		if ((agcv->symbolrate_low <= p->u.qpsk.symbol_rate) &&
+		    (agcv->symbolrate_high >= p->u.qpsk.symbol_rate)) {
+			state->VCAarg = agcv->VCAprogdata;
+			state->VGAarg = agcv->VGAprogdata;
+			state->FILTune = agcv->FILTune;
 		}
 	}
 
 	/* determine the band to use */
-	if(force_band < 1 || force_band > num_bands)
-	{
-		for (i = 0; i < num_bands; i++)
-		{
-			if ((cx24123_bandselect_vals[i].freq_low <= p->frequency) &&
-			    (cx24123_bandselect_vals[i].freq_high >= p->frequency) )
+	if (force_band < 1 || force_band > num_bands) {
+		for (i = 0; i < num_bands; i++) {
+			bsv = &cx24123_bandselect_vals[i];
+			if ((bsv->freq_low <= p->frequency) &&
+				(bsv->freq_high >= p->frequency))
 				band = i;
 		}
-	}
-	else
+	} else
 		band = force_band - 1;
 
 	state->bandselectarg = cx24123_bandselect_vals[band].progdata;
 	vco_div = cx24123_bandselect_vals[band].VCOdivider;
 
 	/* determine the charge pump current */
-	if ( p->frequency < (cx24123_bandselect_vals[band].freq_low + cx24123_bandselect_vals[band].freq_high)/2 )
+	if (p->frequency < (cx24123_bandselect_vals[band].freq_low +
+		cx24123_bandselect_vals[band].freq_high) / 2)
 		pump = 0x01;
 	else
 		pump = 0x02;
 
 	/* Determine the N/A dividers for the requested lband freq (in kHz). */
-	/* Note: the reference divider R=10, frequency is in KHz, XTAL is in Hz */
-	ndiv = ( ((p->frequency * vco_div * 10) / (2 * XTAL / 1000)) / 32) & 0x1ff;
-	adiv = ( ((p->frequency * vco_div * 10) / (2 * XTAL / 1000)) % 32) & 0x1f;
+	/* Note: the reference divider R=10, frequency is in KHz,
+	 * XTAL is in Hz */
+	ndiv = (((p->frequency * vco_div * 10) /
+		(2 * XTAL / 1000)) / 32) & 0x1ff;
+	adiv = (((p->frequency * vco_div * 10) /
+		(2 * XTAL / 1000)) % 32) & 0x1f;
 
 	if (adiv == 0 && ndiv > 0)
 		ndiv--;
 
-	/* control bits 11, refdiv 11, charge pump polarity 1, charge pump current, ndiv, adiv */
-	state->pllarg = (3 << 19) | (3 << 17) | (1 << 16) | (pump << 14) | (ndiv << 5) | adiv;
+	/* control bits 11, refdiv 11, charge pump polarity 1,
+	 * charge pump current, ndiv, adiv */
+	state->pllarg = (3 << 19) | (3 << 17) | (1 << 16) |
+		(pump << 14) | (ndiv << 5) | adiv;
 
 	return 0;
 }
 
 /*
  * Tuner data is 21 bits long, must be left-aligned in data.
- * Tuner cx24109 is written through a dedicated 3wire interface on the demod chip.
+ * Tuner cx24109 is written through a dedicated 3wire interface
+ * on the demod chip.
  */
-static int cx24123_pll_writereg(struct dvb_frontend* fe, struct dvb_frontend_parameters *p, u32 data)
+static int cx24123_pll_writereg(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p, u32 data)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	unsigned long timeout;
@@ -610,7 +629,7 @@
 
 	/* send another 8 bytes, wait for the send to be completed */
 	timeout = jiffies + msecs_to_jiffies(40);
-	cx24123_writereg(state, 0x22, (data>>8) & 0xff );
+	cx24123_writereg(state, 0x22, (data >> 8) & 0xff);
 	while ((cx24123_readreg(state, 0x20) & 0x40) == 0) {
 		if (time_after(jiffies, timeout)) {
 			err("%s:  demodulator is not responding, "\
@@ -620,9 +639,10 @@
 		msleep(10);
 	}
 
-	/* send the lower 5 bits of this byte, padded with 3 LBB, wait for the send to be completed */
+	/* send the lower 5 bits of this byte, padded with 3 LBB,
+	 * wait for the send to be completed */
 	timeout = jiffies + msecs_to_jiffies(40);
-	cx24123_writereg(state, 0x22, (data) & 0xff );
+	cx24123_writereg(state, 0x22, (data) & 0xff);
 	while ((cx24123_readreg(state, 0x20) & 0x80)) {
 		if (time_after(jiffies, timeout)) {
 			err("%s:  demodulator is not responding," \
@@ -639,7 +659,8 @@
 	return 0;
 }
 
-static int cx24123_pll_tune(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx24123_pll_tune(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	u8 val;
@@ -690,7 +711,7 @@
 	return cx24123_writereg(state, 0x23, r);
 }
 
-static int cx24123_initfe(struct dvb_frontend* fe)
+static int cx24123_initfe(struct dvb_frontend *fe)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	int i;
@@ -699,19 +720,22 @@
 
 	/* Configure the demod to a good set of defaults */
 	for (i = 0; i < ARRAY_SIZE(cx24123_regdata); i++)
-		cx24123_writereg(state, cx24123_regdata[i].reg, cx24123_regdata[i].data);
+		cx24123_writereg(state, cx24123_regdata[i].reg,
+			cx24123_regdata[i].data);
 
 	/* Set the LNB polarity */
-	if(state->config->lnb_polarity)
-		cx24123_writereg(state, 0x32, cx24123_readreg(state, 0x32) | 0x02);
+	if (state->config->lnb_polarity)
+		cx24123_writereg(state, 0x32,
+			cx24123_readreg(state, 0x32) | 0x02);
 
 	if (state->config->dont_use_pll)
-	cx24123_repeater_mode(state, 1, 0);
+		cx24123_repeater_mode(state, 1, 0);
 
 	return 0;
 }
 
-static int cx24123_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
+static int cx24123_set_voltage(struct dvb_frontend *fe,
+	fe_sec_voltage_t voltage)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	u8 val;
@@ -740,7 +764,7 @@
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies(200);
 	while (!(cx24123_readreg(state, 0x29) & 0x40)) {
-		if(time_after(jiffies, timeout)) {
+		if (time_after(jiffies, timeout)) {
 			err("%s: diseqc queue not ready, " \
 				"command may be lost.\n", __func__);
 			break;
@@ -749,7 +773,8 @@
 	}
 }
 
-static int cx24123_send_diseqc_msg(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
+static int cx24123_send_diseqc_msg(struct dvb_frontend *fe,
+	struct dvb_diseqc_master_cmd *cmd)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	int i, val, tone;
@@ -771,20 +796,21 @@
 		cx24123_writereg(state, 0x2C + i, cmd->msg[i]);
 
 	val = cx24123_readreg(state, 0x29);
-	cx24123_writereg(state, 0x29, ((val & 0x90) | 0x40) | ((cmd->msg_len-3) & 3));
+	cx24123_writereg(state, 0x29, ((val & 0x90) | 0x40) |
+		((cmd->msg_len-3) & 3));
 
 	/* wait for diseqc message to finish sending */
 	cx24123_wait_for_diseqc(state);
 
 	/* restart continuous tone if enabled */
-	if (tone & 0x10) {
+	if (tone & 0x10)
 		cx24123_writereg(state, 0x29, tone & ~0x40);
-	}
 
 	return 0;
 }
 
-static int cx24123_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst)
+static int cx24123_diseqc_send_burst(struct dvb_frontend *fe,
+	fe_sec_mini_cmd_t burst)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	int val, tone;
@@ -814,13 +840,13 @@
 	cx24123_writereg(state, 0x2a, cx24123_readreg(state, 0x2a) & 0xfb);
 
 	/* restart continuous tone if enabled */
-	if (tone & 0x10) {
+	if (tone & 0x10)
 		cx24123_writereg(state, 0x29, tone & ~0x40);
-	}
+
 	return 0;
 }
 
-static int cx24123_read_status(struct dvb_frontend* fe, fe_status_t* status)
+static int cx24123_read_status(struct dvb_frontend *fe, fe_status_t *status)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	int sync = cx24123_readreg(state, 0x14);
@@ -853,8 +879,9 @@
 }
 
 /*
- * Configured to return the measurement of errors in blocks, because no UCBLOCKS value
- * is available, so this value doubles up to satisfy both measurements
+ * Configured to return the measurement of errors in blocks,
+ * because no UCBLOCKS value is available, so this value doubles up
+ * to satisfy both measurements.
  */
 static int cx24123_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
@@ -876,7 +903,8 @@
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 
-	*signal_strength = cx24123_readreg(state, 0x3b) << 8; /* larger = better */
+	/* larger = better */
+	*signal_strength = cx24123_readreg(state, 0x3b) << 8;
 
 	dprintk("Signal strength = %d\n", *signal_strength);
 
@@ -907,7 +935,7 @@
 	if (state->config->set_ts_params)
 		state->config->set_ts_params(fe, 0);
 
-	state->currentfreq=p->frequency;
+	state->currentfreq = p->frequency;
 	state->currentsymbolrate = p->u.qpsk.symbol_rate;
 
 	cx24123_set_inversion(state, p->inversion);
@@ -932,7 +960,8 @@
 	return 0;
 }
 
-static int cx24123_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx24123_get_frontend(struct dvb_frontend *fe,
+	struct dvb_frontend_parameters *p)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 
@@ -952,7 +981,7 @@
 	return 0;
 }
 
-static int cx24123_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
+static int cx24123_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
 {
 	struct cx24123_state *state = fe->demodulator_priv;
 	u8 val;
@@ -977,8 +1006,8 @@
 	return 0;
 }
 
-static int cx24123_tune(struct dvb_frontend* fe,
-			struct dvb_frontend_parameters* params,
+static int cx24123_tune(struct dvb_frontend *fe,
+			struct dvb_frontend_parameters *params,
 			unsigned int mode_flags,
 			unsigned int *delay,
 			fe_status_t *status)
@@ -997,12 +1026,12 @@
 
 static int cx24123_get_algo(struct dvb_frontend *fe)
 {
-	return 1; //FE_ALGO_HW
+	return 1; /* FE_ALGO_HW */
 }
 
-static void cx24123_release(struct dvb_frontend* fe)
+static void cx24123_release(struct dvb_frontend *fe)
 {
-	struct cx24123_state* state = fe->demodulator_priv;
+	struct cx24123_state *state = fe->demodulator_priv;
 	dprintk("\n");
 	i2c_del_adapter(&state->tuner_i2c_adapter);
 	kfree(state);
@@ -1013,7 +1042,7 @@
 {
 	struct cx24123_state *state = i2c_get_adapdata(i2c_adap);
 	/* this repeater closes after the first stop */
-    cx24123_repeater_mode(state, 1, 1);
+	cx24123_repeater_mode(state, 1, 1);
 	return i2c_transfer(state->i2c, msg, num);
 }
 
@@ -1037,8 +1066,8 @@
 
 static struct dvb_frontend_ops cx24123_ops;
 
-struct dvb_frontend* cx24123_attach(const struct cx24123_config* config,
-				    struct i2c_adapter* i2c)
+struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
+				    struct i2c_adapter *i2c)
 {
 	struct cx24123_state *state =
 		kzalloc(sizeof(struct cx24123_state), GFP_KERNEL);
@@ -1057,20 +1086,25 @@
 	/* check if the demod is there */
 	state->demod_rev = cx24123_readreg(state, 0x00);
 	switch (state->demod_rev) {
-	case 0xe1: info("detected CX24123C\n"); break;
-	case 0xd1: info("detected CX24123\n"); break;
+	case 0xe1:
+		info("detected CX24123C\n");
+		break;
+	case 0xd1:
+		info("detected CX24123\n");
+		break;
 	default:
 		err("wrong demod revision: %x\n", state->demod_rev);
 		goto error;
 	}
 
 	/* create dvb_frontend */
-	memcpy(&state->frontend.ops, &cx24123_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->frontend.ops, &cx24123_ops,
+		sizeof(struct dvb_frontend_ops));
 	state->frontend.demodulator_priv = state;
 
-    /* create tuner i2c adapter */
-    if (config->dont_use_pll)
-	cx24123_repeater_mode(state, 1, 0);
+	/* create tuner i2c adapter */
+	if (config->dont_use_pll)
+		cx24123_repeater_mode(state, 1, 0);
 
 	strlcpy(state->tuner_i2c_adapter.name, "CX24123 tuner I2C bus",
 		sizeof(state->tuner_i2c_adapter.name));
@@ -1079,7 +1113,7 @@
 	state->tuner_i2c_adapter.algo_data = NULL;
 	i2c_set_adapdata(&state->tuner_i2c_adapter, state);
 	if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) {
-	err("tuner i2c bus could not be initialized\n");
+		err("tuner i2c bus could not be initialized\n");
 		goto error;
 	}
 
@@ -1090,6 +1124,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(cx24123_attach);
 
 static struct dvb_frontend_ops cx24123_ops = {
 
@@ -1126,15 +1161,8 @@
 	.get_frontend_algo = cx24123_get_algo,
 };
 
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-
-module_param(force_band, int, 0644);
-MODULE_PARM_DESC(force_band, "Force a specific band select (1-9, default:off).");
-
 MODULE_DESCRIPTION("DVB Frontend module for Conexant " \
 	"CX24123/CX24109/CX24113 hardware");
 MODULE_AUTHOR("Steven Toth");
 MODULE_LICENSE("GPL");
 
-EXPORT_SYMBOL(cx24123_attach);
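The divider maths reflowed in cx24123_pll_calculate() above is easier to check with concrete numbers: the requested L-band frequency (in kHz) times the VCO divider and the reference divider R=10 is divided by twice the 10.111 MHz crystal (scaled to kHz), then split into a 9-bit N and a 5-bit A divider and packed into the 21-bit tuner word. The standalone program below reproduces only that arithmetic; the vco_div and charge-pump values are picked arbitrarily for the example rather than taken from the band table.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define XTAL 10111000u			/* crystal in Hz, as in cx24123.c */

/* Mirror of the N/A divider split and the pllarg packing; freq in kHz. */
static uint32_t demo_pll_word(uint32_t freq_khz, uint32_t vco_div,
			      uint32_t pump)
{
	uint32_t total = (freq_khz * vco_div * 10) / (2 * XTAL / 1000);
	uint32_t ndiv = (total / 32) & 0x1ff;
	uint32_t adiv = (total % 32) & 0x1f;

	if (adiv == 0 && ndiv > 0)	/* same borrow rule as the driver */
		ndiv--;

	printf("ndiv=%" PRIu32 " adiv=%" PRIu32 "\n", ndiv, adiv);

	/* control bits 11, refdiv 11, pump polarity 1, pump, N, A */
	return (3u << 19) | (3u << 17) | (1u << 16) |
		(pump << 14) | (ndiv << 5) | adiv;
}

int main(void)
{
	/* 1200000 kHz with vco_div 2 and pump 2 (illustrative values only). */
	printf("pllarg=0x%06" PRIx32 "\n", demo_pll_word(1200000, 2, 2));
	return 0;
}

For these inputs it should print ndiv=37, adiv=2 and a packed word of 0x1f84a2.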
diff --git a/drivers/media/dvb/frontends/cx24123.h b/drivers/media/dvb/frontends/cx24123.h
index cc6b411..51ae866 100644
--- a/drivers/media/dvb/frontends/cx24123.h
+++ b/drivers/media/dvb/frontends/cx24123.h
@@ -23,13 +23,12 @@
 
 #include <linux/dvb/frontend.h>
 
-struct cx24123_config
-{
+struct cx24123_config {
 	/* the demodulator's i2c address */
 	u8 demod_address;
 
 	/* Need to set device param for start_dma */
-	int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
+	int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
 
 	/* 0 = LNB voltage normal, 1 = LNB voltage inverted */
 	int lnb_polarity;
@@ -39,7 +38,8 @@
 	void (*agc_callback) (struct dvb_frontend *);
 };
 
-#if defined(CONFIG_DVB_CX24123) || (defined(CONFIG_DVB_CX24123_MODULE) && defined(MODULE))
+#if defined(CONFIG_DVB_CX24123) || (defined(CONFIG_DVB_CX24123_MODULE) \
+	&& defined(MODULE))
 extern struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
 					   struct i2c_adapter *i2c);
 extern struct i2c_adapter *cx24123_get_tuner_i2c_adapter(struct dvb_frontend *);
@@ -56,6 +56,6 @@
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
-#endif // CONFIG_DVB_CX24123
+#endif
 
 #endif /* CX24123_H */
diff --git a/drivers/media/dvb/frontends/s5h1409.c b/drivers/media/dvb/frontends/s5h1409.c
index 7500a1c..cf4d893 100644
--- a/drivers/media/dvb/frontends/s5h1409.c
+++ b/drivers/media/dvb/frontends/s5h1409.c
@@ -30,10 +30,10 @@
 
 struct s5h1409_state {
 
-	struct i2c_adapter* i2c;
+	struct i2c_adapter *i2c;
 
 	/* configuration settings */
-	const struct s5h1409_config* config;
+	const struct s5h1409_config *config;
 
 	struct dvb_frontend frontend;
 
@@ -48,6 +48,9 @@
 };
 
 static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable verbose debug messages");
+
 #define dprintk	if (debug) printk
 
 /* Register values to initialise the demod, this will set VSB by default */
@@ -299,10 +302,10 @@
 };
 
 /* 8 bit registers, 16 bit values */
-static int s5h1409_writereg(struct s5h1409_state* state, u8 reg, u16 data)
+static int s5h1409_writereg(struct s5h1409_state *state, u8 reg, u16 data)
 {
 	int ret;
-	u8 buf [] = { reg, data >> 8,  data & 0xff };
+	u8 buf[] = { reg, data >> 8,  data & 0xff };
 
 	struct i2c_msg msg = { .addr = state->config->demod_address,
 			       .flags = 0, .buf = buf, .len = 3 };
@@ -310,19 +313,19 @@
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
+		printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, "
 		       "ret == %i)\n", __func__, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
 
-static u16 s5h1409_readreg(struct s5h1409_state* state, u8 reg)
+static u16 s5h1409_readreg(struct s5h1409_state *state, u8 reg)
 {
 	int ret;
-	u8 b0 [] = { reg };
-	u8 b1 [] = { 0, 0 };
+	u8 b0[] = { reg };
+	u8 b1[] = { 0, 0 };
 
-	struct i2c_msg msg [] = {
+	struct i2c_msg msg[] = {
 		{ .addr = state->config->demod_address, .flags = 0,
 		  .buf = b0, .len = 1 },
 		{ .addr = state->config->demod_address, .flags = I2C_M_RD,
@@ -335,9 +338,9 @@
 	return (b1[0] << 8) | b1[1];
 }
 
-static int s5h1409_softreset(struct dvb_frontend* fe)
+static int s5h1409_softreset(struct dvb_frontend *fe)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s()\n", __func__);
 
@@ -349,11 +352,11 @@
 }
 
 #define S5H1409_VSB_IF_FREQ 5380
-#define S5H1409_QAM_IF_FREQ state->config->qam_if
+#define S5H1409_QAM_IF_FREQ (state->config->qam_if)
 
-static int s5h1409_set_if_freq(struct dvb_frontend* fe, int KHz)
+static int s5h1409_set_if_freq(struct dvb_frontend *fe, int KHz)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(%d KHz)\n", __func__, KHz);
 
@@ -376,26 +379,26 @@
 	return 0;
 }
 
-static int s5h1409_set_spectralinversion(struct dvb_frontend* fe, int inverted)
+static int s5h1409_set_spectralinversion(struct dvb_frontend *fe, int inverted)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(%d)\n", __func__, inverted);
 
-	if(inverted == 1)
+	if (inverted == 1)
 		return s5h1409_writereg(state, 0x1b, 0x1101); /* Inverted */
 	else
 		return s5h1409_writereg(state, 0x1b, 0x0110); /* Normal */
 }
 
-static int s5h1409_enable_modulation(struct dvb_frontend* fe,
+static int s5h1409_enable_modulation(struct dvb_frontend *fe,
 				     fe_modulation_t m)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(0x%08x)\n", __func__, m);
 
-	switch(m) {
+	switch (m) {
 	case VSB_8:
 		dprintk("%s() VSB_8\n", __func__);
 		if (state->if_freq != S5H1409_VSB_IF_FREQ)
@@ -422,9 +425,9 @@
 	return 0;
 }
 
-static int s5h1409_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
+static int s5h1409_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(%d)\n", __func__, enable);
 
@@ -434,9 +437,9 @@
 		return s5h1409_writereg(state, 0xf3, 0);
 }
 
-static int s5h1409_set_gpio(struct dvb_frontend* fe, int enable)
+static int s5h1409_set_gpio(struct dvb_frontend *fe, int enable)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(%d)\n", __func__, enable);
 
@@ -448,18 +451,18 @@
 			s5h1409_readreg(state, 0xe3) & 0xfeff);
 }
 
-static int s5h1409_sleep(struct dvb_frontend* fe, int enable)
+static int s5h1409_sleep(struct dvb_frontend *fe, int enable)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(%d)\n", __func__, enable);
 
 	return s5h1409_writereg(state, 0xf2, enable);
 }
 
-static int s5h1409_register_reset(struct dvb_frontend* fe)
+static int s5h1409_register_reset(struct dvb_frontend *fe)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s()\n", __func__);
 
@@ -483,7 +486,7 @@
 		reg &= 0xff;
 
 		s5h1409_writereg(state, 0x96, 0x00c);
-		if ((reg < 0x38) || (reg > 0x68) ) {
+		if ((reg < 0x38) || (reg > 0x68)) {
 			s5h1409_writereg(state, 0x93, 0x3332);
 			s5h1409_writereg(state, 0x9e, 0x2c37);
 		} else {
@@ -514,7 +517,7 @@
 
 			s5h1409_writereg(state, 0x96, 0x20);
 			s5h1409_writereg(state, 0xad,
-				( ((reg1 & 0xf000) >> 4) | (reg2 & 0xf0ff)) );
+				(((reg1 & 0xf000) >> 4) | (reg2 & 0xf0ff)));
 			s5h1409_writereg(state, 0xab,
 				s5h1409_readreg(state, 0xab) & 0xeffe);
 		}
@@ -529,10 +532,10 @@
 }
 
 /* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int s5h1409_set_frontend (struct dvb_frontend* fe,
+static int s5h1409_set_frontend(struct dvb_frontend *fe,
 				 struct dvb_frontend_parameters *p)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	dprintk("%s(frequency=%d)\n", __func__, p->frequency);
 
@@ -546,9 +549,11 @@
 	msleep(100);
 
 	if (fe->ops.tuner_ops.set_params) {
-		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1);
+		if (fe->ops.i2c_gate_ctrl)
+			fe->ops.i2c_gate_ctrl(fe, 1);
 		fe->ops.tuner_ops.set_params(fe, p);
-		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
+		if (fe->ops.i2c_gate_ctrl)
+			fe->ops.i2c_gate_ctrl(fe, 0);
 	}
 
 	/* Optimize the demod for QAM */
@@ -592,17 +597,17 @@
 
 /* Reset the demod hardware and reset all of the configuration registers
    to a default state. */
-static int s5h1409_init (struct dvb_frontend* fe)
+static int s5h1409_init(struct dvb_frontend *fe)
 {
 	int i;
 
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 	dprintk("%s()\n", __func__);
 
 	s5h1409_sleep(fe, 0);
 	s5h1409_register_reset(fe);
 
-	for (i=0; i < ARRAY_SIZE(init_tab); i++)
+	for (i = 0; i < ARRAY_SIZE(init_tab); i++)
 		s5h1409_writereg(state, init_tab[i].reg, init_tab[i].data);
 
 	/* The datasheet says that after initialisation, VSB is default */
@@ -627,9 +632,9 @@
 	return 0;
 }
 
-static int s5h1409_read_status(struct dvb_frontend* fe, fe_status_t* status)
+static int s5h1409_read_status(struct dvb_frontend *fe, fe_status_t *status)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 	u16 reg;
 	u32 tuner_status = 0;
 
@@ -637,12 +642,12 @@
 
 	/* Get the demodulator status */
 	reg = s5h1409_readreg(state, 0xf1);
-	if(reg & 0x1000)
+	if (reg & 0x1000)
 		*status |= FE_HAS_VITERBI;
-	if(reg & 0x8000)
+	if (reg & 0x8000)
 		*status |= FE_HAS_LOCK | FE_HAS_SYNC;
 
-	switch(state->config->status_mode) {
+	switch (state->config->status_mode) {
 	case S5H1409_DEMODLOCKING:
 		if (*status & FE_HAS_VITERBI)
 			*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
@@ -668,12 +673,12 @@
 	return 0;
 }
 
-static int s5h1409_qam256_lookup_snr(struct dvb_frontend* fe, u16* snr, u16 v)
+static int s5h1409_qam256_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
 {
 	int i, ret = -EINVAL;
 	dprintk("%s()\n", __func__);
 
-	for (i=0; i < ARRAY_SIZE(qam256_snr_tab); i++) {
+	for (i = 0; i < ARRAY_SIZE(qam256_snr_tab); i++) {
 		if (v < qam256_snr_tab[i].val) {
 			*snr = qam256_snr_tab[i].data;
 			ret = 0;
@@ -683,12 +688,12 @@
 	return ret;
 }
 
-static int s5h1409_qam64_lookup_snr(struct dvb_frontend* fe, u16* snr, u16 v)
+static int s5h1409_qam64_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
 {
 	int i, ret = -EINVAL;
 	dprintk("%s()\n", __func__);
 
-	for (i=0; i < ARRAY_SIZE(qam64_snr_tab); i++) {
+	for (i = 0; i < ARRAY_SIZE(qam64_snr_tab); i++) {
 		if (v < qam64_snr_tab[i].val) {
 			*snr = qam64_snr_tab[i].data;
 			ret = 0;
@@ -698,12 +703,12 @@
 	return ret;
 }
 
-static int s5h1409_vsb_lookup_snr(struct dvb_frontend* fe, u16* snr, u16 v)
+static int s5h1409_vsb_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
 {
 	int i, ret = -EINVAL;
 	dprintk("%s()\n", __func__);
 
-	for (i=0; i < ARRAY_SIZE(vsb_snr_tab); i++) {
+	for (i = 0; i < ARRAY_SIZE(vsb_snr_tab); i++) {
 		if (v > vsb_snr_tab[i].val) {
 			*snr = vsb_snr_tab[i].data;
 			ret = 0;
@@ -714,13 +719,13 @@
 	return ret;
 }
 
-static int s5h1409_read_snr(struct dvb_frontend* fe, u16* snr)
+static int s5h1409_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 	u16 reg;
 	dprintk("%s()\n", __func__);
 
-	switch(state->current_modulation) {
+	switch (state->current_modulation) {
 	case QAM_64:
 		reg = s5h1409_readreg(state, 0xf0) & 0xff;
 		return s5h1409_qam64_lookup_snr(fe, snr, reg);
@@ -737,30 +742,30 @@
 	return -EINVAL;
 }
 
-static int s5h1409_read_signal_strength(struct dvb_frontend* fe,
-					u16* signal_strength)
+static int s5h1409_read_signal_strength(struct dvb_frontend *fe,
+					u16 *signal_strength)
 {
 	return s5h1409_read_snr(fe, signal_strength);
 }
 
-static int s5h1409_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+static int s5h1409_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	*ucblocks = s5h1409_readreg(state, 0xb5);
 
 	return 0;
 }
 
-static int s5h1409_read_ber(struct dvb_frontend* fe, u32* ber)
+static int s5h1409_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
 	return s5h1409_read_ucblocks(fe, ber);
 }
 
-static int s5h1409_get_frontend(struct dvb_frontend* fe,
+static int s5h1409_get_frontend(struct dvb_frontend *fe,
 				struct dvb_frontend_parameters *p)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 
 	p->frequency = state->current_frequency;
 	p->u.vsb.modulation = state->current_modulation;
@@ -768,25 +773,25 @@
 	return 0;
 }
 
-static int s5h1409_get_tune_settings(struct dvb_frontend* fe,
+static int s5h1409_get_tune_settings(struct dvb_frontend *fe,
 				     struct dvb_frontend_tune_settings *tune)
 {
 	tune->min_delay_ms = 1000;
 	return 0;
 }
 
-static void s5h1409_release(struct dvb_frontend* fe)
+static void s5h1409_release(struct dvb_frontend *fe)
 {
-	struct s5h1409_state* state = fe->demodulator_priv;
+	struct s5h1409_state *state = fe->demodulator_priv;
 	kfree(state);
 }
 
 static struct dvb_frontend_ops s5h1409_ops;
 
-struct dvb_frontend* s5h1409_attach(const struct s5h1409_config* config,
-				    struct i2c_adapter* i2c)
+struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
+				    struct i2c_adapter *i2c)
 {
-	struct s5h1409_state* state = NULL;
+	struct s5h1409_state *state = NULL;
 	u16 reg;
 
 	/* allocate memory for the internal state */
@@ -825,6 +830,7 @@
 	kfree(state);
 	return NULL;
 }
+EXPORT_SYMBOL(s5h1409_attach);
 
 static struct dvb_frontend_ops s5h1409_ops = {
 
@@ -850,14 +856,10 @@
 	.release              = s5h1409_release,
 };
 
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Enable verbose debug messages");
-
 MODULE_DESCRIPTION("Samsung S5H1409 QAM-B/ATSC Demodulator driver");
 MODULE_AUTHOR("Steven Toth");
 MODULE_LICENSE("GPL");
 
-EXPORT_SYMBOL(s5h1409_attach);
 
 /*
  * Local variables:
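s5h1409 registers use 8-bit addresses but hold 16-bit values, so the writereg/readreg helpers touched above always split or reassemble the value around a big-endian byte pair on the wire (register, MSB, LSB on a write; two bytes back on a read). The snippet below shows just that packing step in plain C, with the i2c transfer replaced by a local buffer so it runs anywhere; the 0x1b/0x1101 example value is the spectral-inversion write from earlier in the file.

#include <stdio.h>
#include <stdint.h>

/* Build the 3-byte write payload: register, value MSB, value LSB. */
static void pack_reg16(uint8_t buf[3], uint8_t reg, uint16_t val)
{
	buf[0] = reg;
	buf[1] = val >> 8;	/* high byte first, as the demod expects */
	buf[2] = val & 0xff;
}

/* Reassemble a 16-bit value from a 2-byte read reply. */
static uint16_t unpack_reg16(const uint8_t reply[2])
{
	return (uint16_t)((reply[0] << 8) | reply[1]);
}

int main(void)
{
	uint8_t buf[3];

	pack_reg16(buf, 0x1b, 0x1101);
	printf("wire bytes: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
	printf("read back: 0x%04x\n", unpack_reg16(&buf[1]));
	return 0;
}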
diff --git a/drivers/media/dvb/frontends/s5h1409.h b/drivers/media/dvb/frontends/s5h1409.h
index d1a1d2e..070d974 100644
--- a/drivers/media/dvb/frontends/s5h1409.h
+++ b/drivers/media/dvb/frontends/s5h1409.h
@@ -24,8 +24,7 @@
 
 #include <linux/dvb/frontend.h>
 
-struct s5h1409_config
-{
+struct s5h1409_config {
 	/* the demodulator's i2c address */
 	u8 demod_address;
 
@@ -60,12 +59,14 @@
 	u16 mpeg_timing;
 };
 
-#if defined(CONFIG_DVB_S5H1409) || (defined(CONFIG_DVB_S5H1409_MODULE) && defined(MODULE))
-extern struct dvb_frontend* s5h1409_attach(const struct s5h1409_config* config,
-					   struct i2c_adapter* i2c);
+#if defined(CONFIG_DVB_S5H1409) || (defined(CONFIG_DVB_S5H1409_MODULE) \
+	&& defined(MODULE))
+extern struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
+					   struct i2c_adapter *i2c);
 #else
-static inline struct dvb_frontend* s5h1409_attach(const struct s5h1409_config* config,
-						  struct i2c_adapter* i2c)
+static inline struct dvb_frontend *s5h1409_attach(
+	const struct s5h1409_config *config,
+	struct i2c_adapter *i2c)
 {
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index 2da1a37..40644aa 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -38,6 +38,7 @@
 	struct dvb_frontend frontend;
 
 	fe_modulation_t current_modulation;
+	unsigned int first_tune:1;
 
 	u32 current_frequency;
 	int if_freq;
@@ -62,7 +63,7 @@
 	{ S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
 	{ S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
 	{ S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
-	{ S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, },
+	{ S5H1411_I2C_TOP_ADDR, 0x1f, 0x342c, },
 	{ S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
 	{ S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
 	{ S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
@@ -100,7 +101,6 @@
 	{ S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
 	{ S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
 	{ S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
-	{ S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
 	{ S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
 	{ S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
 	{ S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
@@ -343,7 +343,7 @@
 	u8 addr, u8 reg, u16 data)
 {
 	int ret;
-	u8 buf [] = { reg, data >> 8,  data & 0xff };
+	u8 buf[] = { reg, data >> 8,  data & 0xff };
 
 	struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
 
@@ -359,10 +359,10 @@
 static u16 s5h1411_readreg(struct s5h1411_state *state, u8 addr, u8 reg)
 {
 	int ret;
-	u8 b0 [] = { reg };
-	u8 b1 [] = { 0, 0 };
+	u8 b0[] = { reg };
+	u8 b1[] = { 0, 0 };
 
-	struct i2c_msg msg [] = {
+	struct i2c_msg msg[] = {
 		{ .addr = addr, .flags = 0, .buf = b0, .len = 1 },
 		{ .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 } };
 
@@ -393,7 +393,7 @@
 
 	switch (KHz) {
 	case 3250:
-		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9);
+		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d5);
 		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
 		s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
 		break;
@@ -464,13 +464,25 @@
 
 	if (inversion == 1)
 		val |= 0x1000; /* Inverted */
-	else
-		val |= 0x0000;
 
 	state->inversion = inversion;
 	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
 }
 
+static int s5h1411_set_serialmode(struct dvb_frontend *fe, int serial)
+{
+	struct s5h1411_state *state = fe->demodulator_priv;
+	u16 val;
+
+	dprintk("%s(%d)\n", __func__, serial);
+	val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbd) & ~0x100;
+
+	if (serial == 1)
+		val |= 0x100;
+
+	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, val);
+}
+
 static int s5h1411_enable_modulation(struct dvb_frontend *fe,
 				     fe_modulation_t m)
 {
@@ -478,6 +490,12 @@
 
 	dprintk("%s(0x%08x)\n", __func__, m);
 
+	if ((state->first_tune == 0) && (m == state->current_modulation)) {
+		dprintk("%s() Already at desired modulation.  Skipping...\n",
+			__func__);
+		return 0;
+	}
+
 	switch (m) {
 	case VSB_8:
 		dprintk("%s() VSB_8\n", __func__);
@@ -502,6 +520,7 @@
 	}
 
 	state->current_modulation = m;
+	state->first_tune = 0;
 	s5h1411_softreset(fe);
 
 	return 0;
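The two hunks above add a first_tune flag so s5h1411_enable_modulation() can skip reprogramming when the requested modulation has not changed, while still forcing one full programming pass right after init (where, per the comment later in the patch, the datasheet's "defaults to VSB" claim proved unreliable). A compact model of that caching rule is sketched below with invented enum and struct names.

#include <stdio.h>

enum demo_mod { DEMO_VSB_8, DEMO_QAM_64, DEMO_QAM_256 };

struct demo_state {
	enum demo_mod current_modulation;
	unsigned int first_tune:1;	/* set by init, cleared on first tune */
};

/* Reprogram only when the modulation really changes, but never skip
 * the very first request after initialisation. */
static int demo_enable_modulation(struct demo_state *s, enum demo_mod m)
{
	if (!s->first_tune && m == s->current_modulation) {
		printf("already at modulation %d, skipping\n", m);
		return 0;
	}

	printf("programming modulation %d\n", m);
	s->current_modulation = m;
	s->first_tune = 0;
	return 0;
}

int main(void)
{
	struct demo_state s = { .current_modulation = DEMO_VSB_8,
				.first_tune = 1 };

	demo_enable_modulation(&s, DEMO_VSB_8);		/* forced: first tune */
	demo_enable_modulation(&s, DEMO_VSB_8);		/* skipped */
	demo_enable_modulation(&s, DEMO_QAM_256);	/* real change */
	return 0;
}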
@@ -535,7 +554,7 @@
 		return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
 }
 
-static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
+static int s5h1411_set_powerstate(struct dvb_frontend *fe, int enable)
 {
 	struct s5h1411_state *state = fe->demodulator_priv;
 
@@ -551,6 +570,11 @@
 	return 0;
 }
 
+static int s5h1411_sleep(struct dvb_frontend *fe)
+{
+	return s5h1411_set_powerstate(fe, 1);
+}
+
 static int s5h1411_register_reset(struct dvb_frontend *fe)
 {
 	struct s5h1411_state *state = fe->demodulator_priv;
@@ -574,9 +598,6 @@
 
 	s5h1411_enable_modulation(fe, p->u.vsb.modulation);
 
-	/* Allow the demod to settle */
-	msleep(100);
-
 	if (fe->ops.tuner_ops.set_params) {
 		if (fe->ops.i2c_gate_ctrl)
 			fe->ops.i2c_gate_ctrl(fe, 1);
@@ -587,6 +608,10 @@
 			fe->ops.i2c_gate_ctrl(fe, 0);
 	}
 
+	/* Issue a reset to the demod so it knows to resync against the
+	   newly tuned frequency */
+	s5h1411_softreset(fe);
+
 	return 0;
 }
 
@@ -599,7 +624,7 @@
 
 	dprintk("%s()\n", __func__);
 
-	s5h1411_sleep(fe, 0);
+	s5h1411_set_powerstate(fe, 0);
 	s5h1411_register_reset(fe);
 
 	for (i = 0; i < ARRAY_SIZE(init_tab); i++)
@@ -610,12 +635,17 @@
 	/* The datasheet says that after initialisation, VSB is default */
 	state->current_modulation = VSB_8;
 
+	/* Although the datasheet says it's in VSB, empirical evidence
+	   shows problems getting lock on the first tuning request.  Make
+	   sure we call enable_modulation the first time around */
+	state->first_tune = 1;
+
 	if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
 		/* Serial */
-		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101);
+		s5h1411_set_serialmode(fe, 1);
 	else
 		/* Parallel */
-		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001);
+		s5h1411_set_serialmode(fe, 0);
 
 	s5h1411_set_spectralinversion(fe, state->config->inversion);
 	s5h1411_set_if_freq(fe, state->config->vsb_if);
@@ -637,28 +667,29 @@
 
 	*status = 0;
 
-	/* Get the demodulator status */
-	reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
-		& 0x0001;
-	if (reg)
-		*status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+	/* Register F2 bit 15 = Master Lock, removed */
 
 	switch (state->current_modulation) {
 	case QAM_64:
 	case QAM_256:
 		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
-		if (reg & 0x100)
-			*status |= FE_HAS_VITERBI;
-		if (reg & 0x10)
-			*status |= FE_HAS_SYNC;
+		if (reg & 0x10) /* QAM FEC Lock */
+			*status |= FE_HAS_SYNC | FE_HAS_LOCK;
+		if (reg & 0x100) /* QAM EQ Lock */
+			*status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+
 		break;
 	case VSB_8:
-		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
-		if (reg & 0x0001)
-			*status |= FE_HAS_SYNC;
 		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
-		if (reg & 0x1000)
-			*status |= FE_HAS_VITERBI;
+		if (reg & 0x1000) /* FEC Lock */
+			*status |= FE_HAS_SYNC | FE_HAS_LOCK;
+		if (reg & 0x2000) /* EQ Lock */
+			*status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+
+		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x53);
+		if (reg & 0x1) /* AFC Lock */
+			*status |= FE_HAS_SIGNAL;
+
 		break;
 	default:
 		return -EINVAL;
@@ -863,6 +894,7 @@
 	},
 
 	.init                 = s5h1411_init,
+	.sleep                = s5h1411_sleep,
 	.i2c_gate_ctrl        = s5h1411_i2c_gate_ctrl,
 	.set_frontend         = s5h1411_set_frontend,
 	.get_frontend         = s5h1411_get_frontend,
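The read_status() rework above stops inferring lock from the master-lock bit of register 0xf2 and instead builds the status mask from the individual FEC, equaliser and AFC lock bits, per modulation. Below is a minimal stand-alone rendering of the VSB branch; the flag names are simplified stand-ins rather than the real fe_status_t values, while the bit positions come from the hunk above.

#include <stdio.h>
#include <stdint.h>

/* Simplified status flags standing in for the fe_status_t bits. */
enum demo_status {
	DEMO_SIGNAL  = 1 << 0,
	DEMO_CARRIER = 1 << 1,
	DEMO_VITERBI = 1 << 2,
	DEMO_SYNC    = 1 << 3,
	DEMO_LOCK    = 1 << 4,
};

/* VSB decode as in the patched read_status(): bit 12 of reg 0xf2 is
 * FEC lock, bit 13 is equaliser lock, bit 0 of reg 0x53 is AFC lock. */
static unsigned int decode_vsb_status(uint16_t reg_f2, uint16_t reg_53)
{
	unsigned int status = 0;

	if (reg_f2 & 0x1000)
		status |= DEMO_SYNC | DEMO_LOCK;
	if (reg_f2 & 0x2000)
		status |= DEMO_VITERBI | DEMO_CARRIER | DEMO_SIGNAL;
	if (reg_53 & 0x0001)
		status |= DEMO_SIGNAL;

	return status;
}

int main(void)
{
	/* FEC + EQ locked, AFC locked: every flag should be raised. */
	printf("status mask: 0x%02x\n", decode_vsb_status(0x3000, 0x0001));
	return 0;
}

With both 0xf2 lock bits and the AFC bit set it prints 0x1f, i.e. all five flags.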
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
index 7d542bc..45ec0f8 100644
--- a/drivers/media/dvb/frontends/s5h1411.h
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -47,7 +47,7 @@
 	u16 mpeg_timing;
 
 	/* IF Freq for QAM and VSB in KHz */
-#define S5H1411_IF_2500  2500
+#define S5H1411_IF_3250  3250
 #define S5H1411_IF_3500  3500
 #define S5H1411_IF_4000  4000
 #define S5H1411_IF_5380  5380
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 04e7f1c..2a8bbcd 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -195,7 +195,7 @@
 static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data)
 {
 	int ret;
-	u8 buf [] = { reg, data };
+	u8 buf[] = { reg, data };
 	struct i2c_msg msg = {
 		.addr = state->config->demod_address,
 		.flags = 0, .buf = buf, .len = 2 };
@@ -213,9 +213,9 @@
 static u8 tda10048_readreg(struct tda10048_state *state, u8 reg)
 {
 	int ret;
-	u8 b0 [] = { reg };
-	u8 b1 [] = { 0 };
-	struct i2c_msg msg [] = {
+	u8 b0[] = { reg };
+	u8 b1[] = { 0 };
+	struct i2c_msg msg[] = {
 		{ .addr = state->config->demod_address,
 			.flags = 0, .buf = b0, .len = 1 },
 		{ .addr = state->config->demod_address,
@@ -393,43 +393,89 @@
 
 	val = tda10048_readreg(state, TDA10048_OUT_CONF2);
 	switch ((val & 0x60) >> 5) {
-	case 0: p->constellation =   QPSK; break;
-	case 1: p->constellation = QAM_16; break;
-	case 2: p->constellation = QAM_64; break;
+	case 0:
+		p->constellation = QPSK;
+		break;
+	case 1:
+		p->constellation = QAM_16;
+		break;
+	case 2:
+		p->constellation = QAM_64;
+		break;
 	}
 	switch ((val & 0x18) >> 3) {
-	case 0: p->hierarchy_information = HIERARCHY_NONE; break;
-	case 1: p->hierarchy_information =    HIERARCHY_1; break;
-	case 2: p->hierarchy_information =    HIERARCHY_2; break;
-	case 3: p->hierarchy_information =    HIERARCHY_4; break;
+	case 0:
+		p->hierarchy_information = HIERARCHY_NONE;
+		break;
+	case 1:
+		p->hierarchy_information = HIERARCHY_1;
+		break;
+	case 2:
+		p->hierarchy_information = HIERARCHY_2;
+		break;
+	case 3:
+		p->hierarchy_information = HIERARCHY_4;
+		break;
 	}
 	switch (val & 0x07) {
-	case 0: p->code_rate_HP = FEC_1_2; break;
-	case 1: p->code_rate_HP = FEC_2_3; break;
-	case 2: p->code_rate_HP = FEC_3_4; break;
-	case 3: p->code_rate_HP = FEC_5_6; break;
-	case 4: p->code_rate_HP = FEC_7_8; break;
+	case 0:
+		p->code_rate_HP = FEC_1_2;
+		break;
+	case 1:
+		p->code_rate_HP = FEC_2_3;
+		break;
+	case 2:
+		p->code_rate_HP = FEC_3_4;
+		break;
+	case 3:
+		p->code_rate_HP = FEC_5_6;
+		break;
+	case 4:
+		p->code_rate_HP = FEC_7_8;
+		break;
 	}
 
 	val = tda10048_readreg(state, TDA10048_OUT_CONF3);
 	switch (val & 0x07) {
-	case 0: p->code_rate_LP = FEC_1_2; break;
-	case 1: p->code_rate_LP = FEC_2_3; break;
-	case 2: p->code_rate_LP = FEC_3_4; break;
-	case 3: p->code_rate_LP = FEC_5_6; break;
-	case 4: p->code_rate_LP = FEC_7_8; break;
+	case 0:
+		p->code_rate_LP = FEC_1_2;
+		break;
+	case 1:
+		p->code_rate_LP = FEC_2_3;
+		break;
+	case 2:
+		p->code_rate_LP = FEC_3_4;
+		break;
+	case 3:
+		p->code_rate_LP = FEC_5_6;
+		break;
+	case 4:
+		p->code_rate_LP = FEC_7_8;
+		break;
 	}
 
 	val = tda10048_readreg(state, TDA10048_OUT_CONF1);
 	switch ((val & 0x0c) >> 2) {
-	case 0: p->guard_interval = GUARD_INTERVAL_1_32; break;
-	case 1: p->guard_interval = GUARD_INTERVAL_1_16; break;
-	case 2: p->guard_interval =  GUARD_INTERVAL_1_8; break;
-	case 3: p->guard_interval =  GUARD_INTERVAL_1_4; break;
+	case 0:
+		p->guard_interval = GUARD_INTERVAL_1_32;
+		break;
+	case 1:
+		p->guard_interval = GUARD_INTERVAL_1_16;
+		break;
+	case 2:
+		p->guard_interval =  GUARD_INTERVAL_1_8;
+		break;
+	case 3:
+		p->guard_interval =  GUARD_INTERVAL_1_4;
+		break;
 	}
 	switch (val & 0x02) {
-	case 0: p->transmission_mode = TRANSMISSION_MODE_2K; break;
-	case 1: p->transmission_mode = TRANSMISSION_MODE_8K; break;
+	case 0:
+		p->transmission_mode = TRANSMISSION_MODE_2K;
+		break;
+	case 1:
+		p->transmission_mode = TRANSMISSION_MODE_8K;
+		break;
 	}
 
 	return 0;
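The tda10048_get_frontend() hunk above only re-indents the decode, but the pattern it formats is worth isolating: each reported parameter is a two- or three-bit field of an OUT_CONF register mapped case by case onto a DVB enum. A stand-alone version of the constellation decode (bits 6:5 of OUT_CONF2) is sketched below using a local enum in place of the dvb-core one.

#include <stdio.h>
#include <stdint.h>

enum demo_constellation { DEMO_QPSK, DEMO_QAM_16, DEMO_QAM_64, DEMO_UNKNOWN };

/* Bits 6:5 of the OUT_CONF2 value select the constellation. */
static enum demo_constellation decode_constellation(uint8_t out_conf2)
{
	switch ((out_conf2 & 0x60) >> 5) {
	case 0:
		return DEMO_QPSK;
	case 1:
		return DEMO_QAM_16;
	case 2:
		return DEMO_QAM_64;
	default:
		return DEMO_UNKNOWN;	/* value 3 is not decoded above */
	}
}

int main(void)
{
	printf("constellation code: %d\n", decode_constellation(0x40));
	return 0;
}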
diff --git a/drivers/media/dvb/frontends/z0194a.h b/drivers/media/dvb/frontends/z0194a.h
index d2876d2..07f3fc0 100644
--- a/drivers/media/dvb/frontends/z0194a.h
+++ b/drivers/media/dvb/frontends/z0194a.h
@@ -12,7 +12,7 @@
 #ifndef Z0194A
 #define Z0194A
 
-static int sharp_z0194a__set_symbol_rate(struct dvb_frontend *fe,
+static int sharp_z0194a_set_symbol_rate(struct dvb_frontend *fe,
 					 u32 srate, u32 ratio)
 {
 	u8 aclk = 0;
@@ -40,7 +40,7 @@
 	return 0;
 }
 
-static u8 sharp_z0194a__inittab[] = {
+static u8 sharp_z0194a_inittab[] = {
 	0x01, 0x15,
 	0x02, 0x00,
 	0x03, 0x00,
@@ -82,16 +82,4 @@
 	0xff, 0xff
 };
 
-static struct stv0299_config sharp_z0194a_config = {
-	.demod_address = 0x68,
-	.inittab = sharp_z0194a__inittab,
-	.mclk = 88000000UL,
-	.invert = 1,
-	.skip_reinit = 0,
-	.lock_output = STV0299_LOCKOUTPUT_1,
-	.volt13_op0_op1 = STV0299_VOLT13_OP1,
-	.min_delay_ms = 100,
-	.set_symbol_rate = sharp_z0194a__set_symbol_rate,
-};
-
 #endif
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index 9da260f..6f9b773 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -42,6 +42,10 @@
 		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
 	{ USB_DEVICE(0x2040, 0x5510),
 		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+	{ USB_DEVICE(0x2040, 0x5520),
+		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+	{ USB_DEVICE(0x2040, 0x5530),
+		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
 	{ USB_DEVICE(0x2040, 0x5580),
 		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
 	{ USB_DEVICE(0x2040, 0x5590),
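The sms-cards change just adds two more Hauppauge product IDs (0x5520 and 0x5530) that reuse the existing WINDHAM board profile; the table works because each entry's driver_info is an index the probe code uses to pick per-board settings. A toy version of that match-and-lookup is below, with an invented struct and board names around the IDs taken from the hunk.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_usb_id {
	uint16_t vendor;
	uint16_t product;
	unsigned long driver_info;	/* index into the board table */
};

static const char *demo_boards[] = { "unknown", "windham-like" };

static const struct demo_usb_id demo_ids[] = {
	{ 0x2040, 0x5510, 1 },
	{ 0x2040, 0x5520, 1 },		/* new IDs reuse the same profile */
	{ 0x2040, 0x5530, 1 },
};

static const char *demo_lookup(uint16_t vendor, uint16_t product)
{
	size_t i;

	for (i = 0; i < sizeof(demo_ids) / sizeof(demo_ids[0]); i++)
		if (demo_ids[i].vendor == vendor &&
		    demo_ids[i].product == product)
			return demo_boards[demo_ids[i].driver_info];
	return demo_boards[0];
}

int main(void)
{
	printf("0x2040:0x5520 -> %s\n", demo_lookup(0x2040, 0x5520));
	return 0;
}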
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index c7c770c..aa1ff52 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -36,7 +36,6 @@
 #include <linux/fs.h>
 #include <linux/timer.h>
 #include <linux/poll.h>
-#include <linux/byteorder/swabb.h>
 #include <linux/smp_lock.h>
 
 #include <linux/kernel.h>
@@ -52,6 +51,7 @@
 #include <linux/i2c.h>
 #include <linux/kthread.h>
 #include <asm/unaligned.h>
+#include <asm/byteorder.h>
 
 #include <asm/system.h>
 
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index e51d707..04cd7c0 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -359,7 +359,7 @@
 	  computer's USB port.
 
 	  To compile this driver as a module, choose M here: the
-	  module will be called radio-silabs.
+	  module will be called radio-si470x.
 
 config USB_MR800
 	tristate "AverMedia MR 800 USB FM radio support"
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 66783ff..a5ca176 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -171,11 +171,11 @@
 	if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			USB_REQ_GET_STATUS,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			0x00, 0xC7, radio->transfer_buffer, 8, 300)<0 ||
+			0x00, 0xC7, radio->transfer_buffer, 8, 300) < 0 ||
 	usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			DSB100_ONOFF,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			0x01, 0x00, radio->transfer_buffer, 8, 300)<0)
+			0x01, 0x00, radio->transfer_buffer, 8, 300) < 0)
 		return -1;
 	radio->muted=0;
 	return (radio->transfer_buffer)[0];
@@ -188,11 +188,11 @@
 	if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			USB_REQ_GET_STATUS,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			0x16, 0x1C, radio->transfer_buffer, 8, 300)<0 ||
+			0x16, 0x1C, radio->transfer_buffer, 8, 300) < 0 ||
 	usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			DSB100_ONOFF,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			0x00, 0x00, radio->transfer_buffer, 8, 300)<0)
+			0x00, 0x00, radio->transfer_buffer, 8, 300) < 0)
 		return -1;
 	radio->muted=1;
 	return (radio->transfer_buffer)[0];
@@ -201,24 +201,24 @@
 /* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
 static int dsbr100_setfreq(struct dsbr100_device *radio, int freq)
 {
-	freq = (freq/16*80)/1000+856;
+	freq = (freq / 16 * 80) / 1000 + 856;
 	if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			DSB100_TUNE,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			(freq>>8)&0x00ff, freq&0xff,
-			radio->transfer_buffer, 8, 300)<0 ||
+			(freq >> 8) & 0x00ff, freq & 0xff,
+			radio->transfer_buffer, 8, 300) < 0 ||
 	   usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			USB_REQ_GET_STATUS,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			0x96, 0xB7, radio->transfer_buffer, 8, 300)<0 ||
+			0x96, 0xB7, radio->transfer_buffer, 8, 300) < 0 ||
 	usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 			USB_REQ_GET_STATUS,
 			USB_TYPE_VENDOR | USB_RECIP_DEVICE |  USB_DIR_IN,
-			0x00, 0x24, radio->transfer_buffer, 8, 300)<0) {
+			0x00, 0x24, radio->transfer_buffer, 8, 300) < 0) {
 		radio->stereo = -1;
 		return -1;
 	}
-	radio->stereo = ! ((radio->transfer_buffer)[0]&0x01);
+	radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
 	return (radio->transfer_buffer)[0];
 }
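dsbr100_setfreq() above keeps the original conversion and only respaces it: the V4L2 frequency arrives in TUNER_LOW units of 1/16 kHz and is folded into the device word via /16, *80/1000 and the +856 offset. The little program below just runs that arithmetic for a 100.0 MHz request so the intermediate values are visible; it does not talk to the hardware.

#include <stdio.h>

int main(void)
{
	/* 100.0 MHz expressed in V4L2 TUNER_LOW units (1/16 kHz). */
	int freq = 100000 * 16;

	/* Same conversion as dsbr100_setfreq(), step by step. */
	int khz  = freq / 16;			/* back to kHz: 100000 */
	int word = (khz * 80) / 1000 + 856;	/* device tuning word  */

	printf("kHz=%d tuning word=%d\n", khz, word);
	return 0;
}

It prints kHz=100000 and a tuning word of 8856.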
 
@@ -229,10 +229,10 @@
 	if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
 		USB_REQ_GET_STATUS,
 		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-		0x00 , 0x24, radio->transfer_buffer, 8, 300)<0)
+		0x00 , 0x24, radio->transfer_buffer, 8, 300) < 0)
 		radio->stereo = -1;
 	else
-		radio->stereo = ! (radio->transfer_buffer[0]&0x01);
+		radio->stereo = !(radio->transfer_buffer[0] & 0x01);
 }
 
 
@@ -265,7 +265,7 @@
 {
 	strlcpy(v->driver, "dsbr100", sizeof(v->driver));
 	strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
-	sprintf(v->bus_info, "ISA");
+	sprintf(v->bus_info, "USB");
 	v->version = RADIO_VERSION;
 	v->capabilities = V4L2_CAP_TUNER;
 	return 0;
@@ -282,9 +282,9 @@
 	dsbr100_getstat(radio);
 	strcpy(v->name, "FM");
 	v->type = V4L2_TUNER_RADIO;
-	v->rangelow = FREQ_MIN*FREQ_MUL;
-	v->rangehigh = FREQ_MAX*FREQ_MUL;
-	v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
+	v->rangelow = FREQ_MIN * FREQ_MUL;
+	v->rangehigh = FREQ_MAX * FREQ_MUL;
+	v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
 	v->capability = V4L2_TUNER_CAP_LOW;
 	if(radio->stereo)
 		v->audmode = V4L2_TUNER_MODE_STEREO;
@@ -309,8 +309,8 @@
 	struct dsbr100_device *radio = video_drvdata(file);
 
 	radio->curfreq = f->frequency;
-	if (dsbr100_setfreq(radio, radio->curfreq)==-1)
-		warn("Set frequency failed");
+	if (dsbr100_setfreq(radio, radio->curfreq) == -1)
+		dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
 	return 0;
 }
 
@@ -331,8 +331,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
 		if (qc->id && qc->id == radio_qctrl[i].id) {
-			memcpy(qc, &(radio_qctrl[i]),
-						sizeof(*qc));
+			memcpy(qc, &(radio_qctrl[i]), sizeof(*qc));
 			return 0;
 		}
 	}
@@ -361,12 +360,14 @@
 	case V4L2_CID_AUDIO_MUTE:
 		if (ctrl->value) {
 			if (dsbr100_stop(radio) == -1) {
-				warn("Radio did not respond properly");
+				dev_warn(&radio->usbdev->dev,
+					 "Radio did not respond properly\n");
 				return -EBUSY;
 			}
 		} else {
 			if (dsbr100_start(radio) == -1) {
-				warn("Radio did not respond properly");
+				dev_warn(&radio->usbdev->dev,
+					 "Radio did not respond properly\n");
 				return -EBUSY;
 			}
 		}
@@ -410,18 +411,25 @@
 static int usb_dsbr100_open(struct inode *inode, struct file *file)
 {
 	struct dsbr100_device *radio = video_drvdata(file);
+	int retval;
 
 	lock_kernel();
 	radio->users = 1;
 	radio->muted = 1;
 
-	if (dsbr100_start(radio)<0) {
-		warn("Radio did not start up properly");
+	if (dsbr100_start(radio) < 0) {
+		dev_warn(&radio->usbdev->dev,
+			 "Radio did not start up properly\n");
 		radio->users = 0;
 		unlock_kernel();
 		return -EIO;
 	}
-	dsbr100_setfreq(radio, radio->curfreq);
+
+	retval = dsbr100_setfreq(radio, radio->curfreq);
+
+	if (retval == -1)
+		printk(KERN_WARNING KBUILD_MODNAME ": Set frequency failed\n");
+
 	unlock_kernel();
 	return 0;
 }
@@ -482,13 +490,20 @@
 {
 	struct dsbr100_device *radio;
 
-	if (!(radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL)))
+	radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
+
+	if (!radio)
 		return -ENOMEM;
-	if (!(radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL))) {
+
+	radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
+
+	if (!(radio->transfer_buffer)) {
 		kfree(radio);
 		return -ENOMEM;
 	}
-	if (!(radio->videodev = video_device_alloc())) {
+	radio->videodev = video_device_alloc();
+
+	if (!(radio->videodev)) {
 		kfree(radio->transfer_buffer);
 		kfree(radio);
 		return -ENOMEM;
@@ -498,10 +513,10 @@
 	radio->removed = 0;
 	radio->users = 0;
 	radio->usbdev = interface_to_usbdev(intf);
-	radio->curfreq = FREQ_MIN*FREQ_MUL;
+	radio->curfreq = FREQ_MIN * FREQ_MUL;
 	video_set_drvdata(radio->videodev, radio);
 	if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) {
-		warn("Could not register video device");
+		dev_warn(&intf->dev, "Could not register video device\n");
 		video_device_release(radio->videodev);
 		kfree(radio->transfer_buffer);
 		kfree(radio);
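
A note on the dsbr100_setfreq() hunk above: the driver comment states that the incoming frequency uses V4L's TUNER_LOW convention, i.e. units of 1/16 kHz, and the patch only reformats the existing conversion (freq / 16 * 80) / 1000 + 856 before splitting the result into the value/index words of the control message. A minimal user-space sketch of the same arithmetic; the 100.0 MHz input is purely illustrative and not taken from the patch:

#include <stdio.h>

/* Mirror of the conversion in dsbr100_setfreq() above: a V4L2 radio
 * frequency in 1/16 kHz units becomes the 16-bit tuning word that the
 * driver splits into the value/index of its usb_control_msg() call. */
static unsigned int dsbr100_tuning_word(unsigned int v4l2_freq)
{
	unsigned int khz = v4l2_freq / 16;	/* back to plain kHz */

	return khz * 80 / 1000 + 856;
}

int main(void)
{
	unsigned int f = 100000 * 16;	/* 100.0 MHz expressed in 1/16 kHz */
	unsigned int w = dsbr100_tuning_word(f);

	/* split exactly as the driver does: (w >> 8) & 0xff and w & 0xff */
	printf("word=%u value=0x%02x index=0x%02x\n",
	       w, (w >> 8) & 0xff, w & 0xff);
	return 0;
}

For 100.0 MHz this prints word=8856, value=0x22, index=0x98, matching what the reformatted code above would send.
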
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index a33717c..256cbef 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -469,16 +469,21 @@
 {
 	struct amradio_device *radio = video_get_drvdata(video_devdata(file));
 
+	lock_kernel();
+
 	radio->users = 1;
 	radio->muted = 1;
 
 	if (amradio_start(radio) < 0) {
 		warn("Radio did not start up properly");
 		radio->users = 0;
+		unlock_kernel();
 		return -EIO;
 	}
 	if (amradio_setfreq(radio, radio->curfreq) < 0)
 		warn("Set frequency failed");
+
+	unlock_kernel();
 	return 0;
 }
 
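
The radio-mr800.c hunk does for that driver what the dsbr100 change above did: open() now takes the big kernel lock and, crucially, releases it on the error return as well as on success. Read together with its context lines, the routine ends up as below; the function name is not visible in the stripped hunk header and is inferred here:

/* The open() routine as it reads once the hunk above is applied; note the
 * unlock_kernel() added on the -EIO error path so the BKL is released on
 * every exit. (Signature inferred; the hunk header does not show it.) */
static int usb_amradio_open(struct inode *inode, struct file *file)
{
	struct amradio_device *radio = video_get_drvdata(video_devdata(file));

	lock_kernel();

	radio->users = 1;
	radio->muted = 1;

	if (amradio_start(radio) < 0) {
		warn("Radio did not start up properly");
		radio->users = 0;
		unlock_kernel();	/* error path releases the lock too */
		return -EIO;
	}
	if (amradio_setfreq(radio, radio->curfreq) < 0)
		warn("Set frequency failed");

	unlock_kernel();
	return 0;
}
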
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index f6cedcd..5920cd30 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -104,6 +104,7 @@
  *		- hardware frequency seek support
  *		- afc indication
  *		- more safety checks, let si470x_get_freq return errno
+ *		- vidioc behavior corrected according to v4l2 spec
  *
  * ToDo:
  * - add firmware download/update support
@@ -141,9 +142,9 @@
 /* USB Device ID List */
 static struct usb_device_id si470x_usb_driver_id_table[] = {
 	/* Silicon Labs USB FM Radio Reference Design */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a,	USB_CLASS_HID, 0, 0) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) },
 	/* ADS/Tech FM Radio Receiver (formerly Instant FM Music) */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x06e1, 0xa155,	USB_CLASS_HID, 0, 0) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x06e1, 0xa155, USB_CLASS_HID, 0, 0) },
 	/* Terminating entry */
 	{ }
 };
@@ -157,7 +158,7 @@
 
 /* Radio Nr */
 static int radio_nr = -1;
-module_param(radio_nr, int, 0);
+module_param(radio_nr, int, 0444);
 MODULE_PARM_DESC(radio_nr, "Radio Nr");
 
 /* Spacing (kHz) */
@@ -165,42 +166,42 @@
 /* 1: 100 kHz (Europe, Japan) */
 /* 2:  50 kHz */
 static unsigned short space = 2;
-module_param(space, ushort, 0);
-MODULE_PARM_DESC(radio_nr, "Spacing: 0=200kHz 1=100kHz *2=50kHz*");
+module_param(space, ushort, 0444);
+MODULE_PARM_DESC(space, "Spacing: 0=200kHz 1=100kHz *2=50kHz*");
 
 /* Bottom of Band (MHz) */
 /* 0: 87.5 - 108 MHz (USA, Europe)*/
 /* 1: 76   - 108 MHz (Japan wide band) */
 /* 2: 76   -  90 MHz (Japan) */
 static unsigned short band = 1;
-module_param(band, ushort, 0);
-MODULE_PARM_DESC(radio_nr, "Band: 0=87.5..108MHz *1=76..108MHz* 2=76..90MHz");
+module_param(band, ushort, 0444);
+MODULE_PARM_DESC(band, "Band: 0=87.5..108MHz *1=76..108MHz* 2=76..90MHz");
 
 /* De-emphasis */
 /* 0: 75 us (USA) */
 /* 1: 50 us (Europe, Australia, Japan) */
 static unsigned short de = 1;
-module_param(de, ushort, 0);
-MODULE_PARM_DESC(radio_nr, "De-emphasis: 0=75us *1=50us*");
+module_param(de, ushort, 0444);
+MODULE_PARM_DESC(de, "De-emphasis: 0=75us *1=50us*");
 
 /* USB timeout */
 static unsigned int usb_timeout = 500;
-module_param(usb_timeout, uint, 0);
+module_param(usb_timeout, uint, 0644);
 MODULE_PARM_DESC(usb_timeout, "USB timeout (ms): *500*");
 
 /* Tune timeout */
 static unsigned int tune_timeout = 3000;
-module_param(tune_timeout, uint, 0);
+module_param(tune_timeout, uint, 0644);
 MODULE_PARM_DESC(tune_timeout, "Tune timeout: *3000*");
 
 /* Seek timeout */
 static unsigned int seek_timeout = 5000;
-module_param(seek_timeout, uint, 0);
+module_param(seek_timeout, uint, 0644);
 MODULE_PARM_DESC(seek_timeout, "Seek timeout: *5000*");
 
 /* RDS buffer blocks */
 static unsigned int rds_buf = 100;
-module_param(rds_buf, uint, 0);
+module_param(rds_buf, uint, 0444);
 MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
 
 /* RDS maximum block errors */
@@ -209,7 +210,7 @@
 /* 1 means 1-2  errors requiring correction (used by original USBRadio.exe) */
 /* 2 means 3-5  errors requiring correction */
 /* 3 means   6+ errors or errors in checkword, correction not possible */
-module_param(max_rds_errors, ushort, 0);
+module_param(max_rds_errors, ushort, 0644);
 MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
 
 /* RDS poll frequency */
@@ -218,7 +219,7 @@
 /* 50 is used by radio-cadet */
 /* 75 should be okay */
 /* 80 is the usual RDS receive interval */
-module_param(rds_poll_time, uint, 0);
+module_param(rds_poll_time, uint, 0644);
 MODULE_PARM_DESC(rds_poll_time, "RDS poll time (ms): *40*");
 
 
@@ -667,23 +668,29 @@
 	int retval;
 
 	/* Spacing (kHz) */
-	switch (space) {
+	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) {
 	/* 0: 200 kHz (USA, Australia) */
-	case 0 : spacing = 0.200 * FREQ_MUL; break;
+	case 0:
+		spacing = 0.200 * FREQ_MUL; break;
 	/* 1: 100 kHz (Europe, Japan) */
-	case 1 : spacing = 0.100 * FREQ_MUL; break;
+	case 1:
+		spacing = 0.100 * FREQ_MUL; break;
 	/* 2:  50 kHz */
-	default: spacing = 0.050 * FREQ_MUL; break;
+	default:
+		spacing = 0.050 * FREQ_MUL; break;
 	};
 
 	/* Bottom of Band (MHz) */
-	switch (band) {
+	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
 	/* 0: 87.5 - 108 MHz (USA, Europe) */
-	case 0 : band_bottom = 87.5 * FREQ_MUL; break;
+	case 0:
+		band_bottom = 87.5 * FREQ_MUL; break;
 	/* 1: 76   - 108 MHz (Japan wide band) */
-	default: band_bottom = 76   * FREQ_MUL; break;
+	default:
+		band_bottom = 76   * FREQ_MUL; break;
 	/* 2: 76   -  90 MHz (Japan) */
-	case 2 : band_bottom = 76   * FREQ_MUL; break;
+	case 2:
+		band_bottom = 76   * FREQ_MUL; break;
 	};
 
 	/* read channel */
@@ -706,23 +713,29 @@
 	unsigned short chan;
 
 	/* Spacing (kHz) */
-	switch (space) {
+	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) {
 	/* 0: 200 kHz (USA, Australia) */
-	case 0 : spacing = 0.200 * FREQ_MUL; break;
+	case 0:
+		spacing = 0.200 * FREQ_MUL; break;
 	/* 1: 100 kHz (Europe, Japan) */
-	case 1 : spacing = 0.100 * FREQ_MUL; break;
+	case 1:
+		spacing = 0.100 * FREQ_MUL; break;
 	/* 2:  50 kHz */
-	default: spacing = 0.050 * FREQ_MUL; break;
+	default:
+		spacing = 0.050 * FREQ_MUL; break;
 	};
 
 	/* Bottom of Band (MHz) */
-	switch (band) {
+	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
 	/* 0: 87.5 - 108 MHz (USA, Europe) */
-	case 0 : band_bottom = 87.5 * FREQ_MUL; break;
+	case 0:
+		band_bottom = 87.5 * FREQ_MUL; break;
 	/* 1: 76   - 108 MHz (Japan wide band) */
-	default: band_bottom = 76   * FREQ_MUL; break;
+	default:
+		band_bottom = 76   * FREQ_MUL; break;
 	/* 2: 76   -  90 MHz (Japan) */
-	case 2 : band_bottom = 76   * FREQ_MUL; break;
+	case 2:
+		band_bottom = 76   * FREQ_MUL; break;
 	};
 
 	/* Chan = [ Freq (Mhz) - Bottom of Band (MHz) ] / Spacing (kHz) */
@@ -1164,7 +1177,6 @@
  * si470x_v4l2_queryctrl - query control
  */
 static struct v4l2_queryctrl si470x_v4l2_queryctrl[] = {
-/* HINT: the disabled controls are only here to satify kradio and such apps */
 	{
 		.id		= V4L2_CID_AUDIO_VOLUME,
 		.type		= V4L2_CTRL_TYPE_INTEGER,
@@ -1175,18 +1187,6 @@
 		.default_value	= 15,
 	},
 	{
-		.id		= V4L2_CID_AUDIO_BALANCE,
-		.flags		= V4L2_CTRL_FLAG_DISABLED,
-	},
-	{
-		.id		= V4L2_CID_AUDIO_BASS,
-		.flags		= V4L2_CTRL_FLAG_DISABLED,
-	},
-	{
-		.id		= V4L2_CID_AUDIO_TREBLE,
-		.flags		= V4L2_CTRL_FLAG_DISABLED,
-	},
-	{
 		.id		= V4L2_CID_AUDIO_MUTE,
 		.type		= V4L2_CTRL_TYPE_BOOLEAN,
 		.name		= "Mute",
@@ -1195,10 +1195,6 @@
 		.step		= 1,
 		.default_value	= 1,
 	},
-	{
-		.id		= V4L2_CID_AUDIO_LOUDNESS,
-		.flags		= V4L2_CTRL_FLAG_DISABLED,
-	},
 };
 
 
@@ -1220,56 +1216,34 @@
 
 
 /*
- * si470x_vidioc_g_input - get input
- */
-static int si470x_vidioc_g_input(struct file *file, void *priv,
-		unsigned int *i)
-{
-	*i = 0;
-
-	return 0;
-}
-
-
-/*
- * si470x_vidioc_s_input - set input
- */
-static int si470x_vidioc_s_input(struct file *file, void *priv, unsigned int i)
-{
-	int retval = 0;
-
-	/* safety checks */
-	if (i != 0)
-		retval = -EINVAL;
-
-	if (retval < 0)
-		printk(KERN_WARNING DRIVER_NAME
-			": set input failed with %d\n", retval);
-	return retval;
-}
-
-
-/*
  * si470x_vidioc_queryctrl - enumerate control items
  */
 static int si470x_vidioc_queryctrl(struct file *file, void *priv,
 		struct v4l2_queryctrl *qc)
 {
-	unsigned char i;
+	unsigned char i = 0;
 	int retval = -EINVAL;
 
-	/* safety checks */
-	if (!qc->id)
+	/* abort if qc->id is below V4L2_CID_BASE */
+	if (qc->id < V4L2_CID_BASE)
 		goto done;
 
+	/* search video control */
 	for (i = 0; i < ARRAY_SIZE(si470x_v4l2_queryctrl); i++) {
 		if (qc->id == si470x_v4l2_queryctrl[i].id) {
 			memcpy(qc, &(si470x_v4l2_queryctrl[i]), sizeof(*qc));
-			retval = 0;
+			retval = 0; /* found */
 			break;
 		}
 	}
 
+	/* disable unsupported base controls */
+	/* to satisfy kradio and such apps */
+	if ((retval == -EINVAL) && (qc->id < V4L2_CID_LASTP1)) {
+		qc->flags = V4L2_CTRL_FLAG_DISABLED;
+		retval = 0;
+	}
+
 done:
 	if (retval < 0)
 		printk(KERN_WARNING DRIVER_NAME
@@ -1360,44 +1334,13 @@
 static int si470x_vidioc_g_audio(struct file *file, void *priv,
 		struct v4l2_audio *audio)
 {
-	int retval = 0;
-
-	/* safety checks */
-	if (audio->index != 0) {
-		retval = -EINVAL;
-		goto done;
-	}
-
+	/* driver constants */
+	audio->index = 0;
 	strcpy(audio->name, "Radio");
 	audio->capability = V4L2_AUDCAP_STEREO;
+	audio->mode = 0;
 
-done:
-	if (retval < 0)
-		printk(KERN_WARNING DRIVER_NAME
-			": get audio failed with %d\n", retval);
-	return retval;
-}
-
-
-/*
- * si470x_vidioc_s_audio - set audio attributes
- */
-static int si470x_vidioc_s_audio(struct file *file, void *priv,
-		struct v4l2_audio *audio)
-{
-	int retval = 0;
-
-	/* safety checks */
-	if (audio->index != 0) {
-		retval = -EINVAL;
-		goto done;
-	}
-
-done:
-	if (retval < 0)
-		printk(KERN_WARNING DRIVER_NAME
-			": set audio failed with %d\n", retval);
-	return retval;
+	return 0;
 }
 
 
@@ -1415,7 +1358,7 @@
 		retval = -EIO;
 		goto done;
 	}
-	if ((tuner->index != 0) && (tuner->type != V4L2_TUNER_RADIO)) {
+	if (tuner->index != 0) {
 		retval = -EINVAL;
 		goto done;
 	}
@@ -1424,8 +1367,13 @@
 	if (retval < 0)
 		goto done;
 
+	/* driver constants */
 	strcpy(tuner->name, "FM");
-	switch (band) {
+	tuner->type = V4L2_TUNER_RADIO;
+	tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+
+	/* range limits */
+	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
 	/* 0: 87.5 - 108 MHz (USA, Europe, default) */
 	default:
 		tuner->rangelow  =  87.5 * FREQ_MUL;
@@ -1442,14 +1390,18 @@
 		tuner->rangehigh =  90   * FREQ_MUL;
 		break;
 	};
-	tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-	tuner->capability = V4L2_TUNER_CAP_LOW;
 
-	/* Stereo indicator == Stereo (instead of Mono) */
+	/* stereo indicator == stereo (instead of mono) */
 	if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 1)
-		tuner->audmode = V4L2_TUNER_MODE_STEREO;
+		tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
 	else
+		tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
+
+	/* mono/stereo selector */
+	if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 1)
 		tuner->audmode = V4L2_TUNER_MODE_MONO;
+	else
+		tuner->audmode = V4L2_TUNER_MODE_STEREO;
 
 	/* min is worst, max is best; signal:0..0xffff; rssi: 0..0xff */
 	tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI)
@@ -1474,23 +1426,28 @@
 		struct v4l2_tuner *tuner)
 {
 	struct si470x_device *radio = video_drvdata(file);
-	int retval = 0;
+	int retval = -EINVAL;
 
 	/* safety checks */
 	if (radio->disconnected) {
 		retval = -EIO;
 		goto done;
 	}
-	if ((tuner->index != 0) && (tuner->type != V4L2_TUNER_RADIO)) {
-		retval = -EINVAL;
+	if (tuner->index != 0)
+		goto done;
+
+	/* mono/stereo selector */
+	switch (tuner->audmode) {
+	case V4L2_TUNER_MODE_MONO:
+		radio->registers[POWERCFG] |= POWERCFG_MONO;  /* force mono */
+		break;
+	case V4L2_TUNER_MODE_STEREO:
+		radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */
+		break;
+	default:
 		goto done;
 	}
 
-	if (tuner->audmode == V4L2_TUNER_MODE_MONO)
-		radio->registers[POWERCFG] |= POWERCFG_MONO;  /* force mono */
-	else
-		radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */
-
 	retval = si470x_set_register(radio, POWERCFG);
 
 done:
@@ -1515,11 +1472,12 @@
 		retval = -EIO;
 		goto done;
 	}
-	if ((freq->tuner != 0) && (freq->type != V4L2_TUNER_RADIO)) {
+	if (freq->tuner != 0) {
 		retval = -EINVAL;
 		goto done;
 	}
 
+	freq->type = V4L2_TUNER_RADIO;
 	retval = si470x_get_freq(radio, &freq->frequency);
 
 done:
@@ -1544,7 +1502,7 @@
 		retval = -EIO;
 		goto done;
 	}
-	if ((freq->tuner != 0) && (freq->type != V4L2_TUNER_RADIO)) {
+	if (freq->tuner != 0) {
 		retval = -EINVAL;
 		goto done;
 	}
@@ -1573,7 +1531,7 @@
 		retval = -EIO;
 		goto done;
 	}
-	if ((seek->tuner != 0) && (seek->type != V4L2_TUNER_RADIO)) {
+	if (seek->tuner != 0) {
 		retval = -EINVAL;
 		goto done;
 	}
@@ -1588,15 +1546,16 @@
 	return retval;
 }
 
+
+/*
+ * si470x_ioctl_ops - video device ioctl operations
+ */
 static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
 	.vidioc_querycap	= si470x_vidioc_querycap,
-	.vidioc_g_input		= si470x_vidioc_g_input,
-	.vidioc_s_input		= si470x_vidioc_s_input,
 	.vidioc_queryctrl	= si470x_vidioc_queryctrl,
 	.vidioc_g_ctrl		= si470x_vidioc_g_ctrl,
 	.vidioc_s_ctrl		= si470x_vidioc_s_ctrl,
 	.vidioc_g_audio		= si470x_vidioc_g_audio,
-	.vidioc_s_audio		= si470x_vidioc_s_audio,
 	.vidioc_g_tuner		= si470x_vidioc_g_tuner,
 	.vidioc_s_tuner		= si470x_vidioc_s_tuner,
 	.vidioc_g_frequency	= si470x_vidioc_g_frequency,
@@ -1604,14 +1563,15 @@
 	.vidioc_s_hw_freq_seek	= si470x_vidioc_s_hw_freq_seek,
 };
 
+
 /*
- * si470x_viddev_tamples - video device interface
+ * si470x_viddev_template - video device interface
  */
 static struct video_device si470x_viddev_template = {
 	.fops			= &si470x_fops,
-	.ioctl_ops 		= &si470x_ioctl_ops,
 	.name			= DRIVER_NAME,
 	.release		= video_device_release,
+	.ioctl_ops		= &si470x_ioctl_ops,
 };
 
 
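
The behavioural core of the radio-si470x.c changes above is in VIDIOC_QUERYCTRL: instead of carrying placeholder table entries flagged V4L2_CTRL_FLAG_DISABLED, the driver now keeps only the controls it really implements and reports any other standard user control as present-but-disabled at query time, which is what keeps applications such as kradio happy. Condensed view of the patched handler (the warning printout at the done: label is omitted for brevity):

/* Condensed si470x_vidioc_queryctrl() after the patch: look the control up
 * in the driver's table, and report any other control in the standard user
 * range as disabled rather than failing with -EINVAL. */
static int si470x_vidioc_queryctrl(struct file *file, void *priv,
		struct v4l2_queryctrl *qc)
{
	unsigned char i;

	if (qc->id < V4L2_CID_BASE)		/* not a user control at all */
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(si470x_v4l2_queryctrl); i++) {
		if (qc->id == si470x_v4l2_queryctrl[i].id) {
			memcpy(qc, &si470x_v4l2_queryctrl[i], sizeof(*qc));
			return 0;		/* supported control */
		}
	}

	if (qc->id < V4L2_CID_LASTP1) {		/* known, but unsupported */
		qc->flags = V4L2_CTRL_FLAG_DISABLED;
		return 0;
	}

	return -EINVAL;
}
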
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index f794f2d..e0eb4f3 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -29,43 +29,24 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
 #include <linux/video_encoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
 MODULE_AUTHOR("Maxim Yevtyushkin");
 MODULE_LICENSE("GPL");
 
-
-#define I2C_NAME(x) (x)->name
-
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 struct adv7170 {
@@ -80,21 +61,12 @@
 	int sat;
 };
 
-#define   I2C_ADV7170        0xd4
-#define   I2C_ADV7171        0x54
-
-static char adv7170_name[] = "adv7170";
-static char adv7171_name[] = "adv7171";
-
 static char *inputs[] = { "pass_through", "play_back" };
 static char *norms[] = { "PAL", "NTSC" };
 
 /* ----------------------------------------------------------------------- */
 
-static inline int
-adv7170_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+static inline int adv7170_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct adv7170 *encoder = i2c_get_clientdata(client);
 
@@ -102,17 +74,13 @@
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static inline int
-adv7170_read (struct i2c_client *client,
-	      u8                 reg)
+static inline int adv7170_read(struct i2c_client *client, u8 reg)
 {
 	return i2c_smbus_read_byte_data(client, reg);
 }
 
-static int
-adv7170_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int adv7170_write_block(struct i2c_client *client,
+		     const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg;
@@ -133,33 +101,25 @@
 				    encoder->reg[reg++] = data[1];
 				len -= 2;
 				data += 2;
-			} while (len >= 2 && data[0] == reg &&
-				 block_len < 32);
-			if ((ret = i2c_master_send(client, block_data,
-						   block_len)) < 0)
+			} while (len >= 2 && data[0] == reg && block_len < 32);
+			ret = i2c_master_send(client, block_data, block_len);
+			if (ret < 0)
 				break;
 		}
 	} else {
 		/* do some slow I2C emulation kind of thing */
 		while (len >= 2) {
 			reg = *data++;
-			if ((ret = adv7170_write(client, reg,
-						 *data++)) < 0)
+			ret = adv7170_write(client, reg, *data++);
+			if (ret < 0)
 				break;
 			len -= 2;
 		}
 	}
-
 	return ret;
 }
 
 /* ----------------------------------------------------------------------- */
-// Output filter:  S-Video  Composite
-
-#define MR050       0x11	//0x09
-#define MR060       0x14	//0x0c
-
-//---------------------------------------------------------------------------
 
 #define TR0MODE     0x4c
 #define TR0RST	    0x80
@@ -167,7 +127,6 @@
 #define TR1CAPT	    0x00
 #define TR1PLAY	    0x00
 
-
 static const unsigned char init_NTSC[] = {
 	0x00, 0x10,		// MR0
 	0x01, 0x20,		// MR1
@@ -227,15 +186,11 @@
 };
 
 
-static int
-adv7170_command (struct i2c_client *client,
-		 unsigned int       cmd,
-		 void *             arg)
+static int adv7170_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct adv7170 *encoder = i2c_get_clientdata(client);
 
 	switch (cmd) {
-
 	case 0:
 #if 0
 		/* This is just for testing!!! */
@@ -254,18 +209,16 @@
 			     VIDEO_ENCODER_NTSC;
 		cap->inputs = 2;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case ENCODER_SET_NORM:
 	{
 		int iarg = *(int *) arg;
 
-		dprintk(1, KERN_DEBUG "%s_command: set norm %d",
-			I2C_NAME(client), iarg);
+		v4l_dbg(1, debug, client, "set norm %d\n", iarg);
 
 		switch (iarg) {
-
 		case VIDEO_MODE_NTSC:
 			adv7170_write_block(client, init_NTSC,
 					    sizeof(init_NTSC));
@@ -285,16 +238,13 @@
 			break;
 
 		default:
-			dprintk(1, KERN_ERR "%s: illegal norm: %d\n",
-			       I2C_NAME(client), iarg);
+			v4l_dbg(1, debug, client, "illegal norm: %d\n", iarg);
 			return -EINVAL;
-
 		}
-		dprintk(1, KERN_DEBUG "%s: switched to %s\n", I2C_NAME(client),
-			norms[iarg]);
+		v4l_dbg(1, debug, client, "switched to %s\n", norms[iarg]);
 		encoder->norm = iarg;
-	}
 		break;
+	}
 
 	case ENCODER_SET_INPUT:
 	{
@@ -304,19 +254,17 @@
 		 *iarg = 1: input is from ZR36060
 		 *iarg = 2: color bar */
 
-		dprintk(1, KERN_DEBUG "%s_command: set input from %s\n",
-			I2C_NAME(client),
+		v4l_dbg(1, debug, client, "set input from %s\n",
 			iarg == 0 ? "decoder" : "ZR36060");
 
 		switch (iarg) {
-
 		case 0:
 			adv7170_write(client, 0x01, 0x20);
 			adv7170_write(client, 0x08, TR1CAPT);	/* TR1 */
 			adv7170_write(client, 0x02, 0x0e);	// Enable genlock
 			adv7170_write(client, 0x07, TR0MODE | TR0RST);
 			adv7170_write(client, 0x07, TR0MODE);
-			//udelay(10);
+			/* udelay(10); */
 			break;
 
 		case 1:
@@ -325,20 +273,17 @@
 			adv7170_write(client, 0x02, 0x08);
 			adv7170_write(client, 0x07, TR0MODE | TR0RST);
 			adv7170_write(client, 0x07, TR0MODE);
-			//udelay(10);
+			/* udelay(10); */
 			break;
 
 		default:
-			dprintk(1, KERN_ERR "%s: illegal input: %d\n",
-				I2C_NAME(client), iarg);
+			v4l_dbg(1, debug, client, "illegal input: %d\n", iarg);
 			return -EINVAL;
-
 		}
-		dprintk(1, KERN_DEBUG "%s: switched to %s\n", I2C_NAME(client),
-			inputs[iarg]);
+		v4l_dbg(1, debug, client, "switched to %s\n", inputs[iarg]);
 		encoder->input = iarg;
-	}
 		break;
+	}
 
 	case ENCODER_SET_OUTPUT:
 	{
@@ -348,16 +293,16 @@
 		if (*iarg != 0) {
 			return -EINVAL;
 		}
-	}
 		break;
+	}
 
 	case ENCODER_ENABLE_OUTPUT:
 	{
 		int *iarg = arg;
 
 		encoder->enable = !!*iarg;
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -368,149 +313,67 @@
 
 /* ----------------------------------------------------------------------- */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] =
-    { I2C_ADV7170 >> 1, (I2C_ADV7170 >> 1) + 1,
-	I2C_ADV7171 >> 1, (I2C_ADV7171 >> 1) + 1,
+static unsigned short normal_i2c[] = {
+	0xd4 >> 1, 0xd6 >> 1,	/* adv7170 IDs */
+	0x54 >> 1, 0x56 >> 1,	/* adv7171 IDs */
 	I2C_CLIENT_END
 };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_adv7170;
-
-static int
-adv7170_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
+static int adv7170_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
-	int i;
-	struct i2c_client *client;
 	struct adv7170 *encoder;
-	char *dname;
-
-	dprintk(1,
-		KERN_INFO
-		"adv7170.c: detecting adv7170 client on address 0x%x\n",
-		address << 1);
+	int i;
 
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_adv7170;
-	if ((client->addr == I2C_ADV7170 >> 1) ||
-	    (client->addr == (I2C_ADV7170 >> 1) + 1)) {
-		dname = adv7170_name;
-	} else if ((client->addr == I2C_ADV7171 >> 1) ||
-		   (client->addr == (I2C_ADV7171 >> 1) + 1)) {
-		dname = adv7171_name;
-	} else {
-		/* We should never get here!!! */
-		kfree(client);
-		return 0;
-	}
-	strlcpy(I2C_NAME(client), dname, sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL);
-	if (encoder == NULL) {
-		kfree(client);
+	if (encoder == NULL)
 		return -ENOMEM;
-	}
 	encoder->norm = VIDEO_MODE_NTSC;
 	encoder->input = 0;
 	encoder->enable = 1;
 	i2c_set_clientdata(client, encoder);
 
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(encoder);
-		return i;
-	}
-
 	i = adv7170_write_block(client, init_NTSC, sizeof(init_NTSC));
 	if (i >= 0) {
 		i = adv7170_write(client, 0x07, TR0MODE | TR0RST);
 		i = adv7170_write(client, 0x07, TR0MODE);
 		i = adv7170_read(client, 0x12);
-		dprintk(1, KERN_INFO "%s_attach: rev. %d at 0x%02x\n",
-			I2C_NAME(client), i & 1, client->addr << 1);
+		v4l_dbg(1, debug, client, "revision %d\n", i & 1);
 	}
-	if (i < 0) {
-		dprintk(1, KERN_ERR "%s_attach: init error 0x%x\n",
-		       I2C_NAME(client), i);
-	}
-
+	if (i < 0)
+		v4l_dbg(1, debug, client, "init error 0x%x\n", i);
 	return 0;
 }
 
-static int
-adv7170_attach_adapter (struct i2c_adapter *adapter)
+static int adv7170_remove(struct i2c_client *client)
 {
-	dprintk(1,
-		KERN_INFO
-		"adv7170.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &adv7170_detect_client);
-}
-
-static int
-adv7170_detach_client (struct i2c_client *client)
-{
-	struct adv7170 *encoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(encoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_adv7170 = {
-	.driver = {
-		.name = "adv7170",	/* name */
-	},
-
-	.id = I2C_DRIVERID_ADV7170,
-
-	.attach_adapter = adv7170_attach_adapter,
-	.detach_client = adv7170_detach_client,
-	.command = adv7170_command,
+static const struct i2c_device_id adv7170_id[] = {
+	{ "adv7170", 0 },
+	{ "adv7171", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adv7170_id);
 
-static int __init
-adv7170_init (void)
-{
-	return i2c_add_driver(&i2c_driver_adv7170);
-}
-
-static void __exit
-adv7170_exit (void)
-{
-	i2c_del_driver(&i2c_driver_adv7170);
-}
-
-module_init(adv7170_init);
-module_exit(adv7170_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "adv7170",
+	.driverid = I2C_DRIVERID_ADV7170,
+	.command = adv7170_command,
+	.probe = adv7170_probe,
+	.remove = adv7170_remove,
+	.id_table = adv7170_id,
+};
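
The adv7170 conversion above is representative of the rest of this series: the hand-rolled detect/attach/detach glue disappears, and the driver supplies only probe(), remove(), the legacy command() callback and an i2c_device_id table, relying on media/v4l2-i2c-drv-legacy.h to generate the i2c_driver registration and module init/exit from the v4l2_i2c_data it finds. A bare-bones sketch of that shape; the foo_* names and the contents of foo_state are illustrative, not from the patch:

#include <linux/module.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/v4l2-i2c-drv-legacy.h>

struct foo_state {
	int norm;			/* whatever per-client state is needed */
};

static int foo_command(struct i2c_client *client, unsigned cmd, void *arg)
{
	return -EINVAL;			/* real drivers dispatch on cmd here */
}

static int foo_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct foo_state *state;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	state = kzalloc(sizeof(struct foo_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	i2c_set_clientdata(client, state);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	kfree(i2c_get_clientdata(client));
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

/* v4l2-i2c-drv-legacy.h builds the i2c_driver and module init/exit from
 * this; the converted drivers above additionally set .driverid. */
static struct v4l2_i2c_driver_data v4l2_i2c_data = {
	.name = "foo",
	.command = foo_command,
	.probe = foo_probe,
	.remove = foo_remove,
	.id_table = foo_id,
};
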
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index 8ee07a6..6008e84 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -25,43 +25,24 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
 #include <linux/video_encoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Analog Devices ADV7175 video encoder driver");
 MODULE_AUTHOR("Dave Perks");
 MODULE_LICENSE("GPL");
 
-
-#define I2C_NAME(s) (s)->name
-
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 struct adv7175 {
@@ -77,33 +58,23 @@
 #define   I2C_ADV7175        0xd4
 #define   I2C_ADV7176        0x54
 
-static char adv7175_name[] = "adv7175";
-static char adv7176_name[] = "adv7176";
-
 static char *inputs[] = { "pass_through", "play_back", "color_bar" };
 static char *norms[] = { "PAL", "NTSC", "SECAM->PAL (may not work!)" };
 
 /* ----------------------------------------------------------------------- */
 
-static inline int
-adv7175_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+static inline int adv7175_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static inline int
-adv7175_read (struct i2c_client *client,
-	      u8                 reg)
+static inline int adv7175_read(struct i2c_client *client, u8 reg)
 {
 	return i2c_smbus_read_byte_data(client, reg);
 }
 
-static int
-adv7175_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int adv7175_write_block(struct i2c_client *client,
+		     const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg;
@@ -123,18 +94,17 @@
 				reg++;
 				len -= 2;
 				data += 2;
-			} while (len >= 2 && data[0] == reg &&
-				 block_len < 32);
-			if ((ret = i2c_master_send(client, block_data,
-						   block_len)) < 0)
+			} while (len >= 2 && data[0] == reg && block_len < 32);
+			ret = i2c_master_send(client, block_data, block_len);
+			if (ret < 0)
 				break;
 		}
 	} else {
 		/* do some slow I2C emulation kind of thing */
 		while (len >= 2) {
 			reg = *data++;
-			if ((ret = adv7175_write(client, reg,
-						 *data++)) < 0)
+			ret = adv7175_write(client, reg, *data++);
+			if (ret < 0)
 				break;
 			len -= 2;
 		}
@@ -143,13 +113,11 @@
 	return ret;
 }
 
-static void
-set_subcarrier_freq (struct i2c_client *client,
-		     int                pass_through)
+static void set_subcarrier_freq(struct i2c_client *client, int pass_through)
 {
 	/* for some reason pass_through NTSC needs
 	 * a different sub-carrier freq to remain stable. */
-	if(pass_through)
+	if (pass_through)
 		adv7175_write(client, 0x02, 0x00);
 	else
 		adv7175_write(client, 0x02, 0x55);
@@ -160,12 +128,12 @@
 }
 
 /* ----------------------------------------------------------------------- */
-// Output filter:  S-Video  Composite
+/* Output filter:  S-Video  Composite */
 
-#define MR050       0x11	//0x09
-#define MR060       0x14	//0x0c
+#define MR050       0x11	/* 0x09 */
+#define MR060       0x14	/* 0x0c */
 
-//---------------------------------------------------------------------------
+/* ----------------------------------------------------------------------- */
 
 #define TR0MODE     0x46
 #define TR0RST	    0x80
@@ -216,15 +184,11 @@
 	0x06, 0x1a,		/* subc. phase */
 };
 
-static int
-adv7175_command (struct i2c_client *client,
-		 unsigned int       cmd,
-		 void              *arg)
+static int adv7175_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct adv7175 *encoder = i2c_get_clientdata(client);
 
 	switch (cmd) {
-
 	case 0:
 		/* This is just for testing!!! */
 		adv7175_write_block(client, init_common,
@@ -242,15 +206,14 @@
 			     VIDEO_ENCODER_SECAM; /* well, hacky */
 		cap->inputs = 2;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case ENCODER_SET_NORM:
 	{
 		int iarg = *(int *) arg;
 
 		switch (iarg) {
-
 		case VIDEO_MODE_NTSC:
 			adv7175_write_block(client, init_ntsc,
 					    sizeof(init_ntsc));
@@ -284,16 +247,13 @@
 			adv7175_write(client, 0x07, TR0MODE);
 			break;
 		default:
-			dprintk(1, KERN_ERR "%s: illegal norm: %d\n",
-				I2C_NAME(client), iarg);
+			v4l_dbg(1, debug, client, "illegal norm: %d\n", iarg);
 			return -EINVAL;
-
 		}
-		dprintk(1, KERN_INFO "%s: switched to %s\n", I2C_NAME(client),
-			norms[iarg]);
+		v4l_dbg(1, debug, client, "switched to %s\n", norms[iarg]);
 		encoder->norm = iarg;
-	}
 		break;
+	}
 
 	case ENCODER_SET_INPUT:
 	{
@@ -304,7 +264,6 @@
 		 *iarg = 2: color bar */
 
 		switch (iarg) {
-
 		case 0:
 			adv7175_write(client, 0x01, 0x00);
 
@@ -331,7 +290,7 @@
 			adv7175_write(client, 0x0d, 0x49);
 			adv7175_write(client, 0x07, TR0MODE | TR0RST);
 			adv7175_write(client, 0x07, TR0MODE);
-			//udelay(10);
+			/* udelay(10); */
 			break;
 
 		case 2:
@@ -343,39 +302,35 @@
 			adv7175_write(client, 0x0d, 0x49);
 			adv7175_write(client, 0x07, TR0MODE | TR0RST);
 			adv7175_write(client, 0x07, TR0MODE);
-			//udelay(10);
+			/* udelay(10); */
 			break;
 
 		default:
-			dprintk(1, KERN_ERR "%s: illegal input: %d\n",
-				I2C_NAME(client), iarg);
+			v4l_dbg(1, debug, client, "illegal input: %d\n", iarg);
 			return -EINVAL;
-
 		}
-		dprintk(1, KERN_INFO "%s: switched to %s\n", I2C_NAME(client),
-			inputs[iarg]);
+		v4l_dbg(1, debug, client, "switched to %s\n", inputs[iarg]);
 		encoder->input = iarg;
-	}
 		break;
+	}
 
 	case ENCODER_SET_OUTPUT:
 	{
 		int *iarg = arg;
 
 		/* not much choice of outputs */
-		if (*iarg != 0) {
+		if (*iarg != 0)
 			return -EINVAL;
-		}
-	}
 		break;
+	}
 
 	case ENCODER_ENABLE_OUTPUT:
 	{
 		int *iarg = arg;
 
 		encoder->enable = !!*iarg;
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -390,145 +345,67 @@
  * Generic i2c probe
  * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
  */
-static unsigned short normal_i2c[] =
-    { I2C_ADV7175 >> 1, (I2C_ADV7175 >> 1) + 1,
+static unsigned short normal_i2c[] = {
+	I2C_ADV7175 >> 1, (I2C_ADV7175 >> 1) + 1,
 	I2C_ADV7176 >> 1, (I2C_ADV7176 >> 1) + 1,
 	I2C_CLIENT_END
 };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_adv7175;
-
-static int
-adv7175_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
+static int adv7175_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	int i;
-	struct i2c_client *client;
 	struct adv7175 *encoder;
-	char *dname;
-
-	dprintk(1,
-		KERN_INFO
-		"adv7175.c: detecting adv7175 client on address 0x%x\n",
-		address << 1);
 
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_adv7175;
-	if ((client->addr == I2C_ADV7175 >> 1) ||
-	    (client->addr == (I2C_ADV7175 >> 1) + 1)) {
-		dname = adv7175_name;
-	} else if ((client->addr == I2C_ADV7176 >> 1) ||
-		   (client->addr == (I2C_ADV7176 >> 1) + 1)) {
-		dname = adv7176_name;
-	} else {
-		/* We should never get here!!! */
-		kfree(client);
-		return 0;
-	}
-	strlcpy(I2C_NAME(client), dname, sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	encoder = kzalloc(sizeof(struct adv7175), GFP_KERNEL);
-	if (encoder == NULL) {
-		kfree(client);
+	if (encoder == NULL)
 		return -ENOMEM;
-	}
 	encoder->norm = VIDEO_MODE_PAL;
 	encoder->input = 0;
 	encoder->enable = 1;
 	i2c_set_clientdata(client, encoder);
 
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(encoder);
-		return i;
-	}
-
 	i = adv7175_write_block(client, init_common, sizeof(init_common));
 	if (i >= 0) {
 		i = adv7175_write(client, 0x07, TR0MODE | TR0RST);
 		i = adv7175_write(client, 0x07, TR0MODE);
 		i = adv7175_read(client, 0x12);
-		dprintk(1, KERN_INFO "%s_attach: rev. %d at 0x%x\n",
-			I2C_NAME(client), i & 1, client->addr << 1);
+		v4l_dbg(1, debug, client, "revision %d\n", i & 1);
 	}
-	if (i < 0) {
-		dprintk(1, KERN_ERR "%s_attach: init error 0x%x\n",
-			I2C_NAME(client), i);
-	}
-
+	if (i < 0)
+		v4l_dbg(1, debug, client, "init error 0x%x\n", i);
 	return 0;
 }
 
-static int
-adv7175_attach_adapter (struct i2c_adapter *adapter)
+static int adv7175_remove(struct i2c_client *client)
 {
-	dprintk(1,
-		KERN_INFO
-		"adv7175.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &adv7175_detect_client);
-}
-
-static int
-adv7175_detach_client (struct i2c_client *client)
-{
-	struct adv7175 *encoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(encoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_adv7175 = {
-	.driver = {
-		.name = "adv7175",	/* name */
-	},
-
-	.id = I2C_DRIVERID_ADV7175,
-
-	.attach_adapter = adv7175_attach_adapter,
-	.detach_client = adv7175_detach_client,
-	.command = adv7175_command,
+static const struct i2c_device_id adv7175_id[] = {
+	{ "adv7175", 0 },
+	{ "adv7176", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adv7175_id);
 
-static int __init
-adv7175_init (void)
-{
-	return i2c_add_driver(&i2c_driver_adv7175);
-}
-
-static void __exit
-adv7175_exit (void)
-{
-	i2c_del_driver(&i2c_driver_adv7175);
-}
-
-module_init(adv7175_init);
-module_exit(adv7175_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "adv7175",
+	.driverid = I2C_DRIVERID_ADV7175,
+	.command = adv7175_command,
+	.probe = adv7175_probe,
+	.remove = adv7175_remove,
+	.id_table = adv7175_id,
+};
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 218754b..e09b006 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -866,7 +866,7 @@
 	}
 
 	printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
-		ar->vdev->minor, M32R_IRQ_INT3, freq);
+		ar->vdev->num, M32R_IRQ_INT3, freq);
 
 	return 0;
 
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 5f07a8a..d60123b 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -90,6 +90,7 @@
 	case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and basic analog video */
 	case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and basic analog video */
 	case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and basic analog video */
+	case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and basic analog video */
 	case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and basic analog video */
 	case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
 		break;
@@ -185,7 +186,7 @@
 }
 
 /* table of devices that work with this driver */
-struct usb_device_id au0828_usb_id_table [] = {
+struct usb_device_id au0828_usb_id_table[] = {
 	{ USB_DEVICE(0x2040, 0x7200),
 		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
 	{ USB_DEVICE(0x2040, 0x7240),
@@ -198,6 +199,8 @@
 		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
 	{ USB_DEVICE(0x2040, 0x721b),
 		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+	{ USB_DEVICE(0x2040, 0x721e),
+		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
 	{ USB_DEVICE(0x2040, 0x721f),
 		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
 	{ USB_DEVICE(0x2040, 0x7280),
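
The au0828-cards.c hunks above only grow two tables: a new retail board number (72251) in the eeprom check and a new product ID (0x2040:0x721e) in au0828_usb_id_table[]. The board type travels in .driver_info; a hypothetical fragment showing where such a value surfaces again, since au0828's real probe function lies outside this diff:

/* Hypothetical USB probe fragment: the value placed in .driver_info by
 * entries like those in au0828_usb_id_table[] above is handed back to the
 * driver through the matched usb_device_id. */
static int example_usb_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	unsigned long board = id->driver_info; /* e.g. AU0828_BOARD_HAUPPAUGE_HVR950Q */

	dev_info(&intf->dev, "matched board type %lu\n", board);
	return 0;
}
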
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index d856de9..5765e86 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -91,7 +91,8 @@
 		status = usb_control_msg(dev->usbdev,
 				usb_sndctrlpipe(dev->usbdev, 0),
 				request,
-				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				USB_DIR_OUT | USB_TYPE_VENDOR |
+					USB_RECIP_DEVICE,
 				value, index,
 				cp, size, 1000);
 
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index f0fcdb4..a882cf5 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -173,7 +173,8 @@
 		purb->status = -EINPROGRESS;
 		usb_fill_bulk_urb(purb,
 				  dev->usbdev,
-				  usb_rcvbulkpipe(dev->usbdev, _AU0828_BULKPIPE),
+				  usb_rcvbulkpipe(dev->usbdev,
+					_AU0828_BULKPIPE),
 				  purb->transfer_buffer,
 				  URB_BUFSIZE,
 				  urb_completion,
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index ddd2a79..a07b7b8 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -29,44 +29,25 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
 #include <linux/video_decoder.h>
-
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Brooktree-819 video decoder driver");
 MODULE_AUTHOR("Mike Bernson & Dave Perks");
 MODULE_LICENSE("GPL");
 
-
-#define I2C_NAME(s) (s)->name
-
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 struct bt819 {
@@ -97,14 +78,9 @@
 	{858 - 24, 20, 525 - 2, 1, 0x00f8, 0x0000},
 };
 
-#define   I2C_BT819        0x8a
-
 /* ----------------------------------------------------------------------- */
 
-static inline int
-bt819_write (struct i2c_client *client,
-	     u8                 reg,
-	     u8                 value)
+static inline int bt819_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct bt819 *decoder = i2c_get_clientdata(client);
 
@@ -112,24 +88,15 @@
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static inline int
-bt819_setbit (struct i2c_client *client,
-	      u8                 reg,
-	      u8                 bit,
-	      u8                 value)
+static inline int bt819_setbit(struct i2c_client *client, u8 reg, u8 bit, u8 value)
 {
 	struct bt819 *decoder = i2c_get_clientdata(client);
 
 	return bt819_write(client, reg,
-			   (decoder->
-			    reg[reg] & ~(1 << bit)) |
-			    (value ? (1 << bit) : 0));
+		(decoder->reg[reg] & ~(1 << bit)) | (value ? (1 << bit) : 0));
 }
 
-static int
-bt819_write_block (struct i2c_client *client,
-		   const u8          *data,
-		   unsigned int       len)
+static int bt819_write_block(struct i2c_client *client, const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg;
@@ -150,10 +117,9 @@
 				    decoder->reg[reg++] = data[1];
 				len -= 2;
 				data += 2;
-			} while (len >= 2 && data[0] == reg &&
-				 block_len < 32);
-			if ((ret = i2c_master_send(client, block_data,
-						   block_len)) < 0)
+			} while (len >= 2 && data[0] == reg && block_len < 32);
+			ret = i2c_master_send(client, block_data, block_len);
+			if (ret < 0)
 				break;
 		}
 	} else {
@@ -169,20 +135,17 @@
 	return ret;
 }
 
-static inline int
-bt819_read (struct i2c_client *client,
-	    u8                 reg)
+static inline int bt819_read(struct i2c_client *client, u8 reg)
 {
 	return i2c_smbus_read_byte_data(client, reg);
 }
 
-static int
-bt819_init (struct i2c_client *client)
+static int bt819_init(struct i2c_client *client)
 {
 	struct bt819 *decoder = i2c_get_clientdata(client);
 
 	static unsigned char init[] = {
-		//0x1f, 0x00,     /* Reset */
+		/*0x1f, 0x00,*/     /* Reset */
 		0x01, 0x59,	/* 0x01 input format */
 		0x02, 0x00,	/* 0x02 temporal decimation */
 		0x03, 0x12,	/* 0x03 Cropping msb */
@@ -218,12 +181,10 @@
 	struct timing *timing = &timing_data[decoder->norm];
 
 	init[0x03 * 2 - 1] =
-	    (((timing->vdelay >> 8) & 0x03) << 6) | (((timing->
-						       vactive >> 8) &
-						      0x03) << 4) |
-	    (((timing->hdelay >> 8) & 0x03) << 2) | ((timing->
-						      hactive >> 8) &
-						     0x03);
+	    (((timing->vdelay >> 8) & 0x03) << 6) |
+	    (((timing->vactive >> 8) & 0x03) << 4) |
+	    (((timing->hdelay >> 8) & 0x03) << 2) |
+	    ((timing->hactive >> 8) & 0x03);
 	init[0x04 * 2 - 1] = timing->vdelay & 0xff;
 	init[0x05 * 2 - 1] = timing->vactive & 0xff;
 	init[0x06 * 2 - 1] = timing->hdelay & 0xff;
@@ -238,27 +199,22 @@
 
 	/* init */
 	return bt819_write_block(client, init, sizeof(init));
-
 }
 
 /* ----------------------------------------------------------------------- */
 
-static int
-bt819_command (struct i2c_client *client,
-	       unsigned int       cmd,
-	       void              *arg)
+static int bt819_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	int temp;
 
 	struct bt819 *decoder = i2c_get_clientdata(client);
 
-	if (!decoder->initialized) {	// First call to bt819_init could be
-		bt819_init(client);	// without #FRST = 0
+	if (!decoder->initialized) {	/* First call to bt819_init could be */
+		bt819_init(client);	/* without #FRST = 0 */
 		decoder->initialized = 1;
 	}
 
 	switch (cmd) {
-
 	case 0:
 		/* This is just for testing!!! */
 		bt819_init(client);
@@ -274,8 +230,8 @@
 			     VIDEO_DECODER_CCIR;
 		cap->inputs = 8;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case DECODER_GET_STATUS:
 	{
@@ -285,9 +241,9 @@
 
 		status = bt819_read(client, 0x00);
 		res = 0;
-		if ((status & 0x80)) {
+		if ((status & 0x80))
 			res |= DECODER_STATUS_GOOD;
-		}
+
 		switch (decoder->norm) {
 		case VIDEO_MODE_NTSC:
 			res |= DECODER_STATUS_NTSC;
@@ -297,28 +253,25 @@
 			break;
 		default:
 		case VIDEO_MODE_AUTO:
-			if ((status & 0x10)) {
+			if ((status & 0x10))
 				res |= DECODER_STATUS_PAL;
-			} else {
+			else
 				res |= DECODER_STATUS_NTSC;
-			}
 			break;
 		}
 		res |= DECODER_STATUS_COLOR;
 		*iarg = res;
 
-		dprintk(1, KERN_INFO "%s: get status %x\n", I2C_NAME(client),
-			*iarg);
-	}
+		v4l_dbg(1, debug, client, "get status %x\n", *iarg);
 		break;
+	}
 
 	case DECODER_SET_NORM:
 	{
 		int *iarg = arg;
 		struct timing *timing = NULL;
 
-		dprintk(1, KERN_INFO "%s: set norm %x\n", I2C_NAME(client),
-			*iarg);
+		v4l_dbg(1, debug, client, "set norm %x\n", *iarg);
 
 		switch (*iarg) {
 		case VIDEO_MODE_NTSC:
@@ -327,7 +280,7 @@
 			bt819_setbit(client, 0x01, 5, 0);
 			bt819_write(client, 0x18, 0x68);
 			bt819_write(client, 0x19, 0x5d);
-			//bt819_setbit(client, 0x1a,  5, 1);
+			/* bt819_setbit(client, 0x1a,  5, 1); */
 			timing = &timing_data[VIDEO_MODE_NTSC];
 			break;
 		case VIDEO_MODE_PAL:
@@ -336,7 +289,7 @@
 			bt819_setbit(client, 0x01, 5, 1);
 			bt819_write(client, 0x18, 0x7f);
 			bt819_write(client, 0x19, 0x72);
-			//bt819_setbit(client, 0x1a,  5, 0);
+			/* bt819_setbit(client, 0x1a,  5, 0); */
 			timing = &timing_data[VIDEO_MODE_PAL];
 			break;
 		case VIDEO_MODE_AUTO:
@@ -344,10 +297,7 @@
 			bt819_setbit(client, 0x01, 1, 0);
 			break;
 		default:
-			dprintk(1,
-				KERN_ERR
-				"%s: unsupported norm %d\n",
-				I2C_NAME(client), *iarg);
+			v4l_dbg(1, debug, client, "unsupported norm %x\n", *iarg);
 			return -EINVAL;
 		}
 
@@ -366,19 +316,17 @@
 		}
 
 		decoder->norm = *iarg;
-	}
 		break;
+	}
 
 	case DECODER_SET_INPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_INFO "%s: set input %x\n", I2C_NAME(client),
-			*iarg);
+		v4l_dbg(1, debug, client, "set input %x\n", *iarg);
 
-		if (*iarg < 0 || *iarg > 7) {
+		if (*iarg < 0 || *iarg > 7)
 			return -EINVAL;
-		}
 
 		if (decoder->input != *iarg) {
 			decoder->input = *iarg;
@@ -391,52 +339,42 @@
 				bt819_setbit(client, 0x1a, 1, 0);
 			}
 		}
-	}
 		break;
+	}
 
 	case DECODER_SET_OUTPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_INFO "%s: set output %x\n", I2C_NAME(client),
-			*iarg);
+		v4l_dbg(1, debug, client, "set output %x\n", *iarg);
 
 		/* not much choice of outputs */
-		if (*iarg != 0) {
+		if (*iarg != 0)
 			return -EINVAL;
-		}
-	}
 		break;
+	}
 
 	case DECODER_ENABLE_OUTPUT:
 	{
 		int *iarg = arg;
 		int enable = (*iarg != 0);
 
-		dprintk(1, KERN_INFO "%s: enable output %x\n",
-			I2C_NAME(client), *iarg);
+		v4l_dbg(1, debug, client, "enable output %x\n", *iarg);
 
 		if (decoder->enable != enable) {
 			decoder->enable = enable;
-
-			if (decoder->enable) {
-				bt819_setbit(client, 0x16, 7, 0);
-			} else {
-				bt819_setbit(client, 0x16, 7, 1);
-			}
+			bt819_setbit(client, 0x16, 7, !enable);
 		}
-	}
 		break;
+	}
 
 	case DECODER_SET_PICTURE:
 	{
 		struct video_picture *pic = arg;
 
-		dprintk(1,
-			KERN_INFO
-			"%s: set picture brightness %d contrast %d colour %d\n",
-			I2C_NAME(client), pic->brightness, pic->contrast,
-			pic->colour);
+		v4l_dbg(1, debug, client,
+			"set picture brightness %d contrast %d colour %d\n",
+			pic->brightness, pic->contrast, pic->colour);
 
 
 		if (decoder->bright != pic->brightness) {
@@ -474,8 +412,8 @@
 			bt819_write(client, 0x0f,
 				    128 - (decoder->hue >> 8));
 		}
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -486,55 +424,44 @@
 
 /* ----------------------------------------------------------------------- */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] = {
-	I2C_BT819 >> 1,
-	I2C_CLIENT_END,
-};
+static unsigned short normal_i2c[] = { 0x8a >> 1, I2C_CLIENT_END };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_bt819;
-
-static int
-bt819_detect_client (struct i2c_adapter *adapter,
-		     int                 address,
-		     int                 kind)
+static int bt819_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
-	int i, id;
+	int i, ver;
 	struct bt819 *decoder;
-	struct i2c_client *client;
-
-	dprintk(1,
-		KERN_INFO
-		"bt819: detecting bt819 client on address 0x%x\n",
-		address << 1);
+	const char *name;
 
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_bt819;
+	ver = bt819_read(client, 0x17);
+	switch (ver & 0xf0) {
+	case 0x70:
+		name = "bt819a";
+		break;
+	case 0x60:
+		name = "bt817a";
+		break;
+	case 0x20:
+		name = "bt815a";
+		break;
+	default:
+		v4l_dbg(1, debug, client,
+			"unknown chip version 0x%02x\n", ver);
+		return -ENODEV;
+	}
+
+	v4l_info(client, "%s found @ 0x%x (%s)\n", name,
+			client->addr << 1, client->adapter->name);
 
 	decoder = kzalloc(sizeof(struct bt819), GFP_KERNEL);
-	if (decoder == NULL) {
-		kfree(client);
+	if (decoder == NULL)
 		return -ENOMEM;
-	}
 	decoder->norm = VIDEO_MODE_NTSC;
 	decoder->input = 0;
 	decoder->enable = 1;
@@ -545,97 +472,33 @@
 	decoder->initialized = 0;
 	i2c_set_clientdata(client, decoder);
 
-	id = bt819_read(client, 0x17);
-	switch (id & 0xf0) {
-	case 0x70:
-		strlcpy(I2C_NAME(client), "bt819a", sizeof(I2C_NAME(client)));
-		break;
-	case 0x60:
-		strlcpy(I2C_NAME(client), "bt817a", sizeof(I2C_NAME(client)));
-		break;
-	case 0x20:
-		strlcpy(I2C_NAME(client), "bt815a", sizeof(I2C_NAME(client)));
-		break;
-	default:
-		dprintk(1,
-			KERN_ERR
-			"bt819: unknown chip version 0x%x (ver 0x%x)\n",
-			id & 0xf0, id & 0x0f);
-		kfree(decoder);
-		kfree(client);
-		return 0;
-	}
-
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(decoder);
-		return i;
-	}
-
 	i = bt819_init(client);
-	if (i < 0) {
-		dprintk(1, KERN_ERR "%s_attach: init status %d\n",
-			I2C_NAME(client), i);
-	} else {
-		dprintk(1,
-			KERN_INFO
-			"%s_attach: chip version 0x%x at address 0x%x\n",
-			I2C_NAME(client), id & 0x0f,
-			client->addr << 1);
-	}
-
+	if (i < 0)
+		v4l_dbg(1, debug, client, "init status %d\n", i);
 	return 0;
 }
 
-static int
-bt819_attach_adapter (struct i2c_adapter *adapter)
+static int bt819_remove(struct i2c_client *client)
 {
-	return i2c_probe(adapter, &addr_data, &bt819_detect_client);
-}
-
-static int
-bt819_detach_client (struct i2c_client *client)
-{
-	struct bt819 *decoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(decoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_bt819 = {
-	.driver = {
-		.name = "bt819",
-	},
-
-	.id = I2C_DRIVERID_BT819,
-
-	.attach_adapter = bt819_attach_adapter,
-	.detach_client = bt819_detach_client,
-	.command = bt819_command,
+static const struct i2c_device_id bt819_id[] = {
+	{ "bt819a", 0 },
+	{ "bt817a", 0 },
+	{ "bt815a", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, bt819_id);
 
-static int __init
-bt819_init_module (void)
-{
-	return i2c_add_driver(&i2c_driver_bt819);
-}
-
-static void __exit
-bt819_exit (void)
-{
-	i2c_del_driver(&i2c_driver_bt819);
-}
-
-module_init(bt819_init_module);
-module_exit(bt819_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "bt819",
+	.driverid = I2C_DRIVERID_BT819,
+	.command = bt819_command,
+	.probe = bt819_probe,
+	.remove = bt819_remove,
+	.id_table = bt819_id,
+};
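
One more detail from the bt819 conversion: the patch only untangles the line-wrapped expression in bt819_init() that packs the top two bits of four timing values into the cropping MSB register (0x03), it does not change the packing. A small user-space check of that bit layout, with made-up inputs chosen so each field lands visibly in its own bit positions:

#include <stdio.h>

/* Same packing as the reformatted expression in bt819_init() above:
 * bits 7:6 vdelay[9:8], 5:4 vactive[9:8], 3:2 hdelay[9:8], 1:0 hactive[9:8]. */
static unsigned char bt819_crop_byte(unsigned int vdelay, unsigned int vactive,
				     unsigned int hdelay, unsigned int hactive)
{
	return (((vdelay  >> 8) & 0x03) << 6) |
	       (((vactive >> 8) & 0x03) << 4) |
	       (((hdelay  >> 8) & 0x03) << 2) |
		((hactive >> 8) & 0x03);
}

int main(void)
{
	/* illustrative values only, not taken from timing_data[] */
	printf("0x%02x\n", bt819_crop_byte(0x300, 0x200, 0x100, 0x0ff));
	return 0;	/* prints 0xe4: 11 10 01 00 from the four fields */
}
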
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index ab2ce4d..4213867 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -29,43 +29,24 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/video_encoder.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
+#include <linux/video_encoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Brooktree-856A video encoder driver");
 MODULE_AUTHOR("Mike Bernson & Dave Perks");
 MODULE_LICENSE("GPL");
 
-
-#define I2C_NAME(s) (s)->name
-
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 #define BT856_REG_OFFSET	0xDA
@@ -78,14 +59,9 @@
 	int enable;
 };
 
-#define   I2C_BT856        0x88
-
 /* ----------------------------------------------------------------------- */
 
-static inline int
-bt856_write (struct i2c_client *client,
-	     u8                 reg,
-	     u8                 value)
+static inline int bt856_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct bt856 *encoder = i2c_get_clientdata(client);
 
@@ -93,46 +69,36 @@
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static inline int
-bt856_setbit (struct i2c_client *client,
-	      u8                 reg,
-	      u8                 bit,
-	      u8                 value)
+static inline int bt856_setbit(struct i2c_client *client, u8 reg, u8 bit, u8 value)
 {
 	struct bt856 *encoder = i2c_get_clientdata(client);
 
 	return bt856_write(client, reg,
-			   (encoder->
-			    reg[reg - BT856_REG_OFFSET] & ~(1 << bit)) |
-			    (value ? (1 << bit) : 0));
+		(encoder->reg[reg - BT856_REG_OFFSET] & ~(1 << bit)) |
+				(value ? (1 << bit) : 0));
 }
 
-static void
-bt856_dump (struct i2c_client *client)
+static void bt856_dump(struct i2c_client *client)
 {
 	int i;
 	struct bt856 *encoder = i2c_get_clientdata(client);
 
-	printk(KERN_INFO "%s: register dump:", I2C_NAME(client));
+	v4l_info(client, "register dump:\n");
 	for (i = 0; i < BT856_NR_REG; i += 2)
-		printk(" %02x", encoder->reg[i]);
-	printk("\n");
+		printk(KERN_CONT " %02x", encoder->reg[i]);
+	printk(KERN_CONT "\n");
 }
 
 /* ----------------------------------------------------------------------- */
 
-static int
-bt856_command (struct i2c_client *client,
-	       unsigned int       cmd,
-	       void              *arg)
+static int bt856_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct bt856 *encoder = i2c_get_clientdata(client);
 
 	switch (cmd) {
-
 	case 0:
 		/* This is just for testing!!! */
-		dprintk(1, KERN_INFO "bt856: init\n");
+		v4l_dbg(1, debug, client, "init\n");
 		bt856_write(client, 0xdc, 0x18);
 		bt856_write(client, 0xda, 0);
 		bt856_write(client, 0xde, 0);
@@ -142,7 +108,6 @@
 		bt856_setbit(client, 0xdc, 4, 1);
 
 		switch (encoder->norm) {
-
 		case VIDEO_MODE_NTSC:
 			bt856_setbit(client, 0xdc, 2, 0);
 			break;
@@ -163,26 +128,23 @@
 	{
 		struct video_encoder_capability *cap = arg;
 
-		dprintk(1, KERN_INFO "%s: get capabilities\n",
-			I2C_NAME(client));
+		v4l_dbg(1, debug, client, "get capabilities\n");
 
 		cap->flags = VIDEO_ENCODER_PAL |
 			     VIDEO_ENCODER_NTSC |
 			     VIDEO_ENCODER_CCIR;
 		cap->inputs = 2;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case ENCODER_SET_NORM:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_INFO "%s: set norm %d\n", I2C_NAME(client),
-			*iarg);
+		v4l_dbg(1, debug, client, "set norm %d\n", *iarg);
 
 		switch (*iarg) {
-
 		case VIDEO_MODE_NTSC:
 			bt856_setbit(client, 0xdc, 2, 0);
 			break;
@@ -195,27 +157,23 @@
 
 		default:
 			return -EINVAL;
-
 		}
 		encoder->norm = *iarg;
 		if (debug != 0)
 			bt856_dump(client);
-	}
 		break;
+	}
 
 	case ENCODER_SET_INPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_INFO "%s: set input %d\n", I2C_NAME(client),
-			*iarg);
+		v4l_dbg(1, debug, client, "set input %d\n", *iarg);
 
 		/* We only have video bus.
 		 * iarg = 0: input is from bt819
 		 * iarg = 1: input is from ZR36060 */
-
 		switch (*iarg) {
-
 		case 0:
 			bt856_setbit(client, 0xde, 4, 0);
 			bt856_setbit(client, 0xde, 3, 1);
@@ -234,27 +192,24 @@
 			break;
 		default:
 			return -EINVAL;
-
 		}
 
 		if (debug != 0)
 			bt856_dump(client);
-	}
 		break;
+	}
 
 	case ENCODER_SET_OUTPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_INFO "%s: set output %d\n", I2C_NAME(client),
-			*iarg);
+		v4l_dbg(1, debug, client, "set output %d\n", *iarg);
 
 		/* not much choice of outputs */
-		if (*iarg != 0) {
+		if (*iarg != 0)
 			return -EINVAL;
-		}
-	}
 		break;
+	}
 
 	case ENCODER_ENABLE_OUTPUT:
 	{
@@ -262,10 +217,9 @@
 
 		encoder->enable = !!*iarg;
 
-		dprintk(1, KERN_INFO "%s: enable output %d\n",
-			I2C_NAME(client), encoder->enable);
-	}
+		v4l_dbg(1, debug, client, "enable output %d\n", encoder->enable);
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -276,64 +230,29 @@
 
 /* ----------------------------------------------------------------------- */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] = { I2C_BT856 >> 1, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x88 >> 1, I2C_CLIENT_END };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_bt856;
-
-static int
-bt856_detect_client (struct i2c_adapter *adapter,
-		     int                 address,
-		     int                 kind)
+static int bt856_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
-	int i;
-	struct i2c_client *client;
 	struct bt856 *encoder;
 
-	dprintk(1,
-		KERN_INFO
-		"bt856.c: detecting bt856 client on address 0x%x\n",
-		address << 1);
-
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_bt856;
-	strlcpy(I2C_NAME(client), "bt856", sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	encoder = kzalloc(sizeof(struct bt856), GFP_KERNEL);
-	if (encoder == NULL) {
-		kfree(client);
+	if (encoder == NULL)
 		return -ENOMEM;
-	}
 	encoder->norm = VIDEO_MODE_NTSC;
 	encoder->enable = 1;
 	i2c_set_clientdata(client, encoder);
 
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(encoder);
-		return i;
-	}
-
 	bt856_write(client, 0xdc, 0x18);
 	bt856_write(client, 0xda, 0);
 	bt856_write(client, 0xde, 0);
@@ -359,65 +278,26 @@
 
 	if (debug != 0)
 		bt856_dump(client);
-
-	dprintk(1, KERN_INFO "%s_attach: at address 0x%x\n", I2C_NAME(client),
-		client->addr << 1);
-
 	return 0;
 }
 
-static int
-bt856_attach_adapter (struct i2c_adapter *adapter)
+static int bt856_remove(struct i2c_client *client)
 {
-	dprintk(1,
-		KERN_INFO
-		"bt856.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &bt856_detect_client);
-}
-
-static int
-bt856_detach_client (struct i2c_client *client)
-{
-	struct bt856 *encoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(encoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
-/* ----------------------------------------------------------------------- */
-
-static struct i2c_driver i2c_driver_bt856 = {
-	.driver = {
-		.name = "bt856",
-	},
-
-	.id = I2C_DRIVERID_BT856,
-
-	.attach_adapter = bt856_attach_adapter,
-	.detach_client = bt856_detach_client,
-	.command = bt856_command,
+static const struct i2c_device_id bt856_id[] = {
+	{ "bt856", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, bt856_id);
 
-static int __init
-bt856_init (void)
-{
-	return i2c_add_driver(&i2c_driver_bt856);
-}
-
-static void __exit
-bt856_exit (void)
-{
-	i2c_del_driver(&i2c_driver_bt856);
-}
-
-module_init(bt856_init);
-module_exit(bt856_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "bt856",
+	.driverid = I2C_DRIVERID_BT856,
+	.command = bt856_command,
+	.probe = bt856_probe,
+	.remove = bt856_remove,
+	.id_table = bt856_id,
+};
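
One detail shared by the bt856 hunk above and the bt866 hunk below: the probe address table stores 0x88 >> 1 because the i2c core works in 7-bit addresses (as the comment deleted above spelled out), while the v4l_info() banner prints client->addr << 1 so the log shows the 8-bit write-address form the Brooktree datasheets use. A throwaway userspace check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int addr8 = 0x88;		/* 8-bit write address from the datasheet */
	unsigned int addr7 = addr8 >> 1;	/* 7-bit address the i2c core expects */

	printf("7-bit 0x%02x, logged as 0x%02x\n", addr7, addr7 << 1);
	return 0;
}

which prints 0x44 and 0x88 respectively.
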
diff --git a/drivers/media/video/bt866.c b/drivers/media/video/bt866.c
index 96b4155..596f9e2 100644
--- a/drivers/media/video/bt866.c
+++ b/drivers/media/video/bt866.c
@@ -29,42 +29,28 @@
 */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <linux/sched.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-
-#include <linux/videodev.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/videodev.h>
 #include <linux/video_encoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
+MODULE_DESCRIPTION("Brooktree-866 video encoder driver");
+MODULE_AUTHOR("Mike Bernson & Dave Perks");
 MODULE_LICENSE("GPL");
 
-#define	BT866_DEVNAME	"bt866"
-#define I2C_BT866	0x88
-
-MODULE_LICENSE("GPL");
-
-#define DEBUG(x)		/* Debug driver */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
 /* ----------------------------------------------------------------------- */
 
 struct bt866 {
-	struct i2c_client *i2c;
-	int addr;
-	unsigned char reg[256];
+	u8 reg[256];
 
 	int norm;
 	int enable;
@@ -74,20 +60,45 @@
 	int sat;
 };
 
-static int bt866_write(struct bt866 *dev,
-			unsigned char subaddr, unsigned char data);
-
-static int bt866_do_command(struct bt866 *encoder,
-			unsigned int cmd, void *arg)
+static int bt866_write(struct i2c_client *client, u8 subaddr, u8 data)
 {
+	struct bt866 *encoder = i2c_get_clientdata(client);
+	u8 buffer[2];
+	int err;
+
+	buffer[0] = subaddr;
+	buffer[1] = data;
+
+	encoder->reg[subaddr] = data;
+
+	v4l_dbg(1, debug, client, "write 0x%02x = 0x%02x\n", subaddr, data);
+
+	for (err = 0; err < 3;) {
+		if (i2c_master_send(client, buffer, 2) == 2)
+			break;
+		err++;
+		v4l_warn(client, "error #%d writing to 0x%02x\n",
+				err, subaddr);
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+	}
+	if (err == 3) {
+		v4l_warn(client, "giving up\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int bt866_command(struct i2c_client *client, unsigned cmd, void *arg)
+{
+	struct bt866 *encoder = i2c_get_clientdata(client);
+
 	switch (cmd) {
 	case ENCODER_GET_CAPABILITIES:
 	{
 		struct video_encoder_capability *cap = arg;
 
-		DEBUG(printk
-		      (KERN_INFO "%s: get capabilities\n",
-		       encoder->i2c->name));
+		v4l_dbg(1, debug, client, "get capabilities\n");
 
 		cap->flags
 			= VIDEO_ENCODER_PAL
@@ -95,18 +106,16 @@
 			| VIDEO_ENCODER_CCIR;
 		cap->inputs = 2;
 		cap->outputs = 1;
+		break;
 	}
-	break;
 
 	case ENCODER_SET_NORM:
 	{
 		int *iarg = arg;
 
-		DEBUG(printk(KERN_INFO "%s: set norm %d\n",
-			     encoder->i2c->name, *iarg));
+		v4l_dbg(1, debug, client, "set norm %d\n", *iarg);
 
 		switch (*iarg) {
-
 		case VIDEO_MODE_NTSC:
 			break;
 
@@ -115,11 +124,10 @@
 
 		default:
 			return -EINVAL;
-
 		}
 		encoder->norm = *iarg;
+		break;
 	}
-	break;
 
 	case ENCODER_SET_INPUT:
 	{
@@ -155,7 +163,7 @@
 		u8 val;
 
 		for (i = 0; i < ARRAY_SIZE(init) / 2; i += 2)
-			bt866_write(encoder, init[i], init[i+1]);
+			bt866_write(client, init[i], init[i+1]);
 
 		val = encoder->reg[0xdc];
 
@@ -164,17 +172,16 @@
 		else
 			val &= ~0x40; /* !CBSWAP */
 
-		bt866_write(encoder, 0xdc, val);
+		bt866_write(client, 0xdc, val);
 
 		val = encoder->reg[0xcc];
 		if (*iarg == 2)
 			val |= 0x01; /* OSDBAR */
 		else
 			val &= ~0x01; /* !OSDBAR */
-		bt866_write(encoder, 0xcc, val);
+		bt866_write(client, 0xcc, val);
 
-		DEBUG(printk(KERN_INFO "%s: set input %d\n",
-			     encoder->i2c->name, *iarg));
+		v4l_dbg(1, debug, client, "set input %d\n", *iarg);
 
 		switch (*iarg) {
 		case 0:
@@ -183,48 +190,44 @@
 			break;
 		default:
 			return -EINVAL;
-
 		}
+		break;
 	}
-	break;
 
 	case ENCODER_SET_OUTPUT:
 	{
 		int *iarg = arg;
 
-		DEBUG(printk(KERN_INFO "%s: set output %d\n",
-			     encoder->i2c->name, *iarg));
+		v4l_dbg(1, debug, client, "set output %d\n", *iarg);
 
 		/* not much choice of outputs */
 		if (*iarg != 0)
 			return -EINVAL;
+		break;
 	}
-	break;
 
 	case ENCODER_ENABLE_OUTPUT:
 	{
 		int *iarg = arg;
 		encoder->enable = !!*iarg;
 
-		DEBUG(printk
-		      (KERN_INFO "%s: enable output %d\n",
-		       encoder->i2c->name, encoder->enable));
+		v4l_dbg(1, debug, client, "enable output %d\n", encoder->enable);
+		break;
 	}
-	break;
 
 	case 4711:
 	{
 		int *iarg = arg;
 		__u8 val;
 
-		printk("bt866: square = %d\n", *iarg);
+		v4l_dbg(1, debug, client, "square %d\n", *iarg);
 
 		val = encoder->reg[0xdc];
 		if (*iarg)
 			val |= 1; /* SQUARE */
 		else
 			val &= ~1; /* !SQUARE */
-		bt866_write(encoder, 0xdc, val);
+		bt866_write(client, 0xdc, val);
 		break;
 	}
 
@@ -235,141 +238,49 @@
 	return 0;
 }
 
-static int bt866_write(struct bt866 *encoder,
-			unsigned char subaddr, unsigned char data)
-{
-	unsigned char buffer[2];
-	int err;
+static unsigned short normal_i2c[] = { 0x88 >> 1, I2C_CLIENT_END };
 
-	buffer[0] = subaddr;
-	buffer[1] = data;
+I2C_CLIENT_INSMOD;
 
-	encoder->reg[subaddr] = data;
-
-	DEBUG(printk
-	      ("%s: write 0x%02X = 0x%02X\n",
-	       encoder->i2c->name, subaddr, data));
-
-	for (err = 0; err < 3;) {
-		if (i2c_master_send(encoder->i2c, buffer, 2) == 2)
-			break;
-		err++;
-		printk(KERN_WARNING "%s: I/O error #%d "
-		       "(write 0x%02x/0x%02x)\n",
-		       encoder->i2c->name, err, encoder->addr, subaddr);
-		schedule_timeout_interruptible(msecs_to_jiffies(100));
-	}
-	if (err == 3) {
-		printk(KERN_WARNING "%s: giving up\n",
-		       encoder->i2c->name);
-		return -1;
-	}
-
-	return 0;
-}
-
-static int bt866_attach(struct i2c_adapter *adapter);
-static int bt866_detach(struct i2c_client *client);
-static int bt866_command(struct i2c_client *client,
-			 unsigned int cmd, void *arg);
-
-
-/* Addresses to scan */
-static unsigned short normal_i2c[]	= {I2C_BT866>>1, I2C_CLIENT_END};
-static unsigned short probe[2]		= {I2C_CLIENT_END, I2C_CLIENT_END};
-static unsigned short ignore[2]		= {I2C_CLIENT_END, I2C_CLIENT_END};
-
-static struct i2c_client_address_data addr_data = {
-	normal_i2c,
-	probe,
-	ignore,
-};
-
-static struct i2c_driver i2c_driver_bt866 = {
-	.driver.name = BT866_DEVNAME,
-	.id = I2C_DRIVERID_BT866,
-	.attach_adapter = bt866_attach,
-	.detach_client = bt866_detach,
-	.command = bt866_command
-};
-
-
-static struct i2c_client bt866_client_tmpl =
-{
-	.name = "(nil)",
-	.addr = 0,
-	.adapter = NULL,
-	.driver = &i2c_driver_bt866,
-};
-
-static int bt866_found_proc(struct i2c_adapter *adapter,
-			    int addr, int kind)
+static int bt866_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	struct bt866 *encoder;
-	struct i2c_client *client;
 
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (client == NULL)
-		return -ENOMEM;
-	memcpy(client, &bt866_client_tmpl, sizeof(*client));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
-	if (encoder == NULL) {
-		kfree(client);
+	if (encoder == NULL)
 		return -ENOMEM;
-	}
 
 	i2c_set_clientdata(client, encoder);
-	client->adapter = adapter;
-	client->addr = addr;
-	sprintf(client->name, "%s-%02x", BT866_DEVNAME, adapter->id);
-
-	encoder->i2c = client;
-	encoder->addr = addr;
-	//encoder->encoder_type = ENCODER_TYPE_UNKNOWN;
-
-	/* initialize */
-
-	i2c_attach_client(client);
-
 	return 0;
 }
 
-static int bt866_attach(struct i2c_adapter *adapter)
+static int bt866_remove(struct i2c_client *client)
 {
-	if (adapter->id == I2C_HW_B_ZR36067)
-		return i2c_probe(adapter, &addr_data, bt866_found_proc);
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
-static int bt866_detach(struct i2c_client *client)
+static int bt866_legacy_probe(struct i2c_adapter *adapter)
 {
-	struct bt866 *encoder = i2c_get_clientdata(client);
-
-	i2c_detach_client(client);
-	kfree(encoder);
-	kfree(client);
-
-	return 0;
+	return adapter->id == I2C_HW_B_ZR36067;
 }
 
-static int bt866_command(struct i2c_client *client,
-			 unsigned int cmd, void *arg)
-{
-	struct bt866 *encoder = i2c_get_clientdata(client);
-	return bt866_do_command(encoder, cmd, arg);
-}
+static const struct i2c_device_id bt866_id[] = {
+	{ "bt866", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, bt866_id);
 
-static int __devinit bt866_init(void)
-{
-	i2c_add_driver(&i2c_driver_bt866);
-	return 0;
-}
-
-static void __devexit bt866_exit(void)
-{
-	i2c_del_driver(&i2c_driver_bt866);
-}
-
-module_init(bt866_init);
-module_exit(bt866_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "bt866",
+	.driverid = I2C_DRIVERID_BT866,
+	.command = bt866_command,
+	.probe = bt866_probe,
+	.remove = bt866_remove,
+	.legacy_probe = bt866_legacy_probe,
+	.id_table = bt866_id,
+};
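
Two points in the bt866 conversion are worth calling out. bt866_write() now takes the i2c_client directly, shadows every value in encoder->reg[] and retries i2c_master_send() up to three times with a 100 ms interruptible sleep between attempts before giving up; and bt866_legacy_probe() keeps the driver from binding to anything but the ZR36067 adapter on the legacy bus, replacing the old check inside bt866_attach(). A condensed sketch of the retry helper, using only calls that appear in the hunk (shadowed_write() and MAX_TRIES are illustrative names, and it returns -EIO where the driver itself returns -1):

#include <linux/i2c.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define MAX_TRIES 3

static int shadowed_write(struct i2c_client *client, u8 *shadow,
			  u8 subaddr, u8 data)
{
	u8 buffer[2] = { subaddr, data };
	int try;

	shadow[subaddr] = data;		/* local copy for later read-modify-write */
	for (try = 0; try < MAX_TRIES; try++) {
		if (i2c_master_send(client, buffer, 2) == 2)
			return 0;
		/* transient bus error: back off briefly and retry */
		schedule_timeout_interruptible(msecs_to_jiffies(100));
	}
	return -EIO;
}
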
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5858bf5..9ec4cec 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4246,7 +4246,7 @@
 				  video_nr[btv->c.nr]) < 0)
 		goto err;
 	printk(KERN_INFO "bttv%d: registered device video%d\n",
-	       btv->c.nr,btv->video_dev->minor & 0x1f);
+	       btv->c.nr, btv->video_dev->num);
 	if (device_create_file(&btv->video_dev->dev,
 				     &dev_attr_card)<0) {
 		printk(KERN_ERR "bttv%d: device_create_file 'card' "
@@ -4263,7 +4263,7 @@
 				  vbi_nr[btv->c.nr]) < 0)
 		goto err;
 	printk(KERN_INFO "bttv%d: registered device vbi%d\n",
-	       btv->c.nr,btv->vbi_dev->minor & 0x1f);
+	       btv->c.nr, btv->vbi_dev->num);
 
 	if (!btv->has_radio)
 		return 0;
@@ -4275,7 +4275,7 @@
 				  radio_nr[btv->c.nr]) < 0)
 		goto err;
 	printk(KERN_INFO "bttv%d: registered device radio%d\n",
-	       btv->c.nr,btv->radio_dev->minor & 0x1f);
+	       btv->c.nr, btv->radio_dev->num);
 
 	/* all done */
 	return 0;
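
The bttv change above, and the identical ones in c-qcam, cafe_ccic, cpia and cpia2 below, all stop deriving the device number from the character-device minor: the messages now print video_dev->num, the kernel number that actually appears in /dev/videoN. The old `minor & 0x1f` only happened to give that number under the fixed per-type minor bases, and stops working once minors are allocated dynamically, which appears to be the motivation for the repeated substitution. Illustrative arithmetic (the minor values here are made up):

#include <stdio.h>

int main(void)
{
	int minor_fixed = 5;	/* old fixed layout: /dev/video5 really was minor 5 */
	int minor_dyn = 37;	/* a dynamically allocated minor no longer encodes it */

	printf("fixed  : minor %2d -> video%d\n", minor_fixed, minor_fixed & 0x1f);
	printf("dynamic: minor %2d -> video%d  (bogus)\n", minor_dyn, minor_dyn & 0x1f);
	return 0;
}
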
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 17aa0ad..0f930d3 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -815,7 +815,7 @@
 	}
 
 	printk(KERN_INFO "video%d: Colour QuickCam found on %s\n",
-	       qcam->vdev.minor, qcam->pport->name);
+	       qcam->vdev.num, qcam->pport->name);
 
 	qcams[num_cams++] = qcam;
 
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index fc9497b..a8c068e 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2059,10 +2059,10 @@
 
 	if (!cafe_dfs_root)
 		return;
-	sprintf(fname, "regs-%d", cam->v4ldev.minor);
+	sprintf(fname, "regs-%d", cam->v4ldev.num);
 	cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
 			cam, &cafe_dfs_reg_ops);
-	sprintf(fname, "cam-%d", cam->v4ldev.minor);
+	sprintf(fname, "cam-%d", cam->v4ldev.num);
 	cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
 			cam, &cafe_dfs_cam_ops);
 }
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 1798b77..16c094f 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1347,7 +1347,7 @@
 	if (!cpia_proc_root || !cam)
 		return;
 
-	snprintf(name, sizeof(name), "video%d", cam->vdev.minor);
+	snprintf(name, sizeof(name), "video%d", cam->vdev.num);
 
 	ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root);
 	if (!ent)
@@ -1372,7 +1372,7 @@
 	if (!cam || !cam->proc_entry)
 		return;
 
-	snprintf(name, sizeof(name), "video%d", cam->vdev.minor);
+	snprintf(name, sizeof(name), "video%d", cam->vdev.num);
 	remove_proc_entry(name, cpia_proc_root);
 	cam->proc_entry = NULL;
 }
@@ -4005,7 +4005,7 @@
 	}
 
 #ifdef CONFIG_PROC_FS
-	DBG("destroying /proc/cpia/video%d\n", cam->vdev.minor);
+	DBG("destroying /proc/cpia/video%d\n", cam->vdev.num);
 	destroy_proc_cpia_cam(cam);
 #endif
 	if (!cam->open_count) {
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 897e8d1..1c6bd63 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1973,7 +1973,7 @@
 	} else {
 		LOG("/dev/video%d removed while open, "
 		    "deferring video_unregister_device\n",
-		    cam->vdev->minor);
+		    cam->vdev->num);
 	}
 }
 
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 085121c..7a1a783 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -613,6 +613,7 @@
 				const struct pci_device_id *pci_id)
 {
 	int retval = 0;
+	int i;
 	int vbi_buf_size;
 	u32 devtype;
 	struct cx18 *cx;
@@ -698,7 +699,8 @@
 
 	/* active i2c  */
 	CX18_DEBUG_INFO("activating i2c...\n");
-	if (init_cx18_i2c(cx)) {
+	retval = init_cx18_i2c(cx);
+	if (retval) {
 		CX18_ERR("Could not initialize i2c\n");
 		goto free_map;
 	}
@@ -836,8 +838,11 @@
 	CX18_ERR("Error %d on initialization\n", retval);
 	cx18_log_statistics(cx);
 
-	kfree(cx18_cards[cx18_cards_active]);
-	cx18_cards[cx18_cards_active] = NULL;
+	i = cx->num;
+	spin_lock(&cx18_cards_lock);
+	kfree(cx18_cards[i]);
+	cx18_cards[i] = NULL;
+	spin_unlock(&cx18_cards_lock);
 	return retval;
 }
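
Two fixes ride together in the cx18 probe path: init_cx18_i2c()'s return value is now captured in retval, so the probe reports and returns the real error code rather than whatever retval happened to hold, and the error unwind frees its own slot (cx->num) under cx18_cards_lock instead of whatever cx18_cards_active happens to index, presumably so a probe running for another card cannot race the table update. The locking idiom in isolation, with hypothetical table and lock names standing in for the driver's cx18_cards[] and cx18_cards_lock:

#include <linux/spinlock.h>
#include <linux/slab.h>

#define MAX_CARDS 32

static void *card_table[MAX_CARDS];
static DEFINE_SPINLOCK(card_table_lock);

static void release_card_slot(int idx)
{
	spin_lock(&card_table_lock);
	kfree(card_table[idx]);		/* free only this instance's entry */
	card_table[idx] = NULL;		/* and unpublish it while the lock is held */
	spin_unlock(&card_table_lock);
}
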
 
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index fa8be07..a4b1708 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -41,6 +41,7 @@
 #include <linux/pagemap.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <asm/byteorder.h>
 
 #include <linux/dvb/video.h>
 #include <linux/dvb/audio.h>
diff --git a/drivers/media/video/cx18/cx18-io.h b/drivers/media/video/cx18/cx18-io.h
index 197d4fb..287a5e8 100644
--- a/drivers/media/video/cx18/cx18-io.h
+++ b/drivers/media/video/cx18/cx18-io.h
@@ -39,7 +39,7 @@
 
 /* Statistics gathering */
 static inline
-void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr)
+void cx18_log_write_retries(struct cx18 *cx, int i, const void __iomem *addr)
 {
 	if (i > CX18_MAX_MMIO_RETRIES)
 		i = CX18_MAX_MMIO_RETRIES;
@@ -48,7 +48,7 @@
 }
 
 static inline
-void cx18_log_read_retries(struct cx18 *cx, int i, const void *addr)
+void cx18_log_read_retries(struct cx18 *cx, int i, const void __iomem *addr)
 {
 	if (i > CX18_MAX_MMIO_RETRIES)
 		i = CX18_MAX_MMIO_RETRIES;
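
The cx18-io.h change is annotation only: the retry-statistics helpers take pointers into the ioremap()ed register window, and tagging the parameter __iomem lets sparse ("make C=1") flag any plain dereference of it; the generated code is unchanged. A minimal illustration of the convention (read_reg() is a made-up helper, not part of this patch):

#include <linux/io.h>
#include <linux/types.h>

/* an __iomem pointer must be accessed through readl()/writel() and
 * friends, never through a direct '*addr' dereference */
static u32 read_reg(const void __iomem *addr)
{
	return readl(addr);
}
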
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0c8e754..e5ff770 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -200,16 +200,18 @@
 /* Initialize v4l2 variables and register v4l2 devices */
 int cx18_streams_setup(struct cx18 *cx)
 {
-	int type;
+	int type, ret;
 
 	/* Setup V4L2 Devices */
 	for (type = 0; type < CX18_MAX_STREAMS; type++) {
 		/* Prepare device */
-		if (cx18_prep_dev(cx, type))
+		ret = cx18_prep_dev(cx, type);
+		if (ret < 0)
 			break;
 
 		/* Allocate Stream */
-		if (cx18_stream_alloc(&cx->streams[type]))
+		ret = cx18_stream_alloc(&cx->streams[type]);
+		if (ret < 0)
 			break;
 	}
 	if (type == CX18_MAX_STREAMS)
@@ -217,14 +219,14 @@
 
 	/* One or more streams could not be initialized. Clean 'em all up. */
 	cx18_streams_cleanup(cx, 0);
-	return -ENOMEM;
+	return ret;
 }
 
 static int cx18_reg_dev(struct cx18 *cx, int type)
 {
 	struct cx18_stream *s = &cx->streams[type];
 	int vfl_type = cx18_stream_info[type].vfl_type;
-	int num;
+	int num, ret;
 
 	/* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
 	 * We need a VFL_TYPE_TS defined.
@@ -233,9 +235,10 @@
 		/* just return if no DVB is supported */
 		if ((cx->card->hw_all & CX18_HW_DVB) == 0)
 			return 0;
-		if (cx18_dvb_register(s) < 0) {
+		ret = cx18_dvb_register(s);
+		if (ret < 0) {
 			CX18_ERR("DVB failed to register\n");
-			return -EINVAL;
+			return ret;
 		}
 	}
 
@@ -252,12 +255,13 @@
 	}
 
 	/* Register device. First try the desired minor, then any free one. */
-	if (video_register_device(s->v4l2dev, vfl_type, num)) {
+	ret = video_register_device(s->v4l2dev, vfl_type, num);
+	if (ret < 0) {
 		CX18_ERR("Couldn't register v4l2 device for %s kernel number %d\n",
 			s->name, num);
 		video_device_release(s->v4l2dev);
 		s->v4l2dev = NULL;
-		return -ENOMEM;
+		return ret;
 	}
 	num = s->v4l2dev->num;
 
@@ -290,18 +294,22 @@
 int cx18_streams_register(struct cx18 *cx)
 {
 	int type;
-	int err = 0;
+	int err;
+	int ret = 0;
 
 	/* Register V4L2 devices */
-	for (type = 0; type < CX18_MAX_STREAMS; type++)
-		err |= cx18_reg_dev(cx, type);
+	for (type = 0; type < CX18_MAX_STREAMS; type++) {
+		err = cx18_reg_dev(cx, type);
+		if (err && ret == 0)
+			ret = err;
+	}
 
-	if (err == 0)
+	if (ret == 0)
 		return 0;
 
 	/* One or more streams could not be initialized. Clean 'em all up. */
 	cx18_streams_cleanup(cx, 1);
-	return -ENOMEM;
+	return ret;
 }
 
 /* Unregister v4l2 devices */
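
cx18_streams_setup() and cx18_streams_register() used to fold failures together with `err |= cx18_reg_dev(...)` and then return a hard-coded -ENOMEM; OR-ing negative errnos yields a meaningless bit pattern, and the constant hid the real cause. The new loops still try every stream but record the first failure and return it. The same pattern in a throwaway userspace form (register_stream() is a stand-in for cx18_reg_dev()):

#include <stdio.h>

/* stand-in that fails for one stream, to exercise the error path */
static int register_stream(int type)
{
	return (type == 2) ? -19 /* -ENODEV */ : 0;
}

int main(void)
{
	int type, err, ret = 0;

	for (type = 0; type < 5; type++) {
		err = register_stream(type);
		if (err && ret == 0)
			ret = err;	/* keep only the first failure */
	}
	printf("first error: %d\n", ret);	/* prints -19, a usable errno */
	return 0;
}
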
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 395c11f..00831f3 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1815,7 +1815,7 @@
 	cx23885_mc417_init(dev);
 
 	printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
-	       dev->name, dev->v4l_device->minor & 0x1f);
+	       dev->name, dev->v4l_device->num);
 
 	return 0;
 }
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 2cda15f..dac5ccc 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -39,16 +39,16 @@
 		.input          = {{
 			.type   = CX23885_VMUX_COMPOSITE1,
 			.vmux   = 0,
-		},{
+		}, {
 			.type   = CX23885_VMUX_COMPOSITE2,
 			.vmux   = 1,
-		},{
+		}, {
 			.type   = CX23885_VMUX_COMPOSITE3,
 			.vmux   = 2,
-		},{
+		}, {
 			.type   = CX23885_VMUX_COMPOSITE4,
 			.vmux   = 3,
-		}},
+		} },
 	},
 	[CX23885_BOARD_HAUPPAUGE_HVR1800lp] = {
 		.name		= "Hauppauge WinTV-HVR1800lp",
@@ -57,19 +57,19 @@
 			.type   = CX23885_VMUX_TELEVISION,
 			.vmux   = 0,
 			.gpio0  = 0xff00,
-		},{
+		}, {
 			.type   = CX23885_VMUX_DEBUG,
 			.vmux   = 0,
 			.gpio0  = 0xff01,
-		},{
+		}, {
 			.type   = CX23885_VMUX_COMPOSITE1,
 			.vmux   = 1,
 			.gpio0  = 0xff02,
-		},{
+		}, {
 			.type   = CX23885_VMUX_SVIDEO,
 			.vmux   = 2,
 			.gpio0  = 0xff02,
-		}},
+		} },
 	},
 	[CX23885_BOARD_HAUPPAUGE_HVR1800] = {
 		.name		= "Hauppauge WinTV-HVR1800",
@@ -84,20 +84,20 @@
 					CX25840_VIN5_CH2 |
 					CX25840_VIN2_CH1,
 			.gpio0  = 0,
-		},{
+		}, {
 			.type   = CX23885_VMUX_COMPOSITE1,
 			.vmux   =	CX25840_VIN7_CH3 |
 					CX25840_VIN4_CH2 |
 					CX25840_VIN6_CH1,
 			.gpio0  = 0,
-		},{
+		}, {
 			.type   = CX23885_VMUX_SVIDEO,
 			.vmux   =	CX25840_VIN7_CH3 |
 					CX25840_VIN4_CH2 |
 					CX25840_VIN8_CH1 |
 					CX25840_SVIDEO_ON,
 			.gpio0  = 0,
-		}},
+		} },
 	},
 	[CX23885_BOARD_HAUPPAUGE_HVR1250] = {
 		.name		= "Hauppauge WinTV-HVR1250",
@@ -106,19 +106,19 @@
 			.type   = CX23885_VMUX_TELEVISION,
 			.vmux   = 0,
 			.gpio0  = 0xff00,
-		},{
+		}, {
 			.type   = CX23885_VMUX_DEBUG,
 			.vmux   = 0,
 			.gpio0  = 0xff01,
-		},{
+		}, {
 			.type   = CX23885_VMUX_COMPOSITE1,
 			.vmux   = 1,
 			.gpio0  = 0xff02,
-		},{
+		}, {
 			.type   = CX23885_VMUX_SVIDEO,
 			.vmux   = 2,
 			.gpio0  = 0xff02,
-		}},
+		} },
 	},
 	[CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP] = {
 		.name		= "DViCO FusionHDTV5 Express",
@@ -169,43 +169,43 @@
 		.subvendor = 0x0070,
 		.subdevice = 0x3400,
 		.card      = CX23885_BOARD_UNKNOWN,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7600,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800lp,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7800,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7801,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7809,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7911,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1250,
-	},{
+	}, {
 		.subvendor = 0x18ac,
 		.subdevice = 0xd500,
 		.card      = CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7790,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500Q,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7797,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500Q,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7710,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500,
-	},{
+	}, {
 		.subvendor = 0x0070,
 		.subdevice = 0x7717,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500,
@@ -225,11 +225,11 @@
 		.subvendor = 0x0070,
 		.subdevice = 0x8010,
 		.card      = CX23885_BOARD_HAUPPAUGE_HVR1400,
-	},{
+	}, {
 		.subvendor = 0x18ac,
 		.subdevice = 0xd618,
 		.card      = CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP,
-	},{
+	}, {
 		.subvendor = 0x18ac,
 		.subdevice = 0xdb78,
 		.card      = CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP,
@@ -247,23 +247,25 @@
 
 	if (0 == dev->pci->subsystem_vendor &&
 	    0 == dev->pci->subsystem_device) {
-		printk("%s: Your board has no valid PCIe Subsystem ID and thus can't\n"
-		       "%s: be autodetected.  Please pass card=<n> insmod option to\n"
-		       "%s: workaround that.  Redirect complaints to the vendor of\n"
-		       "%s: the TV card.  Best regards,\n"
+		printk(KERN_INFO
+			"%s: Board has no valid PCIe Subsystem ID and can't\n"
+		       "%s: be autodetected. Pass card=<n> insmod option\n"
+		       "%s: to workaround that. Redirect complaints to the\n"
+		       "%s: vendor of the TV card.  Best regards,\n"
 		       "%s:         -- tux\n",
 		       dev->name, dev->name, dev->name, dev->name, dev->name);
 	} else {
-		printk("%s: Your board isn't known (yet) to the driver.  You can\n"
-		       "%s: try to pick one of the existing card configs via\n"
+		printk(KERN_INFO
+			"%s: Your board isn't known (yet) to the driver.\n"
+		       "%s: Try to pick one of the existing card configs via\n"
 		       "%s: card=<n> insmod option.  Updating to the latest\n"
 		       "%s: version might help as well.\n",
 		       dev->name, dev->name, dev->name, dev->name);
 	}
-	printk("%s: Here is a list of valid choices for the card=<n> insmod option:\n",
+	printk(KERN_INFO "%s: Here is a list of valid choices for the card=<n> insmod option:\n",
 	       dev->name);
 	for (i = 0; i < cx23885_bcount; i++)
-		printk("%s:    card=%d -> %s\n",
+		printk(KERN_INFO "%s:    card=%d -> %s\n",
 		       dev->name, i, cx23885_boards[i].name);
 }
 
@@ -271,11 +273,11 @@
 {
 	struct tveeprom tv;
 
-	tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv, eeprom_data);
+	tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv,
+		eeprom_data);
 
 	/* Make sure we support the board model */
-	switch (tv.model)
-	{
+	switch (tv.model) {
 	case 71009:
 		/* WinTV-HVR1200 (PCIe, Retail, full height)
 		 * DVB-T and basic analog */
@@ -303,21 +305,51 @@
 	case 71999:
 		/* WinTV-HVR1200 (PCIe, OEM, full height)
 		 * DVB-T and basic analog */
-	case 76601: /* WinTV-HVR1800lp (PCIe, Retail, No IR, Dual channel ATSC and MPEG2 HW Encoder */
-	case 77001: /* WinTV-HVR1500 (Express Card, OEM, No IR, ATSC and Basic analog */
-	case 77011: /* WinTV-HVR1500 (Express Card, Retail, No IR, ATSC and Basic analog */
-	case 77041: /* WinTV-HVR1500Q (Express Card, OEM, No IR, ATSC/QAM and Basic analog */
-	case 77051: /* WinTV-HVR1500Q (Express Card, Retail, No IR, ATSC/QAM and Basic analog */
-	case 78011: /* WinTV-HVR1800 (PCIe, Retail, 3.5mm in, IR, No FM, Dual channel ATSC and MPEG2 HW Encoder */
-	case 78501: /* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM, Dual channel ATSC and MPEG2 HW Encoder */
-	case 78521: /* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM, Dual channel ATSC and MPEG2 HW Encoder */
-	case 78531: /* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, No FM, Dual channel ATSC and MPEG2 HW Encoder */
-	case 78631: /* WinTV-HVR1800 (PCIe, OEM, No IR, No FM, Dual channel ATSC and MPEG2 HW Encoder */
-	case 79001: /* WinTV-HVR1250 (PCIe, Retail, IR, full height, ATSC and Basic analog */
-	case 79101: /* WinTV-HVR1250 (PCIe, Retail, IR, half height, ATSC and Basic analog */
-	case 79561: /* WinTV-HVR1250 (PCIe, OEM, No IR, half height, ATSC and Basic analog */
-	case 79571: /* WinTV-HVR1250 (PCIe, OEM, No IR, full height, ATSC and Basic analog */
-	case 79671: /* WinTV-HVR1250 (PCIe, OEM, No IR, half height, ATSC and Basic analog */
+	case 76601:
+		/* WinTV-HVR1800lp (PCIe, Retail, No IR, Dual
+			channel ATSC and MPEG2 HW Encoder */
+	case 77001:
+		/* WinTV-HVR1500 (Express Card, OEM, No IR, ATSC
+			and Basic analog */
+	case 77011:
+		/* WinTV-HVR1500 (Express Card, Retail, No IR, ATSC
+			and Basic analog */
+	case 77041:
+		/* WinTV-HVR1500Q (Express Card, OEM, No IR, ATSC/QAM
+			and Basic analog */
+	case 77051:
+		/* WinTV-HVR1500Q (Express Card, Retail, No IR, ATSC/QAM
+			and Basic analog */
+	case 78011:
+		/* WinTV-HVR1800 (PCIe, Retail, 3.5mm in, IR, No FM,
+			Dual channel ATSC and MPEG2 HW Encoder */
+	case 78501:
+		/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM,
+			Dual channel ATSC and MPEG2 HW Encoder */
+	case 78521:
+		/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM,
+			Dual channel ATSC and MPEG2 HW Encoder */
+	case 78531:
+		/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, No FM,
+			Dual channel ATSC and MPEG2 HW Encoder */
+	case 78631:
+		/* WinTV-HVR1800 (PCIe, OEM, No IR, No FM,
+			Dual channel ATSC and MPEG2 HW Encoder */
+	case 79001:
+		/* WinTV-HVR1250 (PCIe, Retail, IR, full height,
+			ATSC and Basic analog */
+	case 79101:
+		/* WinTV-HVR1250 (PCIe, Retail, IR, half height,
+			ATSC and Basic analog */
+	case 79561:
+		/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
+			ATSC and Basic analog */
+	case 79571:
+		/* WinTV-HVR1250 (PCIe, OEM, No IR, full height,
+		 ATSC and Basic analog */
+	case 79671:
+		/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
+			ATSC and Basic analog */
 	case 80019:
 		/* WinTV-HVR1400 (Express Card, Retail, IR,
 		 * DVB-T and Basic analog */
@@ -329,7 +361,8 @@
 		 * DVB-T and MPEG2 HW Encoder */
 		break;
 	default:
-		printk("%s: warning: unknown hauppauge model #%d\n", dev->name, tv.model);
+		printk(KERN_WARNING "%s: warning: unknown hauppauge model #%d\n",
+			dev->name, tv.model);
 		break;
 	}
 
@@ -352,7 +385,7 @@
 		return -EINVAL;
 	}
 
-	switch(dev->board) {
+	switch (dev->board) {
 	case CX23885_BOARD_HAUPPAUGE_HVR1400:
 	case CX23885_BOARD_HAUPPAUGE_HVR1500:
 	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -383,7 +416,7 @@
 
 void cx23885_gpio_setup(struct cx23885_dev *dev)
 {
-	switch(dev->board) {
+	switch (dev->board) {
 	case CX23885_BOARD_HAUPPAUGE_HVR1250:
 		/* GPIO-0 cx24227 demodulator reset */
 		cx_set(GP0_IO, 0x00010001); /* Bring the part out of reset */
@@ -617,10 +650,3 @@
 }
 
 /* ------------------------------------------------------------------ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
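
The long Hauppauge model switch in cx23885_card_setup() is reflowed so that every model number sits on its own case line with its comment wrapped beneath it; all of them still fall through to the single break, so the behaviour is unchanged and only the layout (and the line-length complaints) improves. The fall-through grouping in miniature, with model numbers taken from the hunk:

#include <stdio.h>

static const char *classify(int model)
{
	switch (model) {
	case 79001:
	case 79101:
	case 79561:
		/* adjacent case labels share one handling path */
		return "WinTV-HVR1250 variant";
	default:
		return "unknown model";
	}
}

int main(void)
{
	printf("%d -> %s\n", 79101, classify(79101));
	return 0;
}
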
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index beb3e61..8f6fb2a 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -37,12 +37,12 @@
 MODULE_LICENSE("GPL");
 
 static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
 
 static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
 module_param_array(card,  int, NULL, 0444);
-MODULE_PARM_DESC(card,"card type");
+MODULE_PARM_DESC(card, "card type");
 
 #define dprintk(level, fmt, arg...)\
 	do { if (debug >= level)\
@@ -364,13 +364,12 @@
 		list_del(&buf->vb.queue);
 		wake_up(&buf->vb.done);
 	}
-	if (list_empty(&q->active)) {
+	if (list_empty(&q->active))
 		del_timer(&q->timeout);
-	} else {
+	else
 		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
-	}
 	if (bc != 1)
-		printk("%s: %d buffers handled (should be 1)\n",
+		printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
 		       __func__, bc);
 }
 
@@ -381,8 +380,7 @@
 	unsigned int i, lines;
 	u32 cdt;
 
-	if (ch->cmds_start == 0)
-	{
+	if (ch->cmds_start == 0) {
 		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
 			ch->name);
 		cx_write(ch->ptr1_reg, 0);
@@ -418,15 +416,15 @@
 
 	/* write CMDS */
 	if (ch->jumponly)
-		cx_write(ch->cmds_start +  0, 8);
+		cx_write(ch->cmds_start + 0, 8);
 	else
-		cx_write(ch->cmds_start +  0, risc);
+		cx_write(ch->cmds_start + 0, risc);
 	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
 	cx_write(ch->cmds_start +  8, cdt);
 	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
 	cx_write(ch->cmds_start + 16, ch->ctrl_start);
 	if (ch->jumponly)
-		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2) );
+		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
 	else
 		cx_write(ch->cmds_start + 20, 64 >> 2);
 	for (i = 24; i < 80; i += 4)
@@ -436,9 +434,9 @@
 	cx_write(ch->ptr1_reg, ch->fifo_start);
 	cx_write(ch->ptr2_reg, cdt);
 	cx_write(ch->cnt2_reg, (lines*16) >> 3);
-	cx_write(ch->cnt1_reg, (bpl >> 3) -1);
+	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
 
-	dprintk(2,"[bridge %d] sram setup %s: bpl=%d lines=%d\n",
+	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
 		dev->bridge,
 		ch->name,
 		bpl,
@@ -469,43 +467,43 @@
 	u32 risc;
 	unsigned int i, j, n;
 
-	printk("%s: %s - dma channel status dump\n",
+	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
 	       dev->name, ch->name);
 	for (i = 0; i < ARRAY_SIZE(name); i++)
-		printk("%s:   cmds: %-15s: 0x%08x\n",
+		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
 		       dev->name, name[i],
 		       cx_read(ch->cmds_start + 4*i));
 
 	for (i = 0; i < 4; i++) {
 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
-		printk("%s:   risc%d: ", dev->name, i);
+		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
 		cx23885_risc_decode(risc);
 	}
 	for (i = 0; i < (64 >> 2); i += n) {
 		risc = cx_read(ch->ctrl_start + 4 * i);
 		/* No consideration for bits 63-32 */
 
-		printk("%s:   (0x%08x) iq %x: ", dev->name,
+		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
 		       ch->ctrl_start + 4 * i, i);
 		n = cx23885_risc_decode(risc);
 		for (j = 1; j < n; j++) {
 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
-			printk("%s:   iq %x: 0x%08x [ arg #%d ]\n",
+			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
 			       dev->name, i+j, risc, j);
 		}
 	}
 
-	printk("%s: fifo: 0x%08x -> 0x%x\n",
+	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
 	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
-	printk("%s: ctrl: 0x%08x -> 0x%x\n",
+	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
 	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
-	printk("%s:   ptr1_reg: 0x%08x\n",
+	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
 	       dev->name, cx_read(ch->ptr1_reg));
-	printk("%s:   ptr2_reg: 0x%08x\n",
+	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
 	       dev->name, cx_read(ch->ptr2_reg));
-	printk("%s:   cnt1_reg: 0x%08x\n",
+	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
 	       dev->name, cx_read(ch->cnt1_reg));
-	printk("%s:   cnt2_reg: 0x%08x\n",
+	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
 	       dev->name, cx_read(ch->cnt2_reg));
 }
 
@@ -515,13 +513,13 @@
 	struct cx23885_dev *dev = port->dev;
 	unsigned int i, j, n;
 
-	printk("%s: risc disasm: %p [dma=0x%08lx]\n",
+	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
 	       dev->name, risc->cpu, (unsigned long)risc->dma);
 	for (i = 0; i < (risc->size >> 2); i += n) {
-		printk("%s:   %04d: ", dev->name, i);
+		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
 		for (j = 1; j < n; j++)
-			printk("%s:   %04d: 0x%08x [ arg #%d ]\n",
+			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
 			       dev->name, i + j, risc->cpu[i + j], j);
 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
 			break;
@@ -600,7 +598,7 @@
 	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
 	 * occur on the cx23887 bridge.
 	 */
-	if(dev->bridge == CX23885_BRIDGE_885)
+	if (dev->bridge == CX23885_BRIDGE_885)
 		cx_clear(RDR_TLCTL0, 1 << 4);
 
 	return 0;
@@ -608,13 +606,13 @@
 
 static int get_resources(struct cx23885_dev *dev)
 {
-	if (request_mem_region(pci_resource_start(dev->pci,0),
-			       pci_resource_len(dev->pci,0),
+	if (request_mem_region(pci_resource_start(dev->pci, 0),
+			       pci_resource_len(dev->pci, 0),
 			       dev->name))
 		return 0;
 
 	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
-		dev->name, (unsigned long long)pci_resource_start(dev->pci,0));
+		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
 
 	return -EBUSY;
 }
@@ -623,7 +621,8 @@
 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
 				u32 reg, u32 mask, u32 value);
 
-static int cx23885_init_tsport(struct cx23885_dev *dev, struct cx23885_tsport *port, int portno)
+static int cx23885_init_tsport(struct cx23885_dev *dev,
+	struct cx23885_tsport *port, int portno)
 {
 	dprintk(1, "%s(portno=%d)\n", __func__, portno);
 
@@ -643,7 +642,18 @@
 	port->mpegq.timeout.data = (unsigned long)port;
 	init_timer(&port->mpegq.timeout);
 
-	switch(portno) {
+	mutex_init(&port->frontends.lock);
+	INIT_LIST_HEAD(&port->frontends.felist);
+	port->frontends.active_fe_id = 0;
+
+	/* This should be hardcoded to allow a single frontend
+	 * attachment to this tsport, keeping the -dvb.c
+	 * code clean and safe.
+	 */
+	if (!port->num_frontends)
+		port->num_frontends = 1;
+
+	switch (portno) {
 	case 1:
 		port->reg_gpcnt          = VID_B_GPCNT;
 		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
@@ -744,13 +754,13 @@
 	mutex_unlock(&devlist);
 
 	/* Configure the internal memory */
-	if(dev->pci->device == 0x8880) {
+	if (dev->pci->device == 0x8880) {
 		dev->bridge = CX23885_BRIDGE_887;
 		/* Apply a sensible clock frequency for the PCIe bridge */
 		dev->clk_freq = 25000000;
 		dev->sram_channels = cx23887_sram_channels;
 	} else
-	if(dev->pci->device == 0x8852) {
+	if (dev->pci->device == 0x8852) {
 		dev->bridge = CX23885_BRIDGE_885;
 		/* Apply a sensible clock frequency for the PCIe bridge */
 		dev->clk_freq = 28000000;
@@ -831,8 +841,8 @@
 	}
 
 	/* PCIe stuff */
-	dev->lmmio = ioremap(pci_resource_start(dev->pci,0),
-			     pci_resource_len(dev->pci,0));
+	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
+			     pci_resource_len(dev->pci, 0));
 
 	dev->bmmio = (u8 __iomem *)dev->lmmio;
 
@@ -862,7 +872,7 @@
 	cx23885_i2c_register(&dev->i2c_bus[1]);
 	cx23885_i2c_register(&dev->i2c_bus[2]);
 	cx23885_card_setup(dev);
-	cx23885_call_i2c_clients (&dev->i2c_bus[0], TUNER_SET_STANDBY, NULL);
+	cx23885_call_i2c_clients(&dev->i2c_bus[0], TUNER_SET_STANDBY, NULL);
 	cx23885_ir_init(dev);
 
 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
@@ -908,8 +918,8 @@
 
 static void cx23885_dev_unregister(struct cx23885_dev *dev)
 {
-	release_mem_region(pci_resource_start(dev->pci,0),
-			   pci_resource_len(dev->pci,0));
+	release_mem_region(pci_resource_start(dev->pci, 0),
+			   pci_resource_len(dev->pci, 0));
 
 	if (!atomic_dec_and_test(&dev->refcount))
 		return;
@@ -936,7 +946,7 @@
 	iounmap(dev->lmmio);
 }
 
-static __le32* cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
+static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
 			       unsigned int offset, u32 sync_line,
 			       unsigned int bpl, unsigned int padding,
 			       unsigned int lines)
@@ -957,31 +967,31 @@
 		}
 		if (bpl <= sg_dma_len(sg)-offset) {
 			/* fits into current chunk */
-			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
-			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
-			*(rp++)=cpu_to_le32(0); /* bits 63-32 */
-			offset+=bpl;
+			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
+			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
+			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
+			offset += bpl;
 		} else {
 			/* scanline needs to be split */
 			todo = bpl;
-			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_SOL|
+			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
 					    (sg_dma_len(sg)-offset));
-			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
-			*(rp++)=cpu_to_le32(0); /* bits 63-32 */
+			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
+			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
 			todo -= (sg_dma_len(sg)-offset);
 			offset = 0;
 			sg++;
 			while (todo > sg_dma_len(sg)) {
-				*(rp++)=cpu_to_le32(RISC_WRITE|
+				*(rp++) = cpu_to_le32(RISC_WRITE|
 						    sg_dma_len(sg));
-				*(rp++)=cpu_to_le32(sg_dma_address(sg));
-				*(rp++)=cpu_to_le32(0); /* bits 63-32 */
+				*(rp++) = cpu_to_le32(sg_dma_address(sg));
+				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
 				todo -= sg_dma_len(sg);
 				sg++;
 			}
-			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
-			*(rp++)=cpu_to_le32(sg_dma_address(sg));
-			*(rp++)=cpu_to_le32(0); /* bits 63-32 */
+			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
+			*(rp++) = cpu_to_le32(sg_dma_address(sg));
+			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
 			offset += todo;
 		}
 		offset += padding;
@@ -1010,9 +1020,11 @@
 	   can cause next bpl to start close to a page border.  First DMA
 	   region may be smaller than PAGE_SIZE */
 	/* write and jump need and extra dword */
-	instructions  = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
+	instructions  = fields * (1 + ((bpl + padding) * lines)
+		/ PAGE_SIZE + lines);
 	instructions += 2;
-	if ((rc = btcx_riscmem_alloc(pci,risc,instructions*12)) < 0)
+	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
+	if (rc < 0)
 		return rc;
 
 	/* write risc instructions */
@@ -1026,7 +1038,7 @@
 
 	/* save pointer to jmp instruction address */
 	risc->jmp = rp;
-	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
 	return 0;
 }
 
@@ -1048,7 +1060,8 @@
 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
 	instructions += 1;
 
-	if ((rc = btcx_riscmem_alloc(pci,risc,instructions*12)) < 0)
+	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
+	if (rc < 0)
 		return rc;
 
 	/* write risc instructions */
@@ -1057,7 +1070,7 @@
 
 	/* save pointer to jmp instruction address */
 	risc->jmp = rp;
-	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
 	return 0;
 }
 
@@ -1067,7 +1080,8 @@
 	__le32 *rp;
 	int rc;
 
-	if ((rc = btcx_riscmem_alloc(pci, risc, 4*16)) < 0)
+	rc = btcx_riscmem_alloc(pci, risc, 4*16);
+	if (rc < 0)
 		return rc;
 
 	/* write risc instructions */
@@ -1161,22 +1175,23 @@
 
 	/* setup fifo + format */
 	cx23885_sram_channel_setup(dev,
-				   &dev->sram_channels[ port->sram_chno ],
+				   &dev->sram_channels[port->sram_chno],
 				   port->ts_packet_size, buf->risc.dma);
-	if(debug > 5) {
-		cx23885_sram_channel_dump(dev, &dev->sram_channels[ port->sram_chno ] );
+	if (debug > 5) {
+		cx23885_sram_channel_dump(dev,
+			&dev->sram_channels[port->sram_chno]);
 		cx23885_risc_disasm(port, &buf->risc);
 	}
 
 	/* write TS length to chip */
 	cx_write(port->reg_lngth, buf->vb.width);
 
-	if ( (!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
-		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB)) ) {
-		printk( "%s() Failed. Unsupported value in .portb/c (0x%08x)/(0x%08x)\n",
+	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
+		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
+		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
 			__func__,
 			cx23885_boards[dev->board].portb,
-			cx23885_boards[dev->board].portc );
+			cx23885_boards[dev->board].portc);
 		return -EINVAL;
 	}
 
@@ -1186,7 +1201,7 @@
 	udelay(100);
 
 	/* If the port supports SRC SELECT, configure it */
-	if(port->reg_src_sel)
+	if (port->reg_src_sel)
 		cx_write(port->reg_src_sel, port->src_sel_val);
 
 	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
@@ -1195,7 +1210,7 @@
 	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
 	udelay(100);
 
-	// NOTE: this is 2 (reserved) for portb, does it matter?
+	/* NOTE: this is 2 (reserved) for portb, does it matter? */
 	/* reset counter to zero */
 	cx_write(port->reg_gpcnt_ctl, 3);
 	q->count = 1;
@@ -1229,11 +1244,11 @@
 		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
 	}
 
-	switch(dev->bridge) {
+	switch (dev->bridge) {
 	case CX23885_BRIDGE_885:
 	case CX23885_BRIDGE_887:
 		/* enable irqs */
-		dprintk(1, "%s() enabling TS int's and DMA\n", __func__ );
+		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
 		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
 		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
 		cx_set(PCI_INT_MSK, dev->pci_irqmask | port->pci_irqmask);
@@ -1292,8 +1307,7 @@
 	struct cx23885_buffer *buf;
 
 	dprintk(5, "%s()\n", __func__);
-	if (list_empty(&q->active))
-	{
+	if (list_empty(&q->active)) {
 		struct cx23885_buffer *prev;
 		prev = NULL;
 
@@ -1311,7 +1325,7 @@
 				buf->vb.state = VIDEOBUF_ACTIVE;
 				buf->count    = q->count++;
 				mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
-				dprintk(5, "[%p/%d] restart_queue - first active\n",
+				dprintk(5, "[%p/%d] restart_queue - f/active\n",
 					buf, buf->vb.i);
 
 			} else if (prev->vb.width  == buf->vb.width  &&
@@ -1322,8 +1336,9 @@
 				buf->vb.state = VIDEOBUF_ACTIVE;
 				buf->count    = q->count++;
 				prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
-				prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
-				dprintk(5,"[%p/%d] restart_queue - move to active\n",
+				/* 64 bit bits 63-32 */
+				prev->risc.jmp[2] = cpu_to_le32(0);
+				dprintk(5, "[%p/%d] restart_queue - m/active\n",
 					buf, buf->vb.i);
 			} else {
 				return 0;
@@ -1362,7 +1377,8 @@
 		buf->vb.size   = size;
 		buf->vb.field  = field /*V4L2_FIELD_TOP*/;
 
-		if (0 != (rc = videobuf_iolock(q, &buf->vb, NULL)))
+		rc = videobuf_iolock(q, &buf->vb, NULL);
+		if (0 != rc)
 			goto fail;
 		cx23885_risc_databuffer(dev->pci, &buf->risc,
 					videobuf_to_dma(&buf->vb)->sglist,
@@ -1388,7 +1404,7 @@
 	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
 
 	if (list_empty(&cx88q->active)) {
-		dprintk( 1, "queue is empty - first active\n" );
+		dprintk(1, "queue is empty - first active\n");
 		list_add_tail(&buf->vb.queue, &cx88q->active);
 		cx23885_start_dma(port, cx88q, buf);
 		buf->vb.state = VIDEOBUF_ACTIVE;
@@ -1397,7 +1413,7 @@
 		dprintk(1, "[%p/%d] %s - first active\n",
 			buf, buf->vb.i, __func__);
 	} else {
-		dprintk( 1, "queue is not empty - append to active\n" );
+		dprintk(1, "queue is not empty - append to active\n");
 		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
 				  vb.queue);
 		list_add_tail(&buf->vb.queue, &cx88q->active);
@@ -1405,7 +1421,7 @@
 		buf->count    = cx88q->count++;
 		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
 		prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
-		dprintk( 1, "[%p/%d] %s - append to active\n",
+		dprintk(1, "[%p/%d] %s - append to active\n",
 			 buf, buf->vb.i, __func__);
 	}
 }
@@ -1431,7 +1447,7 @@
 			buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
 	}
 	if (restart) {
-		dprintk(1, "restarting queue\n" );
+		dprintk(1, "restarting queue\n");
 		cx23885_restart_queue(port, q);
 	}
 	spin_unlock_irqrestore(&port->slock, flags);
@@ -1453,10 +1469,11 @@
 	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
 	struct cx23885_dev *dev = port->dev;
 
-	dprintk(1, "%s()\n",__func__);
+	dprintk(1, "%s()\n", __func__);
 
 	if (debug > 5)
-		cx23885_sram_channel_dump(dev, &dev->sram_channels[ port->sram_chno ]);
+		cx23885_sram_channel_dump(dev,
+			&dev->sram_channels[port->sram_chno]);
 
 	cx23885_stop_dma(port);
 	do_cancel_buffers(port, "timeout", 1);
@@ -1532,16 +1549,23 @@
 	if ((status & VID_BC_MSK_OPC_ERR) ||
 		(status & VID_BC_MSK_BAD_PKT) ||
 		(status & VID_BC_MSK_SYNC) ||
-		(status & VID_BC_MSK_OF))
-	{
+		(status & VID_BC_MSK_OF)) {
+
 		if (status & VID_BC_MSK_OPC_ERR)
-			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n", VID_BC_MSK_OPC_ERR);
+			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
+				VID_BC_MSK_OPC_ERR);
+
 		if (status & VID_BC_MSK_BAD_PKT)
-			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n", VID_BC_MSK_BAD_PKT);
+			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
+				VID_BC_MSK_BAD_PKT);
+
 		if (status & VID_BC_MSK_SYNC)
-			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n", VID_BC_MSK_SYNC);
+			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
+				VID_BC_MSK_SYNC);
+
 		if (status & VID_BC_MSK_OF)
-			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n", VID_BC_MSK_OF);
+			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
+				VID_BC_MSK_OF);
 
 		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
 
@@ -1595,7 +1619,7 @@
 	ts2_status = cx_read(VID_C_INT_STAT);
 	ts2_mask = cx_read(VID_C_INT_MSK);
 
-	if ( (pci_status == 0) && (ts2_status == 0) && (ts1_status == 0) )
+	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
 		goto out;
 
 	vida_count = cx_read(VID_A_GPCNT);
@@ -1610,38 +1634,56 @@
 	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
 		ts2_status, ts2_mask, ts2_count);
 
-	if ( (pci_status & PCI_MSK_RISC_RD) ||
-	     (pci_status & PCI_MSK_RISC_WR) ||
-	     (pci_status & PCI_MSK_AL_RD) ||
-	     (pci_status & PCI_MSK_AL_WR) ||
-	     (pci_status & PCI_MSK_APB_DMA) ||
-	     (pci_status & PCI_MSK_VID_C) ||
-	     (pci_status & PCI_MSK_VID_B) ||
-	     (pci_status & PCI_MSK_VID_A) ||
-	     (pci_status & PCI_MSK_AUD_INT) ||
-	     (pci_status & PCI_MSK_AUD_EXT) )
-	{
+	if ((pci_status & PCI_MSK_RISC_RD) ||
+	    (pci_status & PCI_MSK_RISC_WR) ||
+	    (pci_status & PCI_MSK_AL_RD) ||
+	    (pci_status & PCI_MSK_AL_WR) ||
+	    (pci_status & PCI_MSK_APB_DMA) ||
+	    (pci_status & PCI_MSK_VID_C) ||
+	    (pci_status & PCI_MSK_VID_B) ||
+	    (pci_status & PCI_MSK_VID_A) ||
+	    (pci_status & PCI_MSK_AUD_INT) ||
+	    (pci_status & PCI_MSK_AUD_EXT)) {
 
 		if (pci_status & PCI_MSK_RISC_RD)
-			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n", PCI_MSK_RISC_RD);
+			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
+				PCI_MSK_RISC_RD);
+
 		if (pci_status & PCI_MSK_RISC_WR)
-			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n", PCI_MSK_RISC_WR);
+			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
+				PCI_MSK_RISC_WR);
+
 		if (pci_status & PCI_MSK_AL_RD)
-			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n", PCI_MSK_AL_RD);
+			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
+				PCI_MSK_AL_RD);
+
 		if (pci_status & PCI_MSK_AL_WR)
-			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n", PCI_MSK_AL_WR);
+			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
+				PCI_MSK_AL_WR);
+
 		if (pci_status & PCI_MSK_APB_DMA)
-			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n", PCI_MSK_APB_DMA);
+			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
+				PCI_MSK_APB_DMA);
+
 		if (pci_status & PCI_MSK_VID_C)
-			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n", PCI_MSK_VID_C);
+			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
+				PCI_MSK_VID_C);
+
 		if (pci_status & PCI_MSK_VID_B)
-			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n", PCI_MSK_VID_B);
+			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
+				PCI_MSK_VID_B);
+
 		if (pci_status & PCI_MSK_VID_A)
-			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n", PCI_MSK_VID_A);
+			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
+				PCI_MSK_VID_A);
+
 		if (pci_status & PCI_MSK_AUD_INT)
-			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n", PCI_MSK_AUD_INT);
+			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
+				PCI_MSK_AUD_INT);
+
 		if (pci_status & PCI_MSK_AUD_EXT)
-			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n", PCI_MSK_AUD_EXT);
+			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
+				PCI_MSK_AUD_EXT);
 
 	}
 
@@ -1753,13 +1795,13 @@
 		.device       = 0x8852,
 		.subvendor    = PCI_ANY_ID,
 		.subdevice    = PCI_ANY_ID,
-	},{
+	}, {
 		/* CX23887 Rev 2 */
 		.vendor       = 0x14f1,
 		.device       = 0x8880,
 		.subvendor    = PCI_ANY_ID,
 		.subdevice    = PCI_ANY_ID,
-	},{
+	}, {
 		/* --- end of list --- */
 	}
 };
@@ -1797,9 +1839,3 @@
 module_exit(cx23885_fini);
 
 /* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
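
Besides the KERN_-level tags and spacing fixes, the cx23885-core hunks do two substantive things: each tsport now comes up with an initialised frontend list (frontends.lock, felist, num_frontends defaulting to 1) that the cx23885-dvb hunks below look up through videobuf_dvb_get_frontend() instead of using the old embedded port->dvb, and assignments are moved out of if-conditions around btcx_riscmem_alloc(). A userspace rendering of that last, checkpatch-driven pattern (risc_alloc() is a stand-in, not a kernel symbol):

#include <stdio.h>
#include <errno.h>

/* stand-in allocator that fails, to show the error path */
static int risc_alloc(int size)
{
	return -ENOMEM;
}

int main(void)
{
	int rc;

	/* old form:  if ((rc = risc_alloc(4 * 16)) < 0) ...
	 * new form keeps the assignment on its own line: */
	rc = risc_alloc(4 * 16);
	if (rc < 0) {
		printf("alloc failed: %d\n", rc);
		return 1;
	}
	return 0;
}
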
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 24bd183..e1aac07 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -78,19 +78,19 @@
 			   struct videobuf_buffer *vb, enum v4l2_field field)
 {
 	struct cx23885_tsport *port = q->priv_data;
-	return cx23885_buf_prepare(q, port, (struct cx23885_buffer*)vb, field);
+	return cx23885_buf_prepare(q, port, (struct cx23885_buffer *)vb, field);
 }
 
 static void dvb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
 {
 	struct cx23885_tsport *port = q->priv_data;
-	cx23885_buf_queue(port, (struct cx23885_buffer*)vb);
+	cx23885_buf_queue(port, (struct cx23885_buffer *)vb);
 }
 
 static void dvb_buf_release(struct videobuf_queue *q,
 			    struct videobuf_buffer *vb)
 {
-	cx23885_free_buffer(q, (struct cx23885_buffer*)vb);
+	cx23885_free_buffer(q, (struct cx23885_buffer *)vb);
 }
 
 static struct videobuf_queue_ops dvb_qops = {
@@ -312,19 +312,25 @@
 {
 	struct cx23885_dev *dev = port->dev;
 	struct cx23885_i2c *i2c_bus = NULL;
+	struct videobuf_dvb_frontend *fe0;
+
+	/* Get the first frontend */
+	fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
+	if (!fe0)
+		return -EINVAL;
 
 	/* init struct videobuf_dvb */
-	port->dvb.name = dev->name;
+	fe0->dvb.name = dev->name;
 
 	/* init frontend */
 	switch (dev->board) {
 	case CX23885_BOARD_HAUPPAUGE_HVR1250:
 		i2c_bus = &dev->i2c_bus[0];
-		port->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 						&hauppauge_generic_config,
 						&i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL) {
-			dvb_attach(mt2131_attach, port->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(mt2131_attach, fe0->dvb.frontend,
 				   &i2c_bus->i2c_adap,
 				   &hauppauge_generic_tunerconfig, 0);
 		}
@@ -333,27 +339,27 @@
 		i2c_bus = &dev->i2c_bus[0];
 		switch (alt_tuner) {
 		case 1:
-			port->dvb.frontend =
+			fe0->dvb.frontend =
 				dvb_attach(s5h1409_attach,
 					   &hauppauge_ezqam_config,
 					   &i2c_bus->i2c_adap);
-			if (port->dvb.frontend != NULL) {
-				dvb_attach(tda829x_attach, port->dvb.frontend,
+			if (fe0->dvb.frontend != NULL) {
+				dvb_attach(tda829x_attach, fe0->dvb.frontend,
 					   &dev->i2c_bus[1].i2c_adap, 0x42,
 					   &tda829x_no_probe);
-				dvb_attach(tda18271_attach, port->dvb.frontend,
+				dvb_attach(tda18271_attach, fe0->dvb.frontend,
 					   0x60, &dev->i2c_bus[1].i2c_adap,
 					   &hauppauge_tda18271_config);
 			}
 			break;
 		case 0:
 		default:
-			port->dvb.frontend =
+			fe0->dvb.frontend =
 				dvb_attach(s5h1409_attach,
 					   &hauppauge_generic_config,
 					   &i2c_bus->i2c_adap);
-			if (port->dvb.frontend != NULL)
-				dvb_attach(mt2131_attach, port->dvb.frontend,
+			if (fe0->dvb.frontend != NULL)
+				dvb_attach(mt2131_attach, fe0->dvb.frontend,
 					   &i2c_bus->i2c_adap,
 					   &hauppauge_generic_tunerconfig, 0);
 			break;
@@ -361,42 +367,42 @@
 		break;
 	case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
 		i2c_bus = &dev->i2c_bus[0];
-		port->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 						&hauppauge_hvr1800lp_config,
 						&i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL) {
-			dvb_attach(mt2131_attach, port->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(mt2131_attach, fe0->dvb.frontend,
 				   &i2c_bus->i2c_adap,
 				   &hauppauge_generic_tunerconfig, 0);
 		}
 		break;
 	case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
 		i2c_bus = &dev->i2c_bus[0];
-		port->dvb.frontend = dvb_attach(lgdt330x_attach,
+		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 						&fusionhdtv_5_express,
 						&i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL) {
-			dvb_attach(simple_tuner_attach, port->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &i2c_bus->i2c_adap, 0x61,
 				   TUNER_LG_TDVS_H06XF);
 		}
 		break;
 	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
 		i2c_bus = &dev->i2c_bus[1];
-		port->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 						&hauppauge_hvr1500q_config,
 						&dev->i2c_bus[0].i2c_adap);
-		if (port->dvb.frontend != NULL)
-			dvb_attach(xc5000_attach, port->dvb.frontend,
+		if (fe0->dvb.frontend != NULL)
+			dvb_attach(xc5000_attach, fe0->dvb.frontend,
 				   &i2c_bus->i2c_adap,
 				   &hauppauge_hvr1500q_tunerconfig);
 		break;
 	case CX23885_BOARD_HAUPPAUGE_HVR1500:
 		i2c_bus = &dev->i2c_bus[1];
-		port->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 						&hauppauge_hvr1500_config,
 						&dev->i2c_bus[0].i2c_adap);
-		if (port->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend != NULL) {
 			struct dvb_frontend *fe;
 			struct xc2028_config cfg = {
 				.i2c_adap  = &i2c_bus->i2c_adap,
@@ -409,7 +415,7 @@
 			};
 
 			fe = dvb_attach(xc2028_attach,
-					port->dvb.frontend, &cfg);
+					fe0->dvb.frontend, &cfg);
 			if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
 				fe->ops.tuner_ops.set_config(fe, &ctl);
 		}
@@ -417,24 +423,24 @@
 	case CX23885_BOARD_HAUPPAUGE_HVR1200:
 	case CX23885_BOARD_HAUPPAUGE_HVR1700:
 		i2c_bus = &dev->i2c_bus[0];
-		port->dvb.frontend = dvb_attach(tda10048_attach,
+		fe0->dvb.frontend = dvb_attach(tda10048_attach,
 			&hauppauge_hvr1200_config,
 			&i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL) {
-			dvb_attach(tda829x_attach, port->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(tda829x_attach, fe0->dvb.frontend,
 				&dev->i2c_bus[1].i2c_adap, 0x42,
 				&tda829x_no_probe);
-			dvb_attach(tda18271_attach, port->dvb.frontend,
+			dvb_attach(tda18271_attach, fe0->dvb.frontend,
 				0x60, &dev->i2c_bus[1].i2c_adap,
 				&hauppauge_hvr1200_tuner_config);
 		}
 		break;
 	case CX23885_BOARD_HAUPPAUGE_HVR1400:
 		i2c_bus = &dev->i2c_bus[0];
-		port->dvb.frontend = dvb_attach(dib7000p_attach,
+		fe0->dvb.frontend = dvb_attach(dib7000p_attach,
 			&i2c_bus->i2c_adap,
 			0x12, &hauppauge_hvr1400_dib7000_config);
-		if (port->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend != NULL) {
 			struct dvb_frontend *fe;
 			struct xc2028_config cfg = {
 				.i2c_adap  = &dev->i2c_bus[1].i2c_adap,
@@ -444,12 +450,13 @@
 				.fname   = XC3028L_DEFAULT_FIRMWARE,
 				.max_len = 64,
 				.demod   = 5000,
-				/* This is true for all demods with v36 firmware? */
+				/* This is true for all demods with
+					v36 firmware? */
 				.type    = XC2028_D2633,
 			};
 
 			fe = dvb_attach(xc2028_attach,
-					port->dvb.frontend, &cfg);
+					fe0->dvb.frontend, &cfg);
 			if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
 				fe->ops.tuner_ops.set_config(fe, &ctl);
 		}
@@ -457,25 +464,25 @@
 	case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
 		i2c_bus = &dev->i2c_bus[port->nr - 1];
 
-		port->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 						&dvico_s5h1409_config,
 						&i2c_bus->i2c_adap);
-		if (port->dvb.frontend == NULL)
-			port->dvb.frontend = dvb_attach(s5h1411_attach,
+		if (fe0->dvb.frontend == NULL)
+			fe0->dvb.frontend = dvb_attach(s5h1411_attach,
 							&dvico_s5h1411_config,
 							&i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL)
-			dvb_attach(xc5000_attach, port->dvb.frontend,
+		if (fe0->dvb.frontend != NULL)
+			dvb_attach(xc5000_attach, fe0->dvb.frontend,
 				   &i2c_bus->i2c_adap,
 				   &dvico_xc5000_tunerconfig);
 		break;
 	case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP: {
 		i2c_bus = &dev->i2c_bus[port->nr - 1];
 
-		port->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_xc3028,
 					       &i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend != NULL) {
 			struct dvb_frontend      *fe;
 			struct xc2028_config	  cfg = {
 				.i2c_adap  = &i2c_bus->i2c_adap,
@@ -487,7 +494,7 @@
 				.demod       = XC3028_FE_ZARLINK456,
 			};
 
-			fe = dvb_attach(xc2028_attach, port->dvb.frontend,
+			fe = dvb_attach(xc2028_attach, fe0->dvb.frontend,
 					&cfg);
 			if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
 				fe->ops.tuner_ops.set_config(fe, &ctl);
@@ -497,10 +504,10 @@
 	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
 		i2c_bus = &dev->i2c_bus[0];
 
-		port->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 			&dvico_fusionhdtv_xc3028,
 			&i2c_bus->i2c_adap);
-		if (port->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend != NULL) {
 			struct dvb_frontend      *fe;
 			struct xc2028_config	  cfg = {
 				.i2c_adap  = &dev->i2c_bus[1].i2c_adap,
@@ -512,73 +519,108 @@
 				.demod       = XC3028_FE_ZARLINK456,
 			};
 
-			fe = dvb_attach(xc2028_attach, port->dvb.frontend,
+			fe = dvb_attach(xc2028_attach, fe0->dvb.frontend,
 				&cfg);
 			if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
 				fe->ops.tuner_ops.set_config(fe, &ctl);
 		}
 		break;
 	default:
-		printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
+		printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
+			"isn't supported yet\n",
 		       dev->name);
 		break;
 	}
-	if (NULL == port->dvb.frontend) {
-		printk("%s: frontend initialization failed\n", dev->name);
+	if (NULL == fe0->dvb.frontend) {
+		printk(KERN_ERR "%s: frontend initialization failed\n",
+			dev->name);
 		return -1;
 	}
 	/* define general-purpose callback pointer */
-	port->dvb.frontend->callback = cx23885_tuner_callback;
+	fe0->dvb.frontend->callback = cx23885_tuner_callback;
 
 	/* Put the analog decoder in standby to keep it quiet */
 	cx23885_call_i2c_clients(i2c_bus, TUNER_SET_STANDBY, NULL);
 
-	if (port->dvb.frontend->ops.analog_ops.standby)
-		port->dvb.frontend->ops.analog_ops.standby(port->dvb.frontend);
+	if (fe0->dvb.frontend->ops.analog_ops.standby)
+		fe0->dvb.frontend->ops.analog_ops.standby(fe0->dvb.frontend);
 
 	/* register everything */
-	return videobuf_dvb_register(&port->dvb, THIS_MODULE, port,
-				     &dev->pci->dev, adapter_nr);
+	return videobuf_dvb_register_bus(&port->frontends, THIS_MODULE, port,
+		&dev->pci->dev, adapter_nr, 0);
+
 }
 
 int cx23885_dvb_register(struct cx23885_tsport *port)
 {
+
+	struct videobuf_dvb_frontend *fe0;
 	struct cx23885_dev *dev = port->dev;
-	int err;
+	int err, i;
 
-	dprintk(1, "%s\n", __func__);
-	dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
-		dev->board,
-		dev->name,
-		dev->pci_bus,
-		dev->pci_slot);
+	/* Here we need to allocate the correct number of frontends,
+	 * as reflected in the cards struct. The reality is that currently
+	 * no cx23885 boards support this - yet. But, if we don't modify this
+	 * code then the second frontend would never be allocated (later)
+	 * and fail with error before the attach in dvb_register().
+	 * Without these changes we risk an OOPS later. The changes here
+	 * are for safety, and should provide a good foundation for the
+	 * future addition of any multi-frontend cx23885 based boards.
+	 */
+	printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
+		port->num_frontends);
 
-	err = -ENODEV;
+	for (i = 1; i <= port->num_frontends; i++) {
+		if (videobuf_dvb_alloc_frontend(
+			&port->frontends, i) == NULL) {
+			printk(KERN_ERR "%s() failed to alloc\n", __func__);
+			return -ENOMEM;
+		}
 
-	/* dvb stuff */
-	printk("%s: cx23885 based dvb card\n", dev->name);
-	videobuf_queue_sg_init(&port->dvb.dvbq, &dvb_qops, &dev->pci->dev, &port->slock,
+		fe0 = videobuf_dvb_get_frontend(&port->frontends, i);
+		if (!fe0)
+			return -EINVAL;
+
+		dprintk(1, "%s\n", __func__);
+		dprintk(1, " ->probed by Card=%d Name=%s, PCI %02x:%02x\n",
+			dev->board,
+			dev->name,
+			dev->pci_bus,
+			dev->pci_slot);
+
+		err = -ENODEV;
+
+		/* dvb stuff */
+		/* We have to init the queue for each frontend on a port. */
+		printk(KERN_INFO "%s: cx23885 based dvb card\n", dev->name);
+		videobuf_queue_sg_init(&fe0->dvb.dvbq, &dvb_qops,
+			    &dev->pci->dev, &port->slock,
 			    V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_TOP,
 			    sizeof(struct cx23885_buffer), port);
+	}
 	err = dvb_register(port);
 	if (err != 0)
-		printk("%s() dvb_register failed err = %d\n", __func__, err);
+		printk(KERN_ERR "%s() dvb_register failed err = %d\n",
+			__func__, err);
 
 	return err;
 }
 
 int cx23885_dvb_unregister(struct cx23885_tsport *port)
 {
-	/* dvb */
-	if(port->dvb.frontend)
-		videobuf_dvb_unregister(&port->dvb);
+	struct videobuf_dvb_frontend *fe0;
+
+	/* FIXME: in an error condition (attach problem) where we
+	 * end up with an unexpected number of frontends, this
+	 * might not clean up correctly if frontend 1 is
+	 * invalid.
+	 * This comment only applies to future boards IF they
+	 * implement MFE support.
+	 */
+	fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
+	if (fe0 && fe0->dvb.frontend)
+		videobuf_dvb_unregister_bus(&port->frontends);
 
 	return 0;
 }
 
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
-*/
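
The cx23885-dvb.c changes above replace the single embedded struct videobuf_dvb with a
per-port struct videobuf_dvb_frontends list, so one TS port can later carry more than one
demodulator. Below is a minimal sketch of the resulting allocate/look-up/register flow,
using only the videobuf_dvb_* helpers visible in this patch; the example_* name and the
elided attach step are illustrative, not part of the driver.

/* Sketch only: assumes the MFE helpers introduced by this series. */
static int example_register_port(struct cx23885_tsport *port)
{
	struct videobuf_dvb_frontend *fe0;
	int i;

	/* One videobuf_dvb_frontend per demod on this port (ids are 1-based). */
	for (i = 1; i <= port->num_frontends; i++)
		if (videobuf_dvb_alloc_frontend(&port->frontends, i) == NULL)
			return -ENOMEM;

	/* Fetch the primary frontend before attaching anything to it. */
	fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
	if (!fe0)
		return -EINVAL;

	/* ... board-specific dvb_attach() calls fill fe0->dvb.frontend
	 * here, exactly as in the switch statement above ... */

	/* Register every allocated frontend in one call; the final
	 * argument flags whether they share the TS bus. */
	return videobuf_dvb_register_bus(&port->frontends, THIS_MODULE, port,
					 &port->dev->pci->dev, adapter_nr, 0);
}

Teardown mirrors this: cx23885_dvb_unregister() above fetches frontend 1 again and calls
videobuf_dvb_unregister_bus(&port->frontends).
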
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index f98e476..bb7f71a 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -131,7 +131,7 @@
 			printk(" >\n");
 	}
 
-	for (cnt = 1; cnt < msg->len; cnt++ ) {
+	for (cnt = 1; cnt < msg->len; cnt++) {
 		/* following bytes */
 		wdata = msg->buf[cnt];
 		ctrl = bus->i2c_period | (1 << 12) | (1 << 2);
@@ -151,9 +151,9 @@
 		if (retval == 0)
 			goto eio;
 		if (i2c_debug) {
-			printk(" %02x", msg->buf[cnt]);
+			dprintk(1, " %02x", msg->buf[cnt]);
 			if (!(ctrl & I2C_NOSTOP))
-				printk(" >\n");
+				dprintk(1, " >\n");
 		}
 	}
 	return msg->len;
@@ -162,7 +162,7 @@
 	retval = -EIO;
  err:
 	if (i2c_debug)
-		printk(" ERR: %d\n", retval);
+		printk(KERN_ERR " ERR: %d\n", retval);
 	return retval;
 }
 
@@ -194,12 +194,12 @@
 
 	if (i2c_debug) {
 		if (joined)
-			printk(" R");
+			dprintk(1, " R");
 		else
-			printk(" <R %02x", (msg->addr << 1) + 1);
+			dprintk(1, " <R %02x", (msg->addr << 1) + 1);
 	}
 
-	for(cnt = 0; cnt < msg->len; cnt++) {
+	for (cnt = 0; cnt < msg->len; cnt++) {
 
 		ctrl = bus->i2c_period | (1 << 12) | (1 << 2) | 1;
 
@@ -216,9 +216,9 @@
 			goto eio;
 		msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
 		if (i2c_debug) {
-			printk(" %02x", msg->buf[cnt]);
+			dprintk(1, " %02x", msg->buf[cnt]);
 			if (!(ctrl & I2C_NOSTOP))
-				printk(" >\n");
+				dprintk(1, " >\n");
 		}
 	}
 	return msg->len;
@@ -227,7 +227,7 @@
 	retval = -EIO;
  err:
 	if (i2c_debug)
-		printk(" ERR: %d\n", retval);
+		printk(KERN_ERR " ERR: %d\n", retval);
 	return retval;
 }
 
@@ -353,17 +353,17 @@
 };
 
 static char *i2c_devs[128] = {
-	[0x10 >> 1]   = "tda10048",
-	[0x12 >> 1]   = "dib7000pc",
-	[ 0x1c >> 1 ] = "lgdt3303",
-	[ 0x86 >> 1 ] = "tda9887",
-	[ 0x32 >> 1 ] = "cx24227",
-	[ 0x88 >> 1 ] = "cx25837",
-	[ 0x84 >> 1 ] = "tda8295",
-	[ 0xa0 >> 1 ] = "eeprom",
-	[ 0xc0 >> 1 ] = "tuner/mt2131/tda8275",
+	[0x10 >> 1] = "tda10048",
+	[0x12 >> 1] = "dib7000pc",
+	[0x1c >> 1] = "lgdt3303",
+	[0x86 >> 1] = "tda9887",
+	[0x32 >> 1] = "cx24227",
+	[0x88 >> 1] = "cx25837",
+	[0x84 >> 1] = "tda8295",
+	[0xa0 >> 1] = "eeprom",
+	[0xc0 >> 1] = "tuner/mt2131/tda8275",
 	[0xc2 >> 1] = "tuner/mt2131/tda8275/xc5000/xc3028",
-	[0xc8 >> 1]   = "tuner/xc3028L",
+	[0xc8 >> 1] = "tuner/xc3028L",
 };
 
 static void do_i2c_scan(char *name, struct i2c_client *c)
@@ -376,7 +376,7 @@
 		rc = i2c_master_recv(c, &buf, 0);
 		if (rc < 0)
 			continue;
-		printk("%s: i2c scan: found device @ 0x%x  [%s]\n",
+		printk(KERN_INFO "%s: i2c scan: found device @ 0x%x  [%s]\n",
 		       name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
 	}
 }
@@ -408,11 +408,12 @@
 	bus->i2c_client.adapter = &bus->i2c_adap;
 
 	if (0 == bus->i2c_rc) {
-		printk("%s: i2c bus %d registered\n", dev->name, bus->nr);
+		dprintk(1, "%s: i2c bus %d registered\n", dev->name, bus->nr);
 		if (i2c_scan)
 			do_i2c_scan(dev->name, &bus->i2c_client);
 	} else
-		printk("%s: i2c bus %d register FAILED\n", dev->name, bus->nr);
+		printk(KERN_WARNING "%s: i2c bus %d register FAILED\n",
+			dev->name, bus->nr);
 
 	return bus->i2c_rc;
 }
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index f75ed1c..c742a10 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -285,11 +285,10 @@
 		list_del(&buf->vb.queue);
 		wake_up(&buf->vb.done);
 	}
-	if (list_empty(&q->active)) {
+	if (list_empty(&q->active))
 		del_timer(&q->timeout);
-	} else {
+	else
 		mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
-	}
 	if (bc != 1)
 		printk(KERN_ERR "%s: %d buffers handled (should be 1)\n",
 			__func__, bc);
@@ -379,12 +378,12 @@
 
 static int res_check(struct cx23885_fh *fh, unsigned int bit)
 {
-	return (fh->resources & bit);
+	return fh->resources & bit;
 }
 
 static int res_locked(struct cx23885_dev *dev, unsigned int bit)
 {
-	return (dev->resources & bit);
+	return dev->resources & bit;
 }
 
 static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
@@ -887,14 +886,16 @@
 /* ------------------------------------------------------------------ */
 /* VIDEO CTRL IOCTLS                                                  */
 
-static int cx23885_get_control(struct cx23885_dev *dev, struct v4l2_control *ctl)
+static int cx23885_get_control(struct cx23885_dev *dev,
+	struct v4l2_control *ctl)
 {
 	dprintk(1, "%s() calling cx25840(VIDIOC_G_CTRL)\n", __func__);
 	cx23885_call_i2c_clients(&dev->i2c_bus[2], VIDIOC_G_CTRL, ctl);
 	return 0;
 }
 
-static int cx23885_set_control(struct cx23885_dev *dev, struct v4l2_control *ctl)
+static int cx23885_set_control(struct cx23885_dev *dev,
+	struct v4l2_control *ctl)
 {
 	dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)"
 		" (disabled - no action)\n", __func__);
@@ -1073,29 +1074,29 @@
 	struct v4l2_requestbuffers *p)
 {
 	struct cx23885_fh *fh = priv;
-	return (videobuf_reqbufs(get_queue(fh), p));
+	return videobuf_reqbufs(get_queue(fh), p);
 }
 
 static int vidioc_querybuf(struct file *file, void *priv,
 	struct v4l2_buffer *p)
 {
 	struct cx23885_fh *fh = priv;
-	return (videobuf_querybuf(get_queue(fh), p));
+	return videobuf_querybuf(get_queue(fh), p);
 }
 
 static int vidioc_qbuf(struct file *file, void *priv,
 	struct v4l2_buffer *p)
 {
 	struct cx23885_fh *fh = priv;
-	return (videobuf_qbuf(get_queue(fh), p));
+	return videobuf_qbuf(get_queue(fh), p);
 }
 
 static int vidioc_dqbuf(struct file *file, void *priv,
 	struct v4l2_buffer *p)
 {
 	struct cx23885_fh *fh = priv;
-	return (videobuf_dqbuf(get_queue(fh), p,
-				file->f_flags & O_NONBLOCK));
+	return videobuf_dqbuf(get_queue(fh), p,
+				file->f_flags & O_NONBLOCK);
 }
 
 static int vidioc_streamon(struct file *file, void *priv,
@@ -1542,7 +1543,7 @@
 		goto fail_unreg;
 	}
 	printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
-	       dev->name, dev->video_dev->minor & 0x1f);
+	       dev->name, dev->video_dev->num);
 	/* initial device configuration */
 	mutex_lock(&dev->lock);
 	cx23885_set_tvnorm(dev, dev->tvnorm);
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index ba4e0aa..1d53f54 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -37,7 +37,7 @@
 #include <linux/version.h>
 #include <linux/mutex.h>
 
-#define CX23885_VERSION_CODE KERNEL_VERSION(0,0,1)
+#define CX23885_VERSION_CODE KERNEL_VERSION(0, 0, 1)
 
 #define UNSET (-1U)
 
@@ -225,7 +225,7 @@
 	int                        nr;
 	int                        sram_chno;
 
-	struct videobuf_dvb        dvb;
+	struct videobuf_dvb_frontends frontends;
 
 	/* dma queues */
 	struct cx23885_dmaqueue    mpegq;
@@ -262,6 +262,9 @@
 	u32                        src_sel_val;
 	u32                        vld_misc_val;
 	u32                        hw_sop_ctrl_val;
+
+	/* Allow a single tsport to have multiple frontends */
+	u32                        num_frontends;
 };
 
 struct cx23885_dev {
@@ -367,14 +370,14 @@
 /* ----------------------------------------------------------- */
 
 #define cx_read(reg)             readl(dev->lmmio + ((reg)>>2))
-#define cx_write(reg,value)      writel((value), dev->lmmio + ((reg)>>2))
+#define cx_write(reg, value)     writel((value), dev->lmmio + ((reg)>>2))
 
-#define cx_andor(reg,mask,value) \
+#define cx_andor(reg, mask, value) \
   writel((readl(dev->lmmio+((reg)>>2)) & ~(mask)) |\
   ((value) & (mask)), dev->lmmio+((reg)>>2))
 
-#define cx_set(reg,bit)          cx_andor((reg),(bit),(bit))
-#define cx_clear(reg,bit)        cx_andor((reg),(bit),0)
+#define cx_set(reg, bit)          cx_andor((reg), (bit), (bit))
+#define cx_clear(reg, bit)        cx_andor((reg), (bit), 0)
 
 /* ----------------------------------------------------------- */
 /* cx23885-core.c                                              */
@@ -411,7 +414,8 @@
 extern struct cx23885_subid cx23885_subids[];
 extern const unsigned int cx23885_idcount;
 
-extern int cx23885_tuner_callback(void *priv, int component, int command, int arg);
+extern int cx23885_tuner_callback(void *priv, int component,
+	int command, int arg);
 extern void cx23885_card_list(struct cx23885_dev *dev);
 extern int  cx23885_ir_init(struct cx23885_dev *dev);
 extern void cx23885_gpio_setup(struct cx23885_dev *dev);
@@ -479,11 +483,3 @@
 {
 	return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 754 : 922;
 }
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e713697..078be63 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1285,7 +1285,7 @@
 		return err;
 	}
 	printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n",
-	       dev->core->name,dev->mpeg_dev->minor & 0x1f);
+	       dev->core->name, dev->mpeg_dev->num);
 	return 0;
 }
 
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 5da04e81..5bcbb4c 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1270,27 +1270,40 @@
 		.mpeg           = CX88_MPEG_DVB,
 	},
 	[CX88_BOARD_HAUPPAUGE_HVR3000] = {
-		/* FIXME: Add dvb & radio support */
 		.name           = "Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T",
 		.tuner_type     = TUNER_PHILIPS_FMD1216ME_MK3,
 		.radio_type     = UNSET,
 		.tuner_addr     = ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.tda9887_conf   = TDA9887_PRESENT,
+		.audio_chip     = V4L2_IDENT_WM8775,
 		.input          = {{
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
 			.gpio0  = 0x84bf,
+			/* 1: TV Audio / FM Mono */
+			.audioroute = 1,
 		},{
 			.type   = CX88_VMUX_COMPOSITE1,
 			.vmux   = 1,
 			.gpio0  = 0x84bf,
+			/* 2: Line-In */
+			.audioroute = 2,
 		},{
 			.type   = CX88_VMUX_SVIDEO,
 			.vmux   = 2,
 			.gpio0  = 0x84bf,
+			/* 2: Line-In */
+			.audioroute = 2,
 		}},
+		.radio = {
+			.type   = CX88_RADIO,
+			.gpio0	= 0x84bf,
+			/* 4: FM Stereo (untested) */
+			.audioroute = 8,
+		},
 		.mpeg           = CX88_MPEG_DVB,
+		.num_frontends	= 2,
 	},
 	[CX88_BOARD_NORWOOD_MICRO] = {
 		.name           = "Norwood Micro TV Tuner",
@@ -1356,23 +1369,27 @@
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
 			.gpio0	= 0xef88,
+			/* 1: TV Audio / FM Mono */
 			.audioroute = 1,
 		},{
 			.type	= CX88_VMUX_COMPOSITE1,
 			.vmux	= 1,
 			.gpio0	= 0xef88,
+			/* 2: Line-In */
 			.audioroute = 2,
 		},{
 			.type	= CX88_VMUX_SVIDEO,
 			.vmux	= 2,
 			.gpio0	= 0xef88,
+			/* 2: Line-In */
 			.audioroute = 2,
 		}},
-		/* fixme: Add radio support */
 		.mpeg           = CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD,
 		.radio = {
 			.type   = CX88_RADIO,
 			.gpio0	= 0xef88,
+			/* 4: FM Stereo (untested) */
+			.audioroute = 8,
 		},
 	},
 	[CX88_BOARD_ADSTECH_PTV_390] = {
@@ -1716,6 +1733,7 @@
 		.tuner_addr     = ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.tda9887_conf   = TDA9887_PRESENT,
+		.audio_chip     = V4L2_IDENT_WM8775,
 		/*
 		 * GPIO0 (WINTV2000)
 		 *
@@ -1729,7 +1747,7 @@
 		 * BIT  VALUE   FUNCTION GP{x}_IO
 		 * 0    1       I:?
 		 * 1    1       I:?
-		 * 2    1       O:DVB-T DEMOD ENABLE LOW/ANALOG DEMOD ENABLE HIGH
+		 * 2    1       O:MPEG PORT 0=DVB-T 1=DVB-S
 		 * 3    1       I:?
 		 * 4    1       I:?
 		 * 5    1       I:?
@@ -1745,22 +1763,41 @@
 		 * d    0       I
 		 * e    1       O
 		 * f    1       O
+		 *
+		 * WM8775 ADC
+		 *
+		 * 1: TV Audio / FM Mono
+		 * 2: Line-In
+		 * 3: Line-In Expansion
+		 * 4: FM Stereo
 		 */
 		.input          = {{
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
 			.gpio0  = 0xc4bf,
+			/* 1: TV Audio / FM Mono */
+			.audioroute = 1,
 		}, {
 			.type   = CX88_VMUX_COMPOSITE1,
 			.vmux   = 1,
 			.gpio0  = 0xc4bf,
+			/* 2: Line-In */
+			.audioroute = 2,
 		}, {
 			.type   = CX88_VMUX_SVIDEO,
 			.vmux   = 2,
 			.gpio0  = 0xc4bf,
+			/* 2: Line-In */
+			.audioroute = 2,
 		} },
-		/* fixme: Add radio support */
+		.radio = {
+			.type   = CX88_RADIO,
+			.gpio0	= 0xc4bf,
+			/* 4: FM Stereo */
+			.audioroute = 8,
+		},
 		.mpeg           = CX88_MPEG_DVB,
+		.num_frontends	= 2,
 	},
 	[CX88_BOARD_HAUPPAUGE_HVR4000LITE] = {
 		.name           = "Hauppauge WinTV-HVR4000(Lite) DVB-S/S2",
@@ -2662,10 +2699,13 @@
 
 	case CX88_BOARD_HAUPPAUGE_HVR3000:
 	case CX88_BOARD_HAUPPAUGE_HVR4000:
-	case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
 		/* Init GPIO */
 		cx_write(MO_GP0_IO, core->board.input[0].gpio0);
 		udelay(1000);
+		cx_clear(MO_GP0_IO, 0x00000080);
+		udelay(50);
+		cx_set(MO_GP0_IO, 0x00000080); /* 702 out of reset */
+		udelay(1000);
 		break;
 	}
 }
@@ -3004,10 +3044,14 @@
 
 	memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));
 
-	info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
+	if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
+		core->board.num_frontends = 1;
+
+	info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
 		pci->subsystem_vendor, pci->subsystem_device, core->board.name,
 		core->boardnr, card[core->nr] == core->boardnr ?
-		"insmod option" : "autodetected");
+		"insmod option" : "autodetected",
+		core->board.num_frontends);
 
 	if (tuner[core->nr] != UNSET)
 		core->board.tuner_type = tuner[core->nr];
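
The cx88-cards.c hunk above adds a GPIO pulse that takes the DVB-T demodulator (the "702")
out of reset during board setup; the same bit is toggled again in cx88-dvb.c when switching
between the DVB-S and DVB-T paths on the HVR3000/HVR4000. A hedged sketch of that pulse in
isolation, using the cx_set()/cx_clear() register helpers (the function name and surrounding
context are illustrative):

/* Sketch only: mirrors the reset pulse added above.  Bit 0x00000080 of
 * GPIO bank 0 drives the cx22702 reset line on these boards; everything
 * else here is illustrative context, not a drop-in implementation.
 */
static void example_gpio_reset_demod(struct cx88_core *core)
{
	/* Drive the board's default GPIO state first. */
	cx_write(MO_GP0_IO, core->board.input[0].gpio0);
	udelay(1000);

	cx_clear(MO_GP0_IO, 0x00000080);	/* assert reset (low)      */
	udelay(50);				/* hold it briefly         */
	cx_set(MO_GP0_IO, 0x00000080);		/* release: 702 running    */
	udelay(1000);
}
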
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index d656fec..60705b08 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -549,7 +549,8 @@
 		mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
 	}
 	if (bc != 1)
-		printk("%s: %d buffers handled (should be 1)\n",__func__,bc);
+		dprintk(2, "%s: %d buffers handled (should be 1)\n",
+			__func__, bc);
 }
 
 void cx88_shutdown(struct cx88_core *core)
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 344ed26..cf6c30d 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -116,13 +116,23 @@
 	struct cx8802_dev *dev= fe->dvb->priv;
 	struct cx8802_driver *drv = NULL;
 	int ret = 0;
+	int fe_id;
+
+	fe_id = videobuf_dvb_find_frontend(&dev->frontends, fe);
+	if (!fe_id) {
+		printk(KERN_ERR "%s() No frontend found\n", __func__);
+		return -EINVAL;
+	}
 
 	drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
 	if (drv) {
-		if (acquire)
+		if (acquire) {
+			dev->frontends.active_fe_id = fe_id;
 			ret = drv->request_acquire(drv);
-		else
+		} else {
 			ret = drv->request_release(drv);
+			dev->frontends.active_fe_id = 0;
+		}
 	}
 
 	return ret;
@@ -396,7 +406,7 @@
 			cx_write(MO_GP0_IO, 0x00006060);
 			break;
 		case SEC_VOLTAGE_OFF:
-			printk("LNB Voltage SEC_VOLTAGE_off\n");
+		   	printk("LNB Voltage SEC_VOLTAGE_off\n");
 			break;
 	}
 
@@ -483,6 +493,7 @@
 static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
 {
 	struct dvb_frontend *fe;
+	struct videobuf_dvb_frontend *fe0 = NULL;
 	struct xc2028_ctrl ctl;
 	struct xc2028_config cfg = {
 		.i2c_adap  = &dev->core->i2c_adap,
@@ -490,7 +501,12 @@
 		.ctrl      = &ctl,
 	};
 
-	if (!dev->dvb.frontend) {
+	/* Get the first frontend */
+	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+	if (!fe0)
+		return -EINVAL;
+
+	if (!fe0->dvb.frontend) {
 		printk(KERN_ERR "%s/2: dvb frontend not attached. "
 				"Can't attach xc3028\n",
 		       dev->core->name);
@@ -504,10 +520,13 @@
 	 */
 	cx88_setup_xc3028(dev->core, &ctl);
 
-	fe = dvb_attach(xc2028_attach, dev->dvb.frontend, &cfg);
+	fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
 	if (!fe) {
 		printk(KERN_ERR "%s/2: xc3028 attach failed\n",
 		       dev->core->name);
+		dvb_frontend_detach(fe0->dvb.frontend);
+		dvb_unregister_frontend(fe0->dvb.frontend);
+		fe0->dvb.frontend = NULL;
 		return -EINVAL;
 	}
 
@@ -532,8 +551,10 @@
 	struct cx88_core *core = dev->core;
 
 	/* Reset the part */
+	/* Put the cx24116 into reset */
 	cx_write(MO_SRST_IO, 0);
 	msleep(10);
+	/* Take the cx24116 out of reset */
 	cx_write(MO_SRST_IO, 1);
 	msleep(10);
 
@@ -554,14 +575,14 @@
 
 static struct stv0299_config tevii_tuner_sharp_config = {
 	.demod_address = 0x68,
-	.inittab = sharp_z0194a__inittab,
+	.inittab = sharp_z0194a_inittab,
 	.mclk = 88000000UL,
 	.invert = 1,
 	.skip_reinit = 0,
 	.lock_output = 1,
 	.volt13_op0_op1 = STV0299_VOLT13_OP1,
 	.min_delay_ms = 100,
-	.set_symbol_rate = sharp_z0194a__set_symbol_rate,
+	.set_symbol_rate = sharp_z0194a_set_symbol_rate,
 	.set_ts_params = cx24116_set_ts_param,
 };
 
@@ -574,19 +595,25 @@
 static int dvb_register(struct cx8802_dev *dev)
 {
 	struct cx88_core *core = dev->core;
+	struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
+	int mfe_shared = 0; /* bus not shared by default */
 
-	/* init struct videobuf_dvb */
-	dev->dvb.name = core->name;
-	dev->ts_gen_cntrl = 0x0c;
+	/* Get the first frontend */
+	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+	if (!fe0)
+		return -EINVAL;
 
-	/* init frontend */
+	/* multi-frontend gate control is undefined or defaults to fe0 */
+	dev->frontends.gate = 0;
+
+	/* init frontend(s) */
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE_DVB_T1:
-		dev->dvb.frontend = dvb_attach(cx22702_attach,
+		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &connexant_refboard_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, &core->i2c_adap,
 					DVB_PLL_THOMSON_DTT759X))
 				goto frontend_detach;
@@ -596,11 +623,11 @@
 	case CX88_BOARD_CONEXANT_DVB_T1:
 	case CX88_BOARD_KWORLD_DVB_T_CX22702:
 	case CX88_BOARD_WINFAST_DTV1000:
-		dev->dvb.frontend = dvb_attach(cx22702_attach,
+		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &connexant_refboard_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x60, &core->i2c_adap,
 					DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
@@ -610,33 +637,67 @@
 	case CX88_BOARD_HAUPPAUGE_HVR1100:
 	case CX88_BOARD_HAUPPAUGE_HVR1100LP:
 	case CX88_BOARD_HAUPPAUGE_HVR1300:
-	case CX88_BOARD_HAUPPAUGE_HVR3000:
-		dev->dvb.frontend = dvb_attach(cx22702_attach,
+		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &hauppauge_hvr_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &core->i2c_adap, 0x61,
 				   TUNER_PHILIPS_FMD1216ME_MK3))
 				goto frontend_detach;
 		}
 		break;
+	case CX88_BOARD_HAUPPAUGE_HVR3000:
+		/* DVB-S init */
+		fe0->dvb.frontend = dvb_attach(cx24123_attach,
+			       &hauppauge_novas_config,
+			       &dev->core->i2c_adap);
+		if (fe0->dvb.frontend) {
+			if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
+			&dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) {
+				dprintk(1, "%s(): HVR3000 - DVB-S LNB Init: failed\n", __func__);
+			}
+		} else {
+			dprintk(1, "%s(): HVR3000 - DVB-S Init: failed\n", __func__);
+		}
+		/* DVB-T init */
+		fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
+		if (fe1) {
+			dev->frontends.gate = 2;
+			mfe_shared = 1;
+			fe1->dvb.frontend = dvb_attach(cx22702_attach,
+				&hauppauge_hvr_config,
+				&dev->core->i2c_adap);
+			if (fe1->dvb.frontend) {
+				fe1->dvb.frontend->id = 1;
+				if (!dvb_attach(simple_tuner_attach, fe1->dvb.frontend,
+						&dev->core->i2c_adap, 0x61,
+						TUNER_PHILIPS_FMD1216ME_MK3)) {
+					dprintk(1, "%s(): HVR3000 - DVB-T misc Init: failed\n", __func__);
+				}
+			} else {
+				dprintk(1, "%s(): HVR3000 - DVB-T Init: failed\n", __func__);
+			}
+		} else {
+			dprintk(1, "%s(): HVR3000 - DVB-T Init: can't find frontend 2.\n", __func__);
+		}
+		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dvico_fusionhdtv,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x60, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
 			break;
 		}
 		/* ZL10353 replaces MT352 on later cards */
-		dev->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_plus_v1_1,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x60, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
 		}
@@ -644,31 +705,31 @@
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
 		/* The tin box says DEE1601, but it seems to be DTT7579
 		 * compatible, with a slightly different MT352 AGC gain. */
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dvico_fusionhdtv_dual,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
 			break;
 		}
 		/* ZL10353 replaces MT352 on later cards */
-		dev->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_plus_v1_1,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dvico_fusionhdtv,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_LG_Z201))
 				goto frontend_detach;
 		}
@@ -676,11 +737,11 @@
 	case CX88_BOARD_KWORLD_DVB_T:
 	case CX88_BOARD_DNTV_LIVE_DVB_T:
 	case CX88_BOARD_ADSTECH_DVB_T_PCI:
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dntv_live_dvbt_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_UNKNOWN_1))
 				goto frontend_detach;
 		}
@@ -688,10 +749,10 @@
 	case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
 #if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
 		/* MT352 is on a secondary I2C bus made from some GPIO lines */
-		dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
+		fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
 					       &dev->vp3054->adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_PHILIPS_FMD1216ME_MK3))
 				goto frontend_detach;
@@ -702,22 +763,22 @@
 #endif
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
-		dev->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_hybrid,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &core->i2c_adap, 0x61,
 				   TUNER_THOMSON_FE6600))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
-		dev->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_xc3028,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend == NULL)
-			dev->dvb.frontend = dvb_attach(mt352_attach,
+		if (fe0->dvb.frontend == NULL)
+			fe0->dvb.frontend = dvb_attach(mt352_attach,
 						&dvico_fusionhdtv_mt352_xc3028,
 						&core->i2c_adap);
 		/*
@@ -725,16 +786,16 @@
 		 * We must not permit gate_ctrl to be performed, or
 		 * the xc3028 cannot communicate on the bus.
 		 */
-		if (dev->dvb.frontend)
-			dev->dvb.frontend->ops.i2c_gate_ctrl = NULL;
+		if (fe0->dvb.frontend)
+			fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
 		if (attach_xc3028(0x61, dev) < 0)
-			return -EINVAL;
+			goto frontend_detach;
 		break;
 	case CX88_BOARD_PCHDTV_HD3000:
-		dev->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
+		fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_THOMSON_DTT761X))
 				goto frontend_detach;
@@ -751,11 +812,11 @@
 
 		/* Select RF connector callback */
 		fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
-		dev->dvb.frontend = dvb_attach(lgdt330x_attach,
+		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &fusionhdtv_3_gold,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_MICROTUNE_4042FI5))
 				goto frontend_detach;
@@ -769,11 +830,11 @@
 		mdelay(100);
 		cx_set(MO_GP0_IO, 9);
 		mdelay(200);
-		dev->dvb.frontend = dvb_attach(lgdt330x_attach,
+		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &fusionhdtv_3_gold,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_THOMSON_DTT761X))
 				goto frontend_detach;
@@ -787,15 +848,15 @@
 		mdelay(100);
 		cx_set(MO_GP0_IO, 1);
 		mdelay(200);
-		dev->dvb.frontend = dvb_attach(lgdt330x_attach,
+		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &fusionhdtv_5_gold,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_LG_TDVS_H06XF))
 				goto frontend_detach;
-			if (!dvb_attach(tda9887_attach, dev->dvb.frontend,
+			if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
 				   &core->i2c_adap, 0x43))
 				goto frontend_detach;
 		}
@@ -808,25 +869,25 @@
 		mdelay(100);
 		cx_set(MO_GP0_IO, 1);
 		mdelay(200);
-		dev->dvb.frontend = dvb_attach(lgdt330x_attach,
+		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &pchdtv_hd5500,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_LG_TDVS_H06XF))
 				goto frontend_detach;
-			if (!dvb_attach(tda9887_attach, dev->dvb.frontend,
+			if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
 				   &core->i2c_adap, 0x43))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_ATI_HDTVWONDER:
-		dev->dvb.frontend = dvb_attach(nxt200x_attach,
+		fe0->dvb.frontend = dvb_attach(nxt200x_attach,
 					       &ati_hdtvwonder,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_PHILIPS_TUV1236D))
 				goto frontend_detach;
@@ -834,49 +895,49 @@
 		break;
 	case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
 	case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
-		dev->dvb.frontend = dvb_attach(cx24123_attach,
+		fe0->dvb.frontend = dvb_attach(cx24123_attach,
 					       &hauppauge_novas_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend) {
-			if (!dvb_attach(isl6421_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend) {
+			if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x08, ISL6421_DCL, 0x00))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_KWORLD_DVBS_100:
-		dev->dvb.frontend = dvb_attach(cx24123_attach,
+		fe0->dvb.frontend = dvb_attach(cx24123_attach,
 					       &kworld_dvbs_100_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend) {
-			core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
-			dev->dvb.frontend->ops.set_voltage = kworld_dvbs_100_set_voltage;
+		if (fe0->dvb.frontend) {
+			core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
+			fe0->dvb.frontend->ops.set_voltage = kworld_dvbs_100_set_voltage;
 		}
 		break;
 	case CX88_BOARD_GENIATECH_DVBS:
-		dev->dvb.frontend = dvb_attach(cx24123_attach,
+		fe0->dvb.frontend = dvb_attach(cx24123_attach,
 					       &geniatech_dvbs_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend) {
-			core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
-			dev->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage;
+		if (fe0->dvb.frontend) {
+			core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
+			fe0->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage;
 		}
 		break;
 	case CX88_BOARD_PINNACLE_PCTV_HD_800i:
-		dev->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 					       &pinnacle_pctv_hd_800i_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(xc5000_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
 					&core->i2c_adap,
 					&pinnacle_pctv_hd_800i_tuner_config))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
-		dev->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 						&dvico_hdtv5_pci_nano_config,
 						&core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend != NULL) {
 			struct dvb_frontend *fe;
 			struct xc2028_config cfg = {
 				.i2c_adap  = &core->i2c_adap,
@@ -889,17 +950,17 @@
 			};
 
 			fe = dvb_attach(xc2028_attach,
-					dev->dvb.frontend, &cfg);
+					fe0->dvb.frontend, &cfg);
 			if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
 				fe->ops.tuner_ops.set_config(fe, &ctl);
 		}
 		break;
 	 case CX88_BOARD_PINNACLE_HYBRID_PCTV:
-		dev->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &cx88_pinnacle_hybrid_pctv,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.i2c_gate_ctrl = NULL;
+		if (fe0->dvb.frontend) {
+			fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
 			if (attach_xc3028(0x61, dev) < 0)
 				goto frontend_detach;
 		}
@@ -907,85 +968,117 @@
 	 case CX88_BOARD_GENIATECH_X8000_MT:
 		dev->ts_gen_cntrl = 0x00;
 
-		dev->dvb.frontend = dvb_attach(zl10353_attach,
+		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &cx88_geniatech_x8000_mt,
 					       &core->i2c_adap);
 		if (attach_xc3028(0x61, dev) < 0)
 			goto frontend_detach;
 		break;
 	 case CX88_BOARD_KWORLD_ATSC_120:
-		dev->dvb.frontend = dvb_attach(s5h1409_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 					       &kworld_atsc_120_config,
 					       &core->i2c_adap);
 		if (attach_xc3028(0x61, dev) < 0)
 			goto frontend_detach;
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
-		dev->dvb.frontend = dvb_attach(s5h1411_attach,
+		fe0->dvb.frontend = dvb_attach(s5h1411_attach,
 					       &dvico_fusionhdtv7_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(xc5000_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
 					&core->i2c_adap,
 					&dvico_fusionhdtv7_tuner_config))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_HAUPPAUGE_HVR4000:
-	case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
-		/* Support for DVB-S only, not DVB-T support */
-		dev->dvb.frontend = dvb_attach(cx24116_attach,
+		/* DVB-S/S2 Init */
+		fe0->dvb.frontend = dvb_attach(cx24116_attach,
 			&hauppauge_hvr4000_config,
 			&dev->core->i2c_adap);
-		if (dev->dvb.frontend) {
-			dvb_attach(isl6421_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend) {
+			if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
+				&dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) {
+				dprintk(1, "%s(): HVR4000 - DVB-S LNB Init: failed\n", __func__);
+			}
+		} else {
+			dprintk(1, "%s(): HVR4000 - DVB-S Init: failed\n", __func__);
+		}
+		/* DVB-T Init */
+		fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
+		if (fe1) {
+			dev->frontends.gate = 2;
+			mfe_shared = 1;
+			fe1->dvb.frontend = dvb_attach(cx22702_attach,
+				&hauppauge_hvr_config,
+				&dev->core->i2c_adap);
+			if (fe1->dvb.frontend) {
+				fe1->dvb.frontend->id = 1;
+				if (!dvb_attach(simple_tuner_attach, fe1->dvb.frontend,
+					&dev->core->i2c_adap, 0x61,
+					TUNER_PHILIPS_FMD1216ME_MK3)) {
+					dprintk(1, "%s(): HVR4000 - DVB-T misc Init: failed\n", __func__);
+				}
+			} else {
+				dprintk(1, "%s(): HVR4000 - DVB-T Init: failed\n", __func__);
+			}
+		} else {
+			dprintk(1, "%s(): HVR4000 - DVB-T Init: can't find frontend 2.\n", __func__);
+		}
+		break;
+	case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
+		fe0->dvb.frontend = dvb_attach(cx24116_attach,
+			&hauppauge_hvr4000_config,
+			&dev->core->i2c_adap);
+		if (fe0->dvb.frontend) {
+			dvb_attach(isl6421_attach, fe0->dvb.frontend,
 				&dev->core->i2c_adap,
 				0x08, ISL6421_DCL, 0x00);
 		}
 		break;
 	case CX88_BOARD_TEVII_S420:
-		dev->dvb.frontend = dvb_attach(stv0299_attach,
+		fe0->dvb.frontend = dvb_attach(stv0299_attach,
 						&tevii_tuner_sharp_config,
 						&core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
+		if (fe0->dvb.frontend != NULL) {
+			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
 					&core->i2c_adap, DVB_PLL_OPERA1))
 				goto frontend_detach;
-			core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
-			dev->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
+			core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
+			fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
 
 		} else {
-			dev->dvb.frontend = dvb_attach(stv0288_attach,
+			fe0->dvb.frontend = dvb_attach(stv0288_attach,
 							    &tevii_tuner_earda_config,
 							    &core->i2c_adap);
-				if (dev->dvb.frontend != NULL) {
-					if (!dvb_attach(stb6000_attach, dev->dvb.frontend, 0x61,
+				if (fe0->dvb.frontend != NULL) {
+					if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
 						&core->i2c_adap))
 					goto frontend_detach;
-				core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
-				dev->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
-
+				core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
+				fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
 			}
 		}
 		break;
 	case CX88_BOARD_TEVII_S460:
-		dev->dvb.frontend = dvb_attach(cx24116_attach,
+		fe0->dvb.frontend = dvb_attach(cx24116_attach,
 					       &tevii_s460_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
-			dev->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
+		if (fe0->dvb.frontend != NULL) {
+			core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
+			fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
 		}
 		break;
 	case CX88_BOARD_OMICOM_SS4_PCI:
 	case CX88_BOARD_TBS_8920:
 	case CX88_BOARD_PROF_7300:
-		dev->dvb.frontend = dvb_attach(cx24116_attach,
+		fe0->dvb.frontend = dvb_attach(cx24116_attach,
 					       &hauppauge_hvr4000_config,
 					       &core->i2c_adap);
-		if (dev->dvb.frontend != NULL) {
-			core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
-			dev->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
+		if (fe0->dvb.frontend != NULL) {
+			core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
+			fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
 		}
 		break;
 	default:
@@ -993,30 +1086,30 @@
 		       core->name);
 		break;
 	}
-	if (NULL == dev->dvb.frontend) {
+
+	if ((NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend)) {
 		printk(KERN_ERR
 		       "%s/2: frontend initialization failed\n",
 		       core->name);
 		return -EINVAL;
 	}
 	/* define general-purpose callback pointer */
-	dev->dvb.frontend->callback = cx88_tuner_callback;
+	fe0->dvb.frontend->callback = cx88_tuner_callback;
 
 	/* Ensure all frontends negotiate bus access */
-	dev->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
+	fe0->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
+	if (fe1)
+		fe1->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
 
 	/* Put the analog decoder in standby to keep it quiet */
 	cx88_call_i2c_clients(core, TUNER_SET_STANDBY, NULL);
 
 	/* register everything */
-	return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev,
-				     &dev->pci->dev, adapter_nr);
+	return videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
+		&dev->pci->dev, adapter_nr, mfe_shared);
 
 frontend_detach:
-	if (dev->dvb.frontend) {
-		dvb_frontend_detach(dev->dvb.frontend);
-		dev->dvb.frontend = NULL;
-	}
+	videobuf_dvb_dealloc_frontends(&dev->frontends);
 	return -EINVAL;
 }
 
@@ -1039,6 +1132,38 @@
 		cx_clear(MO_GP0_IO, 0x00000004);
 		udelay(1000);
 		break;
+
+	case CX88_BOARD_HAUPPAUGE_HVR3000:
+	case CX88_BOARD_HAUPPAUGE_HVR4000:
+		if (core->dvbdev->frontends.active_fe_id == 1) {
+			/* DVB-S/S2 Enabled */
+
+			/* Toggle reset on cx22702 leaving i2c active */
+			cx_write(MO_GP0_IO, (core->board.input[0].gpio0 & 0x0000ff00) | 0x00000080);
+			udelay(1000);
+			cx_clear(MO_GP0_IO, 0x00000080);
+			udelay(50);
+			cx_set(MO_GP0_IO, 0x00000080); /* cx22702 out of reset */
+			cx_set(MO_GP0_IO, 0x00000004); /* tri-state the cx22702 pins */
+			udelay(1000);
+
+			cx_write(MO_SRST_IO, 1); /* Take the cx24116/cx24123 out of reset */
+			core->dvbdev->ts_gen_cntrl = 0x02; /* Parallel IO */
+		} else
+		if (core->dvbdev->frontends.active_fe_id == 2) {
+			/* DVB-T Enabled */
+
+			/* Put the cx24116/cx24123 into reset */
+			cx_write(MO_SRST_IO, 0);
+
+			/* cx22702 out of reset and enable it */
+			cx_set(MO_GP0_IO,   0x00000080);
+			cx_clear(MO_GP0_IO, 0x00000004);
+			core->dvbdev->ts_gen_cntrl = 0x0c; /* Serial IO */
+			udelay(1000);
+		}
+		break;
+
 	default:
 		err = -ENODEV;
 	}
@@ -1056,6 +1181,9 @@
 	case CX88_BOARD_HAUPPAUGE_HVR1300:
 		/* Do Nothing, leave the cx22702 on the bus. */
 		break;
+	case CX88_BOARD_HAUPPAUGE_HVR3000:
+	case CX88_BOARD_HAUPPAUGE_HVR4000:
+		break;
 	default:
 		err = -ENODEV;
 	}
@@ -1066,7 +1194,8 @@
 {
 	struct cx88_core *core = drv->core;
 	struct cx8802_dev *dev = drv->core->dvbdev;
-	int err;
+	int err, i;
+	struct videobuf_dvb_frontend *fe;
 
 	dprintk( 1, "%s\n", __func__);
 	dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
@@ -1086,28 +1215,39 @@
 
 	/* dvb stuff */
 	printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name);
-	videobuf_queue_sg_init(&dev->dvb.dvbq, &dvb_qops,
+	dev->ts_gen_cntrl = 0x0c;
+
+	for (i = 1; i <= core->board.num_frontends; i++) {
+		fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i);
+		if (!fe) {
+			printk(KERN_ERR "%s() failed to get frontend(%d)\n", __func__, i);
+			continue;
+		}
+		videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops,
 			    &dev->pci->dev, &dev->slock,
 			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 			    V4L2_FIELD_TOP,
 			    sizeof(struct cx88_buffer),
 			    dev);
+		/* init struct videobuf_dvb */
+		fe->dvb.name = dev->core->name;
+	}
 	err = dvb_register(dev);
 	if (err != 0)
 		printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n",
 		       core->name, err);
-
- fail_core:
+fail_core:
 	return err;
 }
 
 static int cx8802_dvb_remove(struct cx8802_driver *drv)
 {
+	struct cx88_core *core = drv->core;
 	struct cx8802_dev *dev = drv->core->dvbdev;
 
-	/* dvb */
-	if (dev->dvb.frontend)
-		videobuf_dvb_unregister(&dev->dvb);
+	dprintk(1, "%s\n", __func__);
+
+	videobuf_dvb_unregister_bus(&dev->frontends);
 
 	vp3054_i2c_remove(dev);
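
Taken together, the cx88-dvb.c changes above make TS bus ownership explicit:
cx88_dvb_bus_ctrl() looks up which registered frontend is asking for the bus and records it
in dev->frontends.active_fe_id, and the advise-acquire hook later consults that id to flip
the HVR3000/HVR4000 GPIOs toward either the DVB-S or the DVB-T demodulator. A condensed,
hedged sketch of the acquire/release half (illustrative function name, error handling
trimmed):

/* Sketch only: condensed from the cx88_dvb_bus_ctrl() change above. */
static int example_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx8802_driver *drv;
	int fe_id, ret = 0;

	/* Which of the registered frontends is asking? (1-based id) */
	fe_id = videobuf_dvb_find_frontend(&dev->frontends, fe);
	if (!fe_id)
		return -EINVAL;

	drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
	if (!drv)
		return 0;

	if (acquire) {
		/* Record the owner first, so the advise-acquire hook can
		 * program the GPIO mux for the right demodulator. */
		dev->frontends.active_fe_id = fe_id;
		ret = drv->request_acquire(drv);
	} else {
		ret = drv->request_release(drv);
		dev->frontends.active_fe_id = 0;
	}
	return ret;
}
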
 
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 8e74d64..1ab691d 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -116,18 +116,27 @@
 
 void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
 {
+#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+	struct videobuf_dvb_frontends *f = &core->dvbdev->frontends;
+	struct videobuf_dvb_frontend *fe = NULL;
+#endif
 	if (0 != core->i2c_rc)
 		return;
 
 #if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
-	if ( (core->dvbdev) && (core->dvbdev->dvb.frontend) ) {
-		if (core->dvbdev->dvb.frontend->ops.i2c_gate_ctrl)
-			core->dvbdev->dvb.frontend->ops.i2c_gate_ctrl(core->dvbdev->dvb.frontend, 1);
+	if (core->dvbdev && f) {
+		if (f->gate <= 1) /* undefined or fe0 */
+			fe = videobuf_dvb_get_frontend(f, 1);
+		else
+			fe = videobuf_dvb_get_frontend(f, f->gate);
+
+		if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
+			fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, 1);
 
 		i2c_clients_command(&core->i2c_adap, cmd, arg);
 
-		if (core->dvbdev->dvb.frontend->ops.i2c_gate_ctrl)
-			core->dvbdev->dvb.frontend->ops.i2c_gate_ctrl(core->dvbdev->dvb.frontend, 0);
+		if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
+			fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, 0);
 	} else
 #endif
 		i2c_clients_command(&core->i2c_adap, cmd, arg);
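
The cx88-i2c.c rework generalises the I2C gate handling: instead of always poking the single
dev->dvb frontend, the helper now selects a frontend from the list according to
frontends.gate (0 or 1 means the primary frontend, 2 means the second one, as set for the
HVR3000/HVR4000 in cx88-dvb.c) and opens that frontend's gate around the client command. A
hedged sketch of just the selection step (illustrative helper, no locking shown):

/* Sketch only: the gate selection logic added above, in isolation. */
static struct videobuf_dvb_frontend *
example_pick_gate_fe(struct videobuf_dvb_frontends *f)
{
	/* gate == 0 (undefined) or 1 both mean "use the first frontend";
	 * anything else names the frontend whose gate must be opened. */
	if (f->gate <= 1)
		return videobuf_dvb_get_frontend(f, 1);

	return videobuf_dvb_get_frontend(f, f->gate);
}

The caller then brackets i2c_clients_command() with
fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, 1) and the matching call with 0,
exactly as in the hunk above.
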
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index a6b061c..a1c435b 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -769,6 +769,10 @@
 	struct cx8802_dev *dev;
 	struct cx88_core  *core;
 	int err;
+#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+	struct videobuf_dvb_frontend *demod;
+	int i;
+#endif
 
 	/* general setup */
 	core = cx88_core_get(pci_dev);
@@ -795,6 +799,23 @@
 	INIT_LIST_HEAD(&dev->drvlist);
 	list_add_tail(&dev->devlist,&cx8802_devlist);
 
+#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+	mutex_init(&dev->frontends.lock);
+	INIT_LIST_HEAD(&dev->frontends.felist);
+
+	if (core->board.num_frontends)
+		printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends);
+
+	for (i = 1; i <= core->board.num_frontends; i++) {
+		demod = videobuf_dvb_alloc_frontend(&dev->frontends, i);
+		if (demod == NULL) {
+			printk(KERN_ERR "%s() failed to alloc\n", __func__);
+			err = -ENOMEM;
+			goto fail_free;
+		}
+	}
+#endif
+
 	/* Maintain a reference so cx88-video can query the 8802 device. */
 	core->dvbdev = dev;
 
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 3a1977f..7dd506b 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -767,6 +767,14 @@
 	case WW_FM:
 		set_audio_standard_FM(core, radio_deemphasis);
 		break;
+	case WW_I2SADC:
+		set_audio_start(core, 0x01);
+		/* Slave/Philips/Autobaud */
+		cx_write(AUD_I2SINPUTCNTL, 0);
+		/* Switch to "I2S ADC mode" */
+		cx_write(AUD_I2SCNTL, 0x1);
+		set_audio_finish(core, EN_I2SIN_ENABLE);
+		break;
 	case WW_NONE:
 	default:
 		printk("%s/0: unknown tv audio mode [%d]\n",
@@ -895,6 +903,9 @@
 			break;
 		}
 		break;
+	case WW_I2SADC:
+		/* DO NOTHING */
+		break;
 	}
 
 	if (UNSET != ctl) {
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index be45955..61265fd 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -426,24 +426,7 @@
 
 	/* if there are audioroutes defined, we have an external
 	   ADC to deal with audio */
-
 	if (INPUT(input).audioroute) {
-
-		/* cx2388's C-ADC is connected to the tuner only.
-		   When used with S-Video, that ADC is busy dealing with
-		   chroma, so an external must be used for baseband audio */
-
-		if (INPUT(input).type != CX88_VMUX_TELEVISION &&
-			INPUT(input).type != CX88_RADIO) {
-			/* "ADC mode" */
-			cx_write(AUD_I2SCNTL, 0x1);
-			cx_set(AUD_CTL, EN_I2SIN_ENABLE);
-		} else {
-			/* Normal mode */
-			cx_write(AUD_I2SCNTL, 0x0);
-			cx_clear(AUD_CTL, EN_I2SIN_ENABLE);
-		}
-
 		/* The wm8775 module has the "2" route hardwired into
 		   the initialization. Some boards may use different
 		   routes for different inputs. HVR-1300 surely does */
@@ -454,9 +437,19 @@
 			route.input = INPUT(input).audioroute;
 			cx88_call_i2c_clients(core,
 				VIDIOC_INT_S_AUDIO_ROUTING, &route);
-
 		}
-
+		/* cx2388's C-ADC is connected to the tuner only.
+		   When used with S-Video, that ADC is busy dealing with
+		   chroma, so an external must be used for baseband audio */
+		if (INPUT(input).type != CX88_VMUX_TELEVISION) {
+			/* "I2S ADC mode" */
+			core->tvaudio = WW_I2SADC;
+			cx88_set_tvaudio(core);
+		} else {
+			/* Normal mode */
+			cx_write(AUD_I2SCNTL, 0x0);
+			cx_clear(AUD_CTL, EN_I2SIN_ENABLE);
+		}
 	}
 
 	return 0;
@@ -832,9 +825,24 @@
 		cx_write(MO_GP0_IO, core->board.radio.gpio0);
 		cx_write(MO_GP1_IO, core->board.radio.gpio1);
 		cx_write(MO_GP2_IO, core->board.radio.gpio2);
-		core->tvaudio = WW_FM;
-		cx88_set_tvaudio(core);
-		cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1);
+		if (core->board.radio.audioroute) {
+			if (core->board.audio_chip &&
+				core->board.audio_chip == V4L2_IDENT_WM8775) {
+				struct v4l2_routing route;
+
+				route.input = core->board.radio.audioroute;
+				cx88_call_i2c_clients(core,
+					VIDIOC_INT_S_AUDIO_ROUTING, &route);
+			}
+			/* "I2S ADC mode" */
+			core->tvaudio = WW_I2SADC;
+			cx88_set_tvaudio(core);
+		} else {
+			/* FM Mode */
+			core->tvaudio = WW_FM;
+			cx88_set_tvaudio(core);
+			cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1);
+		}
 		cx88_call_i2c_clients(core,AUDC_SET_RADIO,NULL);
 	}
 	unlock_kernel();
@@ -1903,7 +1911,7 @@
 		goto fail_unreg;
 	}
 	printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
-	       core->name,dev->video_dev->minor & 0x1f);
+	       core->name, dev->video_dev->num);
 
 	dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
 	err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
@@ -1914,7 +1922,7 @@
 		goto fail_unreg;
 	}
 	printk(KERN_INFO "%s/0: registered device vbi%d\n",
-	       core->name,dev->vbi_dev->minor & 0x1f);
+	       core->name, dev->vbi_dev->num);
 
 	if (core->board.radio.type == CX88_RADIO) {
 		dev->radio_dev = cx88_vdev_init(core,dev->pci,
@@ -1927,7 +1935,7 @@
 			goto fail_unreg;
 		}
 		printk(KERN_INFO "%s/0: registered device radio%d\n",
-		       core->name,dev->radio_dev->minor & 0x1f);
+		       core->name, dev->radio_dev->num);
 	}
 
 	/* everything worked */
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index dbf01b8..76207c2 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -247,7 +247,7 @@
 	enum cx88_itype type;
 	u32             gpio0, gpio1, gpio2, gpio3;
 	unsigned int    vmux:2;
-	unsigned int    audioroute:2;
+	unsigned int    audioroute:4;
 };
 
 struct cx88_board {
@@ -261,6 +261,7 @@
 	struct cx88_input       radio;
 	enum cx88_board_type    mpeg;
 	unsigned int            audio_chip;
+	int			num_frontends;
 };
 
 struct cx88_subid {
@@ -356,6 +357,7 @@
 	struct cx8802_dev          *dvbdev;
 	enum cx88_board_type       active_type_id;
 	int			   active_ref;
+	int			   active_fe_id;
 };
 
 struct cx8800_dev;
@@ -490,7 +492,7 @@
 
 #if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
 	/* for dvb only */
-	struct videobuf_dvb        dvb;
+	struct videobuf_dvb_frontends frontends;
 #endif
 
 #if defined(CONFIG_VIDEO_CX88_VP3054) || \
@@ -628,6 +630,7 @@
 #define WW_EIAJ		 7
 #define WW_I2SPT	 8
 #define WW_FM		 9
+#define WW_I2SADC	 10
 
 void cx88_set_tvaudio(struct cx88_core *core);
 void cx88_newstation(struct cx88_core *core);
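
A hedged sketch of how a board description might use the new num_frontends field and the widened 4-bit audioroute; the .name field is assumed from the existing cx88 board table layout and the values are invented:

	/* Hypothetical board description, for illustration only. */
	static const struct cx88_board example_dual_frontend_board = {
		.name          = "Example dual-frontend board",
		.num_frontends = 2,
		.radio = {
			.type       = CX88_RADIO,
			.audioroute = 8,	/* would not fit in the old 2-bit field */
		},
	};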
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 3aa538a..298810d 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -192,7 +192,7 @@
 					err("dabusb_iso_complete: invalid len %d", len);
 			}
 			else
-				warn("dabusb_iso_complete: corrupted packet status: %d", purb->iso_frame_desc[i].status);
+				dev_warn(&purb->dev->dev, "dabusb_iso_complete: corrupted packet status: %d\n", purb->iso_frame_desc[i].status);
 		if (dst != purb->actual_length)
 			err("dst!=purb->actual_length:%d!=%d", dst, purb->actual_length);
 	}
@@ -289,7 +289,7 @@
 	}
 
 	if( ret == -EPIPE ) {
-		warn("CLEAR_FEATURE request to remove STALL condition.");
+		dev_warn(&s->usbdev->dev, "CLEAR_FEATURE request to remove STALL condition.\n");
 		if(usb_clear_halt(s->usbdev, usb_pipeendpoint(pipe)))
 			err("request failed");
 	}
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index c53649e..a1ab2ef 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -2042,7 +2042,7 @@
 			goto fail_unreg;
 		}
 		em28xx_info("Registered radio device as /dev/radio%d\n",
-			    dev->radio_dev->minor & 0x1f);
+			    dev->radio_dev->num);
 	}
 
 	/* init video dma queues */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 7a85c41..9d0ef96 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -588,7 +588,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "URB timeout reached. The camera is misconfigured. To "
 		       "use it, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -1195,7 +1195,7 @@
 
 	cam = container_of(kref, struct et61x251_device, kref);
 
-	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
+	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
 	video_set_drvdata(cam->v4ldev, NULL);
 	video_unregister_device(cam->v4ldev);
 	usb_put_dev(cam->usbdev);
@@ -1237,7 +1237,7 @@
 
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is already in use",
-		       cam->v4ldev->minor);
+		       cam->v4ldev->num);
 		DBG(3, "Simultaneous opens are not supported");
 		if ((filp->f_flags & O_NONBLOCK) ||
 		    (filp->f_flags & O_NDELAY)) {
@@ -1280,7 +1280,7 @@
 	cam->frame_count = 0;
 	et61x251_empty_framequeues(cam);
 
-	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
+	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
 
 out:
 	mutex_unlock(&cam->open_mutex);
@@ -1304,7 +1304,7 @@
 	cam->users--;
 	wake_up_interruptible_nr(&cam->wait_open, 1);
 
-	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
+	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
 
 	kref_put(&cam->kref, et61x251_release_resources);
 
@@ -1845,7 +1845,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -1858,7 +1858,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -ENOMEM;
 	}
 
@@ -2068,7 +2068,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -2080,7 +2080,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -ENOMEM;
 	}
 
@@ -2128,7 +2128,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
 		       "problems. To use the camera, close and open "
-		       "/dev/video%d again.", cam->v4ldev->minor);
+		       "/dev/video%d again.", cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -2605,7 +2605,7 @@
 		goto fail;
 	}
 
-	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
+	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
 
 	cam->module_param.force_munmap = force_munmap[dev_nr];
 	cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2658,7 +2658,7 @@
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is open! Deregistration and "
 		       "memory deallocation are deferred.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		cam->state |= DEV_MISCONFIGURED;
 		et61x251_stop_transfer(cam);
 		cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index c21af31..e48fbfc 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -21,6 +21,7 @@
 #define MODULE_NAME "gspca"
 
 #include <linux/init.h>
+#include <linux/version.h>
 #include <linux/fs.h>
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
@@ -403,7 +404,7 @@
 	unsigned int i;
 
 	PDEBUG(D_STREAM, "kill transfer");
-	for (i = 0; i < MAX_NURBS; ++i) {
+	for (i = 0; i < MAX_NURBS; i++) {
 		urb = gspca_dev->urb[i];
 		if (urb == NULL)
 			break;
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 4779dd0..1d9dc90 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -2,7 +2,6 @@
 #define GSPCAV2_H
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/usb.h>
 #include <linux/videodev2.h>
diff --git a/drivers/media/video/gspca/m5602/m5602_bridge.h b/drivers/media/video/gspca/m5602/m5602_bridge.h
index c786d7d..1a37ae4 100644
--- a/drivers/media/video/gspca/m5602/m5602_bridge.h
+++ b/drivers/media/video/gspca/m5602/m5602_bridge.h
@@ -1,7 +1,7 @@
 /*
  * USB Driver for ALi m5602 based webcams
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -25,33 +25,6 @@
 
 /*****************************************************************************/
 
-#undef PDEBUG
-#undef info
-#undef err
-
-#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
-	format "\n" , ## arg)
-#define info(format, arg...) printk(KERN_INFO KBUILD_MODNAME ": " \
-	format "\n" , ## arg)
-
-/* Debug parameters */
-#define DBG_INIT 0x1
-#define DBG_PROBE 0x2
-#define DBG_V4L2 0x4
-#define DBG_TRACE 0x8
-#define DBG_DATA 0x10
-#define DBG_V4L2_CID 0x20
-#define DBG_GSPCA 0x40
-
-#define PDEBUG(level, fmt, args...) \
-	do { \
-		if (m5602_debug & level)     \
-			info("[%s:%d] " fmt, __func__, __LINE__ , \
-			## args); \
-	} while (0)
-
-/*****************************************************************************/
-
 #define M5602_XB_SENSOR_TYPE 0x00
 #define M5602_XB_SENSOR_CTRL 0x01
 #define M5602_XB_LINE_OF_FRAME_H 0x02
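
The private debug machinery removed here is replaced in the m5602 .c files below by gspca's shared PDEBUG()/err() helpers; roughly, the old levels map as follows (summary derived from the hunks below, not text from the patch):

	/*
	 * DBG_TRACE               -> D_CONF
	 * DBG_V4L2_CID, DBG_V4L2  -> D_V4L2 (D_STREAM/D_FRAM on streaming paths)
	 * DBG_DATA                -> D_PACK or D_FRAM
	 * DBG_GSPCA               -> dropped, or D_ERR on failure paths
	 */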
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 19d5e35..fd6ce38 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -1,7 +1,7 @@
-/*
+/*
  * USB Driver for ALi m5602 based webcams
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -26,7 +26,6 @@
 int force_sensor;
 int dump_bridge;
 int dump_sensor;
-unsigned int m5602_debug;
 
 static const __devinitdata struct usb_device_id m5602_table[] = {
 	{USB_DEVICE(0x0402, 0x5602)},
@@ -48,7 +47,7 @@
 			      1, M5602_URB_MSG_TIMEOUT);
 	*i2c_data = buf[0];
 
-	PDEBUG(DBG_TRACE, "Reading bridge register 0x%x containing 0x%x",
+	PDEBUG(D_CONF, "Reading bridge register 0x%x containing 0x%x",
 	       address, *i2c_data);
 
 	/* usb_control_msg(...) returns the number of bytes sent upon success,
@@ -63,7 +62,7 @@
 	struct usb_device *udev = sd->gspca_dev.dev;
 	__u8 *buf = sd->gspca_dev.usb_buf;
 
-	PDEBUG(DBG_TRACE, "Writing bridge register 0x%x with 0x%x",
+	PDEBUG(D_CONF, "Writing bridge register 0x%x with 0x%x",
 	       address, i2c_data);
 
 	memcpy(buf, bridge_urb_skeleton,
@@ -91,7 +90,8 @@
 		m5602_read_bridge(sd, i, &val);
 		info("ALi m5602 address 0x%x contains 0x%x", i, val);
 	}
-	info("Warning: The camera probably won't work until it's power cycled");
+	info("Warning: The ALi m5602 webcam probably won't work "
+		"until it's power cycled");
 }
 
 static int m5602_probe_sensor(struct sd *sd)
@@ -135,7 +135,7 @@
 	struct sd *sd = (struct sd *) gspca_dev;
 	int err;
 
-	PDEBUG(DBG_TRACE, "Initializing ALi m5602 webcam");
+	PDEBUG(D_CONF, "Initializing ALi m5602 webcam");
 	/* Run the init sequence */
 	err = sd->sensor->init(sd);
 
@@ -146,16 +146,18 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	__u8 *buf = sd->gspca_dev.usb_buf;
+	int err;
 
 	/* Send start command to the camera */
 	const u8 buffer[4] = {0x13, 0xf9, 0x0f, 0x01};
 	memcpy(buf, buffer, sizeof(buffer));
-	usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0),
-			0x04, 0x40, 0x19, 0x0000, buf,
-			4, M5602_URB_MSG_TIMEOUT);
+	err = usb_control_msg(gspca_dev->dev,
+			      usb_sndctrlpipe(gspca_dev->dev, 0),
+			      0x04, 0x40, 0x19, 0x0000, buf,
+			      4, M5602_URB_MSG_TIMEOUT);
 
-	PDEBUG(DBG_V4L2, "Transfer started");
-	return 0;
+	PDEBUG(D_STREAM, "Transfer started");
+	return (err < 0) ? err : 0;
 }
 
 static void m5602_urb_complete(struct gspca_dev *gspca_dev,
@@ -165,14 +167,14 @@
 	struct sd *sd = (struct sd *) gspca_dev;
 
 	if (len < 6) {
-		PDEBUG(DBG_DATA, "Packet is less than 6 bytes");
+		PDEBUG(D_PACK, "Packet is less than 6 bytes");
 		return;
 	}
 
 	/* Frame delimiter: ff xx xx xx ff ff */
 	if (data[0] == 0xff && data[4] == 0xff && data[5] == 0xff &&
 	    data[2] != sd->frame_id) {
-		PDEBUG(DBG_DATA, "Frame delimiter detected");
+		PDEBUG(D_FRAM, "Frame delimiter detected");
 		sd->frame_id = data[2];
 
 		/* Remove the extra fluff appended on each header */
@@ -187,7 +189,7 @@
 		/* Create a new frame */
 		gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, len);
 
-		PDEBUG(DBG_V4L2, "Starting new frame %d",
+		PDEBUG(D_FRAM, "Starting new frame %d",
 		       sd->frame_count);
 
 	} else {
@@ -198,7 +200,7 @@
 		len -= 4;
 
 		if (cur_frame_len + len <= frame->v4l2_buf.length) {
-			PDEBUG(DBG_DATA, "Continuing frame %d copying %d bytes",
+			PDEBUG(D_FRAM, "Continuing frame %d copying %d bytes",
 			       sd->frame_count, len);
 
 			gspca_frame_add(gspca_dev, INTER_PACKET, frame,
@@ -234,8 +236,6 @@
 	struct cam *cam;
 	int err;
 
-	PDEBUG(DBG_GSPCA, "m5602_configure start");
-
 	cam = &gspca_dev->cam;
 	cam->epaddr = M5602_ISOC_ENDPOINT_ADDR;
 	sd->desc = &sd_desc;
@@ -248,11 +248,10 @@
 	if (err)
 		goto fail;
 
-	PDEBUG(DBG_GSPCA, "m5602_configure end");
 	return 0;
 
 fail:
-	PDEBUG(DBG_GSPCA, "m5602_configure failed");
+	PDEBUG(D_ERR, "ALi m5602 webcam failed");
 	cam->cam_mode = NULL;
 	cam->nmodes = 0;
 
@@ -282,13 +281,13 @@
 {
 	if (usb_register(&sd_driver) < 0)
 		return -1;
-	PDEBUG(D_PROBE, "m5602 module registered");
+	PDEBUG(D_PROBE, "registered");
 	return 0;
 }
 static void __exit mod_m5602_exit(void)
 {
 	usb_deregister(&sd_driver);
-	PDEBUG(D_PROBE, "m5602 module deregistered");
+	PDEBUG(D_PROBE, "deregistered");
 }
 
 module_init(mod_m5602_init);
@@ -297,9 +296,6 @@
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
-module_param_named(debug, m5602_debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "toggles debug on/off");
-
 module_param(force_sensor, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(force_sensor,
 		"force detection of sensor, "
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index 566d492..fb700c2 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the mt9m111 sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -107,7 +107,7 @@
 	err = mt9m111_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
 				  data, 2);
 	*val = data[0] & MT9M111_RMB_MIRROR_ROWS;
-	PDEBUG(DBG_V4L2_CID, "Read vertical flip %d", *val);
+	PDEBUG(D_V4L2, "Read vertical flip %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -118,7 +118,7 @@
 	u8 data[2] = {0x00, 0x00};
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set vertical flip to %d", val);
+	PDEBUG(D_V4L2, "Set vertical flip to %d", val);
 
 	/* Set the correct page map */
 	err = mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
@@ -145,7 +145,7 @@
 	err = mt9m111_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
 				  data, 2);
 	*val = data[0] & MT9M111_RMB_MIRROR_COLS;
-	PDEBUG(DBG_V4L2_CID, "Read horizontal flip %d", *val);
+	PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -156,7 +156,7 @@
 	u8 data[2] = {0x00, 0x00};
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set horizontal flip to %d", val);
+	PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
 
 	/* Set the correct page map */
 	err = mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
@@ -188,7 +188,7 @@
 	      ((tmp & (1 <<  8)) * 2) |
 	       (tmp & 0x7f);
 
-	PDEBUG(DBG_V4L2_CID, "Read gain %d", *val);
+	PDEBUG(D_V4L2, "Read gain %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -222,7 +222,7 @@
 
 	data[1] = (tmp & 0xff00) >> 8;
 	data[0] = (tmp & 0xff);
-	PDEBUG(DBG_V4L2_CID, "tmp=%d, data[1]=%d, data[0]=%d", tmp,
+	PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp,
 	       data[1], data[0]);
 
 	err = mt9m111_write_sensor(sd, MT9M111_SC_GLOBAL_GAIN,
@@ -257,7 +257,7 @@
 	for (i = 0; i < len && !err; i++) {
 		err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
 
-		PDEBUG(DBG_TRACE, "Reading sensor register "
+		PDEBUG(D_CONF, "Reading sensor register "
 		       "0x%x contains 0x%x ", address, *i2c_data);
 	}
 out:
@@ -290,7 +290,7 @@
 		memcpy(p, sensor_urb_skeleton + 16, 4);
 		p[3] = i2c_data[i];
 		p += 4;
-		PDEBUG(DBG_TRACE, "Writing sensor register 0x%x with 0x%x",
+		PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
 		       address, i2c_data[i]);
 	}
 
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index 79a5d88..315209d5 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -1,7 +1,7 @@
 /*
  * Driver for the mt9m111 sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -82,7 +82,6 @@
 /* Kernel module parameters */
 extern int force_sensor;
 extern int dump_sensor;
-extern unsigned int m5602_debug;
 
 int mt9m111_probe(struct sd *sd);
 int mt9m111_init(struct sd *sd);
@@ -152,8 +151,8 @@
 			.default_value  = DEFAULT_GAIN,
 			.flags          = V4L2_CTRL_FLAG_SLIDER
 		},
-		.set = mt9m111_set_hflip,
-		.get = mt9m111_get_hflip
+		.set = mt9m111_set_gain,
+		.get = mt9m111_get_gain
 	}
 	},
 
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index 31c5896..837c7e47 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the ov9650 sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -40,7 +40,7 @@
 	for (i = 0; i < len; i++) {
 		err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
 
-		PDEBUG(DBG_TRACE, "Reading sensor register "
+		PDEBUG(D_CONF, "Reading sensor register "
 				"0x%x containing 0x%x ", address, *i2c_data);
 	}
 	return (err < 0) ? err : 0;
@@ -72,7 +72,7 @@
 		memcpy(p, sensor_urb_skeleton + 16, 4);
 		p[3] = i2c_data[i];
 		p += 4;
-		PDEBUG(DBG_TRACE, "Writing sensor register 0x%x with 0x%x",
+		PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
 		       address, i2c_data[i]);
 	}
 
@@ -199,7 +199,7 @@
 		goto out;
 	*val |= (i2c_data & 0x3f) << 10;
 
-	PDEBUG(DBG_V4L2_CID, "Read exposure %d", *val);
+	PDEBUG(D_V4L2, "Read exposure %d", *val);
 out:
 	return (err < 0) ? err : 0;
 }
@@ -210,7 +210,7 @@
 	u8 i2c_data;
 	int err;
 
-	PDEBUG(DBG_V4L2_CID, "Set exposure to %d",
+	PDEBUG(D_V4L2, "Set exposure to %d",
 	       val & 0xffff);
 
 	/* The 6 MSBs */
@@ -246,7 +246,7 @@
 
 	err = ov9650_read_sensor(sd, OV9650_GAIN, &i2c_data, 1);
 	*val |= i2c_data;
-	PDEBUG(DBG_V4L2_CID, "Read gain %d", *val);
+	PDEBUG(D_V4L2, "Read gain %d", *val);
 	return (err < 0) ? err : 0;
 }
 
@@ -280,7 +280,7 @@
 	err = ov9650_read_sensor(sd, OV9650_RED, &i2c_data, 1);
 	*val = i2c_data;
 
-	PDEBUG(DBG_V4L2_CID, "Read red gain %d", *val);
+	PDEBUG(D_V4L2, "Read red gain %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -291,7 +291,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set red gain to %d",
+	PDEBUG(D_V4L2, "Set red gain to %d",
 			     val & 0xff);
 
 	i2c_data = val & 0xff;
@@ -309,7 +309,7 @@
 	err = ov9650_read_sensor(sd, OV9650_BLUE, &i2c_data, 1);
 	*val = i2c_data;
 
-	PDEBUG(DBG_V4L2_CID, "Read blue gain %d", *val);
+	PDEBUG(D_V4L2, "Read blue gain %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -320,7 +320,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set blue gain to %d",
+	PDEBUG(D_V4L2, "Set blue gain to %d",
 	       val & 0xff);
 
 	i2c_data = val & 0xff;
@@ -340,7 +340,7 @@
 		*val = ((i2c_data & OV9650_HFLIP) >> 5) ? 0 : 1;
 	else
 		*val = (i2c_data & OV9650_HFLIP) >> 5;
-	PDEBUG(DBG_V4L2_CID, "Read horizontal flip %d", *val);
+	PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -351,7 +351,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set horizontal flip to %d", val);
+	PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
 	err = ov9650_read_sensor(sd, OV9650_MVFP, &i2c_data, 1);
 	if (err < 0)
 		goto out;
@@ -379,7 +379,7 @@
 		*val = ((i2c_data & 0x10) >> 4) ? 0 : 1;
 	else
 		*val = (i2c_data & 0x10) >> 4;
-	PDEBUG(DBG_V4L2_CID, "Read vertical flip %d", *val);
+	PDEBUG(D_V4L2, "Read vertical flip %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -390,7 +390,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set vertical flip to %d", val);
+	PDEBUG(D_V4L2, "Set vertical flip to %d", val);
 	err = ov9650_read_sensor(sd, OV9650_MVFP, &i2c_data, 1);
 	if (err < 0)
 		goto out;
@@ -420,7 +420,7 @@
 
 	err = ov9650_read_sensor(sd, OV9650_GAIN, &i2c_data, 1);
 	*val |= i2c_data;
-	PDEBUG(DBG_V4L2_CID, "Read gain %d", *val);
+	PDEBUG(D_V4L2, "Read gain %d", *val);
 out:
 	return (err < 0) ? err : 0;
 }
@@ -431,7 +431,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set gain to %d", val & 0x3ff);
+	PDEBUG(D_V4L2, "Set gain to %d", val & 0x3ff);
 
 	/* Read the OV9650_VREF register first to avoid
 		corrupting the VREF high and low bits */
@@ -461,7 +461,7 @@
 
 	err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
 	*val = (i2c_data & OV9650_AWB_EN) >> 1;
-	PDEBUG(DBG_V4L2_CID, "Read auto white balance %d", *val);
+	PDEBUG(D_V4L2, "Read auto white balance %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -472,7 +472,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set auto white balance to %d", val);
+	PDEBUG(D_V4L2, "Set auto white balance to %d", val);
 	err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
 	if (err < 0)
 		goto out;
@@ -491,7 +491,7 @@
 
 	err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
 	*val = (i2c_data & OV9650_AGC_EN) >> 2;
-	PDEBUG(DBG_V4L2_CID, "Read auto gain control %d", *val);
+	PDEBUG(D_V4L2, "Read auto gain control %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -502,7 +502,7 @@
 	u8 i2c_data;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	PDEBUG(DBG_V4L2_CID, "Set auto gain control to %d", val);
+	PDEBUG(D_V4L2, "Set auto gain control to %d", val);
 	err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
 	if (err < 0)
 		goto out;
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index 2f29cb0..065632f 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -1,7 +1,7 @@
 /*
  * Driver for the ov9650 sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -121,7 +121,6 @@
 /* Kernel module parameters */
 extern int force_sensor;
 extern int dump_sensor;
-extern unsigned int m5602_debug;
 
 int ov9650_probe(struct sd *sd);
 int ov9650_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index 08c015bd..d17ac52 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the po1030 sensor
  *
- * Copyright (c) 2008 Erik Andren
+ * Copyright (c) 2008 Erik Andrén
  * Copyright (c) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (c) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -82,7 +82,7 @@
 	for (i = 0; i < len; i++) {
 		err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
 
-		PDEBUG(DBG_TRACE, "Reading sensor register "
+		PDEBUG(D_CONF, "Reading sensor register "
 				"0x%x containing 0x%x ", address, *i2c_data);
 	}
 	return (err < 0) ? err : 0;
@@ -112,7 +112,7 @@
 		memcpy(p, sensor_urb_skeleton + 16, 4);
 		p[3] = i2c_data[i];
 		p += 4;
-		PDEBUG(DBG_TRACE, "Writing sensor register 0x%x with 0x%x",
+		PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
 		       address, i2c_data[i]);
 	}
 
@@ -185,7 +185,7 @@
 				 &i2c_data, 1);
 	*val |= i2c_data;
 
-	PDEBUG(DBG_V4L2_CID, "Exposure read as %d", *val);
+	PDEBUG(D_V4L2, "Exposure read as %d", *val);
 out:
 	return (err < 0) ? err : 0;
 }
@@ -196,10 +196,10 @@
 	u8 i2c_data;
 	int err;
 
-	PDEBUG(DBG_V4L2, "Set exposure to %d", val & 0xffff);
+	PDEBUG(D_V4L2, "Set exposure to %d", val & 0xffff);
 
 	i2c_data = ((val & 0xff00) >> 8);
-	PDEBUG(DBG_V4L2, "Set exposure to high byte to 0x%x",
+	PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x",
 	       i2c_data);
 
 	err = po1030_write_sensor(sd, PO1030_REG_INTEGLINES_H,
@@ -208,7 +208,7 @@
 		goto out;
 
 	i2c_data = (val & 0xff);
-	PDEBUG(DBG_V4L2, "Set exposure to low byte to 0x%x",
+	PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x",
 	       i2c_data);
 	err = po1030_write_sensor(sd, PO1030_REG_INTEGLINES_M,
 				  &i2c_data, 1);
@@ -226,7 +226,71 @@
 	err = po1030_read_sensor(sd, PO1030_REG_GLOBALGAIN,
 				 &i2c_data, 1);
 	*val = i2c_data;
-	PDEBUG(DBG_V4L2_CID, "Read global gain %d", *val);
+	PDEBUG(D_V4L2, "Read global gain %d", *val);
+
+	return (err < 0) ? err : 0;
+}
+
+int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	u8 i2c_data;
+	int err;
+
+	err = po1030_read_sensor(sd, PO1030_REG_CONTROL2,
+				 &i2c_data, 1);
+
+	*val = (i2c_data >> 7) & 0x01;
+
+	PDEBUG(D_V4L2, "Read hflip %d", *val);
+
+	return (err < 0) ? err : 0;
+}
+
+int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	u8 i2c_data;
+	int err;
+
+	PDEBUG(D_V4L2, "Set hflip %d", val);
+
+	i2c_data = (val & 0x01) << 7;
+
+	err = po1030_write_sensor(sd, PO1030_REG_CONTROL2,
+				  &i2c_data, 1);
+
+	return (err < 0) ? err : 0;
+}
+
+int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	u8 i2c_data;
+	int err;
+
+	err = po1030_read_sensor(sd, PO1030_REG_CONTROL2,
+				 &i2c_data, 1);
+
+	*val = (i2c_data >> 6) & 0x01;
+
+	PDEBUG(D_V4L2, "Read vflip %d", *val);
+
+	return (err < 0) ? err : 0;
+}
+
+int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	u8 i2c_data;
+	int err;
+
+	PDEBUG(D_V4L2, "Set vflip %d", val);
+
+	i2c_data = (val & 0x01) << 6;
+
+	err = po1030_write_sensor(sd, PO1030_REG_CONTROL2,
+				  &i2c_data, 1);
 
 	return (err < 0) ? err : 0;
 }
@@ -238,7 +302,7 @@
 	int err;
 
 	i2c_data = val & 0xff;
-	PDEBUG(DBG_V4L2, "Set global gain to %d", i2c_data);
+	PDEBUG(D_V4L2, "Set global gain to %d", i2c_data);
 	err = po1030_write_sensor(sd, PO1030_REG_GLOBALGAIN,
 				  &i2c_data, 1);
 	return (err < 0) ? err : 0;
@@ -253,7 +317,7 @@
 	err = po1030_read_sensor(sd, PO1030_REG_RED_GAIN,
 				 &i2c_data, 1);
 	*val = i2c_data;
-	PDEBUG(DBG_V4L2_CID, "Read red gain %d", *val);
+	PDEBUG(D_V4L2, "Read red gain %d", *val);
 	return (err < 0) ? err : 0;
 }
 
@@ -264,7 +328,7 @@
 	int err;
 
 	i2c_data = val & 0xff;
-	PDEBUG(DBG_V4L2, "Set red gain to %d", i2c_data);
+	PDEBUG(D_V4L2, "Set red gain to %d", i2c_data);
 	err = po1030_write_sensor(sd, PO1030_REG_RED_GAIN,
 				  &i2c_data, 1);
 	return (err < 0) ? err : 0;
@@ -279,7 +343,7 @@
 	err = po1030_read_sensor(sd, PO1030_REG_BLUE_GAIN,
 				 &i2c_data, 1);
 	*val = i2c_data;
-	PDEBUG(DBG_V4L2_CID, "Read blue gain %d", *val);
+	PDEBUG(D_V4L2, "Read blue gain %d", *val);
 
 	return (err < 0) ? err : 0;
 }
@@ -290,7 +354,7 @@
 	u8 i2c_data;
 	int err;
 	i2c_data = val & 0xff;
-	PDEBUG(DBG_V4L2, "Set blue gain to %d", i2c_data);
+	PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data);
 	err = po1030_write_sensor(sd, PO1030_REG_BLUE_GAIN,
 				  &i2c_data, 1);
 
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index 68f34c9..a0b75ff 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -1,8 +1,7 @@
 /*
  * Driver for the po1030 sensor.
- * This is probably a pixel plus sensor but we haven't identified it yet
  *
- * Copyright (c) 2008 Erik Andren
+ * Copyright (c) 2008 Erik Andrén
  * Copyright (c) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (c) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -109,10 +108,13 @@
 #define PO1030_REG_YCONTRAST		0x74
 #define PO1030_REG_YSATURATION		0x75
 
+#define PO1030_HFLIP			(1 << 7)
+#define PO1030_VFLIP			(1 << 6)
+
 /*****************************************************************************/
 
 #define PO1030_GLOBAL_GAIN_DEFAULT	0x12
-#define PO1030_EXPOSURE_DEFAULT		0xf0ff
+#define PO1030_EXPOSURE_DEFAULT		0x0085
 #define PO1030_BLUE_GAIN_DEFAULT 	0x40
 #define PO1030_RED_GAIN_DEFAULT 	0x40
 
@@ -121,7 +123,6 @@
 /* Kernel module parameters */
 extern int force_sensor;
 extern int dump_sensor;
-extern unsigned int m5602_debug;
 
 int po1030_probe(struct sd *sd);
 int po1030_init(struct sd *sd);
@@ -142,6 +143,10 @@
 int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
 int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
 int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
+int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
+int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
+int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
 
 static struct m5602_sensor po1030 = {
 	.name = "PO1030",
@@ -152,7 +157,7 @@
 	.init = po1030_init,
 	.power_down = po1030_power_down,
 
-	.nctrls = 4,
+	.nctrls = 6,
 	.ctrls = {
 	{
 		{
@@ -160,7 +165,7 @@
 			.type 		= V4L2_CTRL_TYPE_INTEGER,
 			.name 		= "gain",
 			.minimum 	= 0x00,
-			.maximum 	= 0xff,
+			.maximum 	= 0x4f,
 			.step 		= 0x1,
 			.default_value 	= PO1030_GLOBAL_GAIN_DEFAULT,
 			.flags         	= V4L2_CTRL_FLAG_SLIDER
@@ -173,7 +178,7 @@
 			.type 		= V4L2_CTRL_TYPE_INTEGER,
 			.name 		= "exposure",
 			.minimum 	= 0x00,
-			.maximum 	= 0xffff,
+			.maximum 	= 0x02ff,
 			.step 		= 0x1,
 			.default_value 	= PO1030_EXPOSURE_DEFAULT,
 			.flags         	= V4L2_CTRL_FLAG_SLIDER
@@ -206,8 +211,33 @@
 		},
 		.set = po1030_set_blue_balance,
 		.get = po1030_get_blue_balance
+	}, {
+		{
+			.id 		= V4L2_CID_HFLIP,
+			.type 		= V4L2_CTRL_TYPE_BOOLEAN,
+			.name 		= "horizontal flip",
+			.minimum 	= 0,
+			.maximum 	= 1,
+			.step 		= 1,
+			.default_value 	= 0,
+		},
+		.set = po1030_set_hflip,
+		.get = po1030_get_hflip
+	}, {
+		{
+			.id 		= V4L2_CID_VFLIP,
+			.type 		= V4L2_CTRL_TYPE_BOOLEAN,
+			.name 		= "vertical flip",
+			.minimum 	= 0,
+			.maximum 	= 1,
+			.step 		= 1,
+			.default_value 	= 0,
+		},
+		.set = po1030_set_vflip,
+		.get = po1030_get_vflip
 	}
 	},
+
 	.nmodes = 1,
 	.modes = {
 	{
@@ -381,7 +411,7 @@
 
 	/* Set the y window to 1 */
 	{SENSOR, PO1030_REG_WINDOWY_H, 0x00},
-	{SENSOR, PO1030_REG_WINDOWX_L, 0x01},
+	{SENSOR, PO1030_REG_WINDOWY_L, 0x01},
 
 	{SENSOR, PO1030_REG_WINDOWWIDTH_H, 0x02},
 	{SENSOR, PO1030_REG_WINDOWWIDTH_L, 0x87},
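
A hedged sketch of a read-modify-write variant of the new flip setters, using the PO1030_HFLIP/PO1030_VFLIP masks added above so the other CONTROL2 bits are preserved; the combined setter itself is hypothetical:

	static int po1030_set_flips(struct sd *sd, int hflip, int vflip)
	{
		u8 ctrl2;
		int err;

		err = po1030_read_sensor(sd, PO1030_REG_CONTROL2, &ctrl2, 1);
		if (err < 0)
			return err;

		ctrl2 &= ~(PO1030_HFLIP | PO1030_VFLIP);
		if (hflip)
			ctrl2 |= PO1030_HFLIP;
		if (vflip)
			ctrl2 |= PO1030_VFLIP;

		return po1030_write_sensor(sd, PO1030_REG_CONTROL2, &ctrl2, 1);
	}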
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index 6820256..14b1eac 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the s5k4aa sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -117,7 +117,7 @@
 	for (i = 0; (i < len) & !err; i++) {
 		err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
 
-		PDEBUG(DBG_TRACE, "Reading sensor register "
+		PDEBUG(D_CONF, "Reading sensor register "
 				  "0x%x containing 0x%x ", address, *i2c_data);
 	}
 out:
@@ -150,7 +150,7 @@
 		memcpy(p, sensor_urb_skeleton + 16, 4);
 		p[3] = i2c_data[i];
 		p += 4;
-		PDEBUG(DBG_TRACE, "Writing sensor register 0x%x with 0x%x",
+		PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
 		       address, i2c_data[i]);
 	}
 
@@ -248,7 +248,7 @@
 	*val = data << 8;
 	err = s5k4aa_read_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1);
 	*val |= data;
-	PDEBUG(DBG_V4L2_CID, "Read exposure %d", *val);
+	PDEBUG(D_V4L2, "Read exposure %d", *val);
 out:
 	return (err < 0) ? err : 0;
 }
@@ -259,7 +259,7 @@
 	u8 data = S5K4AA_PAGE_MAP_2;
 	int err;
 
-	PDEBUG(DBG_V4L2_CID, "Set exposure to %d", val);
+	PDEBUG(D_V4L2, "Set exposure to %d", val);
 	err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
 	if (err < 0)
 		goto out;
@@ -285,7 +285,7 @@
 
 	err = s5k4aa_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
 	*val = (data & S5K4AA_RM_V_FLIP) >> 7;
-	PDEBUG(DBG_V4L2_CID, "Read vertical flip %d", *val);
+	PDEBUG(D_V4L2, "Read vertical flip %d", *val);
 
 out:
 	return (err < 0) ? err : 0;
@@ -297,7 +297,7 @@
 	u8 data = S5K4AA_PAGE_MAP_2;
 	int err;
 
-	PDEBUG(DBG_V4L2_CID, "Set vertical flip to %d", val);
+	PDEBUG(D_V4L2, "Set vertical flip to %d", val);
 	err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
 	if (err < 0)
 		goto out;
@@ -341,7 +341,7 @@
 
 	err = s5k4aa_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
 	*val = (data & S5K4AA_RM_H_FLIP) >> 6;
-	PDEBUG(DBG_V4L2_CID, "Read horizontal flip %d", *val);
+	PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
 out:
 	return (err < 0) ? err : 0;
 }
@@ -352,7 +352,7 @@
 	u8 data = S5K4AA_PAGE_MAP_2;
 	int err;
 
-	PDEBUG(DBG_V4L2_CID, "Set horizontal flip to %d",
+	PDEBUG(D_V4L2, "Set horizontal flip to %d",
 	       val);
 	err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
 	if (err < 0)
@@ -397,7 +397,7 @@
 
 	err = s5k4aa_read_sensor(sd, S5K4AA_GAIN_2, &data, 1);
 	*val = data;
-	PDEBUG(DBG_V4L2_CID, "Read gain %d", *val);
+	PDEBUG(D_V4L2, "Read gain %d", *val);
 
 out:
 	return (err < 0) ? err : 0;
@@ -409,7 +409,7 @@
 	u8 data = S5K4AA_PAGE_MAP_2;
 	int err;
 
-	PDEBUG(DBG_V4L2_CID, "Set gain to %d", val);
+	PDEBUG(D_V4L2, "Set gain to %d", val);
 	err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
 	if (err < 0)
 		goto out;
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index bb7f7e3..eaef676 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -1,7 +1,7 @@
 /*
  * Driver for the s5k4aa sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -63,7 +63,6 @@
 /* Kernel module parameters */
 extern int force_sensor;
 extern int dump_sensor;
-extern unsigned int m5602_debug;
 
 int s5k4aa_probe(struct sd *sd);
 int s5k4aa_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
index b4b33c2..8988a72 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the s5k83a sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -101,7 +101,7 @@
 	for (i = 0; i < len && !len; i++) {
 		err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
 
-		PDEBUG(DBG_TRACE, "Reading sensor register "
+		PDEBUG(D_CONF, "Reading sensor register "
 				  "0x%x containing 0x%x ", address, *i2c_data);
 	}
 
@@ -135,7 +135,7 @@
 		memcpy(p, sensor_urb_skeleton + 16, 4);
 		p[3] = i2c_data[i];
 		p += 4;
-		PDEBUG(DBG_TRACE, "Writing sensor register 0x%x with 0x%x",
+		PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
 		       address, i2c_data[i]);
 	}
 
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 833708e..ee3ee9c 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -1,7 +1,7 @@
 /*
  * Driver for the s5k83a sensor
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
@@ -41,8 +41,6 @@
 /* Kernel module parameters */
 extern int force_sensor;
 extern int dump_sensor;
-extern unsigned int m5602_debug;
-
 
 int s5k83a_probe(struct sd *sd);
 int s5k83a_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_sensor.h b/drivers/media/video/gspca/m5602/m5602_sensor.h
index 930fcaa..60c9a48 100644
--- a/drivers/media/video/gspca/m5602/m5602_sensor.h
+++ b/drivers/media/video/gspca/m5602/m5602_sensor.h
@@ -1,7 +1,7 @@
 /*
  * USB Driver for ALi m5602 based webcams
  *
- * Copyright (C) 2008 Erik Andren
+ * Copyright (C) 2008 Erik Andrén
  * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
  * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
  *
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index b561f7c..eac245d 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -50,7 +50,7 @@
 
 	__u8 sensor;
 #define SENSOR_TAS5130A 0
-#define SENSOR_OTHER 1
+#define SENSOR_OM6802 1
 };
 
 /* V4L2 controls supported by the driver */
@@ -188,7 +188,7 @@
 	  .minimum = 0,
 	  .maximum = 1,
 	  .step = 1,
-	  .default_value = 1,
+	  .default_value = 0,
 	  },
 	 .set = sd_setwhitebalance,
 	 .get = sd_getwhitebalance
@@ -261,6 +261,59 @@
 		.priv = 0},
 };
 
+/* sensor specific data */
+struct additional_sensor_data {
+	const __u8 data1[20];
+	const __u8 data2[18];
+	const __u8 data3[18];
+	const __u8 data4[4];
+	const __u8 data5[6];
+	const __u8 stream[4];
+};
+
+static const struct additional_sensor_data sensor_data[] = {
+    {				/* TAS5130A */
+	.data1 =
+		{0xd0, 0xbb, 0xd1, 0x28, 0xd2, 0x10, 0xd3, 0x10,
+		 0xd4, 0xbb, 0xd5, 0x28, 0xd6, 0x1e, 0xd7, 0x27,
+		 0xd8, 0xc8, 0xd9, 0xfc},
+	.data2 =
+		{0xe0, 0x60, 0xe1, 0xa8, 0xe2, 0xe0, 0xe3, 0x60,
+		 0xe4, 0xa8, 0xe5, 0xe0, 0xe6, 0x60, 0xe7, 0xa8,
+		 0xe8, 0xe0},
+	.data3 =
+		{0xc7, 0x60, 0xc8, 0xa8, 0xc9, 0xe0, 0xca, 0x60,
+		 0xcb, 0xa8, 0xcc, 0xe0, 0xcd, 0x60, 0xce, 0xa8,
+		 0xcf, 0xe0},
+	.data4 =	/* Freq (50/60Hz). Split out for test purposes */
+		{0x66, 0x00, 0xa8, 0xe8},
+	.data5 =
+		{0x0c, 0x03, 0xab, 0x10, 0x81, 0x20},
+	.stream =
+		{0x0b, 0x04, 0x0a, 0x40},
+    },
+    {				/* OM6802 */
+	.data1 =
+		{0xd0, 0xc2, 0xd1, 0x28, 0xd2, 0x0f, 0xd3, 0x22,
+		 0xd4, 0xcd, 0xd5, 0x27, 0xd6, 0x2c, 0xd7, 0x06,
+		 0xd8, 0xb3, 0xd9, 0xfc},
+	.data2 =
+		{0xe0, 0x80, 0xe1, 0xff, 0xe2, 0xff, 0xe3, 0x80,
+		 0xe4, 0xff, 0xe5, 0xff, 0xe6, 0x80, 0xe7, 0xff,
+		 0xe8, 0xff},
+	.data3 =
+		{0xc7, 0x80, 0xc8, 0xff, 0xc9, 0xff, 0xca, 0x80,
+		 0xcb, 0xff, 0xcc, 0xff, 0xcd, 0x80, 0xce, 0xff,
+		 0xcf, 0xff},
+	.data4 =	/* Freq (50/60Hz). Split out for test purposes */
+		{0x66, 0xca, 0xa8, 0xf0 },
+	.data5 =	/* this could be removed later */
+		{0x0c, 0x03, 0xab, 0x13, 0x81, 0x23},
+	.stream =
+		{0x0b, 0x04, 0x0a, 0x78},
+    }
+};
+
 #define MAX_EFFECTS 7
 /* easily done by soft, this table could be removed,
  * i keep it here just in case */
@@ -365,6 +418,8 @@
 	{},
 };
 
+static const __u8 sensor_reset[] = {0x61, 0x68, 0x62, 0xff, 0x60, 0x07};
+
 /* read 1 byte */
 static int reg_r(struct gspca_dev *gspca_dev,
 		   __u16 index)
@@ -385,12 +440,12 @@
 	usb_control_msg(gspca_dev->dev,
 			usb_sndctrlpipe(gspca_dev->dev, 0),
 			0,
-			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			0, index,
 			NULL, 0, 500);
 }
 
-static void i2c_w(struct gspca_dev *gspca_dev,
+static void reg_w_buf(struct gspca_dev *gspca_dev,
 		  const __u8 *buffer, __u16 len)
 {
 	if (len <= USB_BUF_SZ) {
@@ -398,7 +453,7 @@
 		usb_control_msg(gspca_dev->dev,
 				usb_sndctrlpipe(gspca_dev->dev, 0),
 				0,
-			   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+			   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 				0x01, 0,
 				gspca_dev->usb_buf, len, 500);
 	} else {
@@ -409,14 +464,15 @@
 		usb_control_msg(gspca_dev->dev,
 				usb_sndctrlpipe(gspca_dev->dev, 0),
 				0,
-			   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+			   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 				0x01, 0,
 				tmpbuf, len, 500);
 		kfree(tmpbuf);
 	}
 }
 
-static void other_sensor_init(struct gspca_dev *gspca_dev)
+/* Reported as OM6802*/
+static void om6802_sensor_init(struct gspca_dev *gspca_dev)
 {
 	int i;
 	const __u8 *p;
@@ -436,19 +492,32 @@
 		0x90, 0x24,
 		0x91, 0xb2,
 		0x82, 0x32,
-		0xfd, 0x00,
-		0xfd, 0x01,
 		0xfd, 0x41,
 		0x00			/* table end */
 	};
 
+	reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset);
+	msleep(5);
+	i = 4;
+	while (--i >= 0) {
+		byte = reg_r(gspca_dev, 0x0060);
+		if (!(byte & 0x01))
+			break;
+		msleep(100);
+	}
+	byte = reg_r(gspca_dev, 0x0063);
+	if (byte != 0x17) {
+		err("Bad sensor reset %02x", byte);
+		/* continue? */
+	}
+
 	p = sensor_init;
 	while (*p != 0) {
 		val[1] = *p++;
 		val[3] = *p++;
 		if (*p == 0)
 			reg_w(gspca_dev, 0x3c80);
-		i2c_w(gspca_dev, val, sizeof val);
+		reg_w_buf(gspca_dev, val, sizeof val);
 		i = 4;
 		while (--i >= 0) {
 			msleep(15);
@@ -457,7 +526,8 @@
 				break;
 		}
 	}
-			reg_w(gspca_dev, 0x3c80);
+	msleep(15);
+	reg_w(gspca_dev, 0x3c80);
 }
 
 /* this function is called at probe time */
@@ -485,12 +555,75 @@
 	return 0;
 }
 
+static void setbrightness(struct gspca_dev *gspca_dev)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	unsigned int brightness;
+	__u8 set6[4] = { 0x8f, 0x24, 0xc3, 0x00 };
+
+	brightness = sd->brightness;
+	if (brightness < 7) {
+		set6[1] = 0x26;
+		set6[3] = 0x70 - brightness * 0x10;
+	} else {
+		set6[3] = 0x00 + ((brightness - 7) * 0x10);
+	}
+
+	reg_w_buf(gspca_dev, set6, sizeof set6);
+}
+
+static void setcontrast(struct gspca_dev *gspca_dev)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	unsigned int contrast = sd->contrast;
+	__u16 reg_to_write;
+
+	if (contrast < 7)
+		reg_to_write = 0x8ea9 - contrast * 0x200;
+	else
+		reg_to_write = 0x00a9 + (contrast - 7) * 0x200;
+
+	reg_w(gspca_dev, reg_to_write);
+}
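
	/* Illustrative arithmetic (derived from setcontrast() above, not new code):
	 *   contrast = 0  -> reg = 0x8ea9 - 0 * 0x200 = 0x8ea9
	 *   contrast = 6  -> reg = 0x8ea9 - 6 * 0x200 = 0x82a9
	 *   contrast = 7  -> reg = 0x00a9 + 0 * 0x200 = 0x00a9
	 *   contrast = 15 -> reg = 0x00a9 + 8 * 0x200 = 0x10a9
	 * values below 7 step down from 0x8ea9, values from 7 up step up from
	 * 0x00a9, in 0x200 increments either way; reg_w() then writes the value. */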
+
+static void setcolors(struct gspca_dev *gspca_dev)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	__u16 reg_to_write;
+
+	reg_to_write = 0x80bb + sd->colors * 0x100;	/* was 0xc0 */
+	reg_w(gspca_dev, reg_to_write);
+}
+
 static void setgamma(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
 	PDEBUG(D_CONF, "Gamma: %d", sd->gamma);
-	i2c_w(gspca_dev, gamma_table[sd->gamma], sizeof gamma_table[0]);
+	reg_w_buf(gspca_dev, gamma_table[sd->gamma], sizeof gamma_table[0]);
+}
+
+static void setwhitebalance(struct gspca_dev *gspca_dev)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+
+	__u8 white_balance[8] =
+		{0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x80, 0x38};
+
+	if (sd->whitebalance)
+		white_balance[7] = 0x3c;
+
+	reg_w_buf(gspca_dev, white_balance, sizeof white_balance);
+}
+
+static void setsharpness(struct gspca_dev *gspca_dev)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+	__u16 reg_to_write;
+
+	reg_to_write = 0x0aa6 + 0x1000 * sd->sharpness;
+
+	reg_w(gspca_dev, reg_to_write);
 }
 
 /* this function is called at probe and resume time */
@@ -511,8 +644,6 @@
 			{0x08, 0x03, 0x09, 0x03, 0x12, 0x04};
 	static const __u8 n2[] =
 			{0x08, 0x00};
-	static const __u8 nset[] =
-			{ 0x61, 0x68, 0x62, 0xff, 0x60, 0x07 };
 	static const __u8 n3[] =
 			{0x61, 0x68, 0x65, 0x0a, 0x60, 0x04};
 	static const __u8 n4[] =
@@ -525,51 +656,29 @@
 		 0x65, 0x0a, 0xbb, 0x86, 0xaf, 0x58, 0xb0, 0x68,
 		 0x87, 0x40, 0x89, 0x2b, 0x8d, 0xff, 0x83, 0x40,
 		 0xac, 0x84, 0xad, 0x86, 0xaf, 0x46};
-	static const __u8 nset4[] = {
-		0xe0, 0x60, 0xe1, 0xa8, 0xe2, 0xe0, 0xe3, 0x60, 0xe4, 0xa8,
-		0xe5, 0xe0, 0xe6, 0x60, 0xe7, 0xa8,
-		0xe8, 0xe0
-	};
-	/* ojo puede ser 0xe6 en vez de 0xe9 */
-	static const __u8 nset2[] = {
-		0xd0, 0xbb, 0xd1, 0x28, 0xd2, 0x10, 0xd3, 0x10, 0xd4, 0xbb,
-		0xd5, 0x28, 0xd6, 0x1e, 0xd7, 0x27,
-		0xd8, 0xc8, 0xd9, 0xfc
-	};
-	static const __u8 missing[] =
-		{ 0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x80, 0x38 };
-	static const __u8 nset3[] = {
-		0xc7, 0x60, 0xc8, 0xa8, 0xc9, 0xe0, 0xca, 0x60, 0xcb, 0xa8,
-		0xcc, 0xe0, 0xcd, 0x60, 0xce, 0xa8,
-		0xcf, 0xe0
-	};
-	static const __u8 nset5[] =
-			{ 0x8f, 0x24, 0xc3, 0x00 };	/* bright */
-	static const __u8 nset7[4] =
-			{ 0x66, 0xca, 0xa8, 0xf8 };	/* 50/60 Hz */
 	static const __u8 nset9[4] =
 			{ 0x0b, 0x04, 0x0a, 0x78 };
 	static const __u8 nset8[6] =
 			{ 0xa8, 0xf0, 0xc6, 0x88, 0xc0, 0x00 };
-	static const __u8 nset10[6] =
-			{ 0x0c, 0x03, 0xab, 0x10, 0x81, 0x20 };
 
 	byte = reg_r(gspca_dev, 0x06);
 	test_byte = reg_r(gspca_dev, 0x07);
 	if (byte == 0x08 && test_byte == 0x07) {
-		PDEBUG(D_CONF, "other sensor");
-		sd->sensor = SENSOR_OTHER;
+		PDEBUG(D_CONF, "sensor om6802");
+		sd->sensor = SENSOR_OM6802;
+	} else if (byte == 0x08 && test_byte == 0x01) {
+		PDEBUG(D_CONF, "sensor tas5130a");
+		sd->sensor = SENSOR_TAS5130A;
 	} else {
-		PDEBUG(D_CONF, "sensor %02x %02x", byte, test_byte);
+		PDEBUG(D_CONF, "unknown sensor %02x %02x", byte, test_byte);
 		sd->sensor = SENSOR_TAS5130A;
 	}
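
	/* Detection summary (restating the checks above, no new behaviour):
	 *   reg 0x06 == 0x08 && reg 0x07 == 0x07 -> OM6802
	 *   reg 0x06 == 0x08 && reg 0x07 == 0x01 -> TAS5130A
	 *   anything else                        -> log it, assume TAS5130A
	 */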
 
-	i2c_w(gspca_dev, n1, sizeof n1);
+	reg_w_buf(gspca_dev, n1, sizeof n1);
 	test_byte = 0;
 	i = 5;
 	while (--i >= 0) {
-		i2c_w(gspca_dev, nset, sizeof nset);
-		msleep(5);
+		reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset);
 		test_byte = reg_r(gspca_dev, 0x0063);
 		msleep(100);
 		if (test_byte == 0x17)
@@ -580,7 +689,7 @@
 /*		return -EIO; */
 /*fixme: test - continue */
 	}
-	i2c_w(gspca_dev, n2, sizeof n2);
+	reg_w_buf(gspca_dev, n2, sizeof n2);
 
 	i = 0;
 	while (read_indexs[i] != 0x00) {
@@ -590,58 +699,52 @@
 		i++;
 	}
 
-	i2c_w(gspca_dev, n3, sizeof n3);
-	i2c_w(gspca_dev, n4, sizeof n4);
+	reg_w_buf(gspca_dev, n3, sizeof n3);
+	reg_w_buf(gspca_dev, n4, sizeof n4);
 	reg_r(gspca_dev, 0x0080);
 	reg_w(gspca_dev, 0x2c80);
-	i2c_w(gspca_dev, nset2, sizeof nset2);
-	i2c_w(gspca_dev, nset3, sizeof nset3);
-	i2c_w(gspca_dev, nset4, sizeof nset4);
+
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data1,
+			sizeof sensor_data[sd->sensor].data1);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data3,
+			sizeof sensor_data[sd->sensor].data3);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data2,
+			sizeof sensor_data[sd->sensor].data2);
+
 	reg_w(gspca_dev, 0x3880);
 	reg_w(gspca_dev, 0x3880);
 	reg_w(gspca_dev, 0x338e);
-	i2c_w(gspca_dev, nset5, sizeof nset5);
-	reg_w(gspca_dev, 0x00a9);
+
+	setbrightness(gspca_dev);
+	setcontrast(gspca_dev);
 	setgamma(gspca_dev);
-	reg_w(gspca_dev, 0x86bb);
-	reg_w(gspca_dev, 0x4aa6);
+	setcolors(gspca_dev);
+	setsharpness(gspca_dev);
+	setwhitebalance(gspca_dev);
 
-	i2c_w(gspca_dev, missing, sizeof missing);
-
-	reg_w(gspca_dev, 0x2087);
+	reg_w(gspca_dev, 0x2087);	/* tied to white balance? */
 	reg_w(gspca_dev, 0x2088);
 	reg_w(gspca_dev, 0x2089);
 
-	i2c_w(gspca_dev, nset7, sizeof nset7);
-	i2c_w(gspca_dev, nset10, sizeof nset10);
-	i2c_w(gspca_dev, nset8, sizeof nset8);
-	i2c_w(gspca_dev, nset9, sizeof nset9);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data4,
+			sizeof sensor_data[sd->sensor].data4);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data5,
+			sizeof sensor_data[sd->sensor].data5);
+	reg_w_buf(gspca_dev, nset8, sizeof nset8);
+	reg_w_buf(gspca_dev, nset9, sizeof nset9);
 
 	reg_w(gspca_dev, 0x2880);
-	i2c_w(gspca_dev, nset2, sizeof nset2);
-	i2c_w(gspca_dev, nset3, sizeof nset3);
-	i2c_w(gspca_dev, nset4, sizeof nset4);
+
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data1,
+			sizeof sensor_data[sd->sensor].data1);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data3,
+			sizeof sensor_data[sd->sensor].data3);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data2,
+			sizeof sensor_data[sd->sensor].data2);
 
 	return 0;
 }
 
-static void setbrightness(struct gspca_dev *gspca_dev)
-{
-	struct sd *sd = (struct sd *) gspca_dev;
-	unsigned int brightness;
-	__u8 set6[4] = { 0x8f, 0x26, 0xc3, 0x00 };
-
-	brightness = sd->brightness;
-	if (brightness < 7) {
-		set6[3] = 0x70 - brightness * 0x10;
-	} else {
-		set6[1] = 0x24;
-		set6[3] = 0x00 + ((brightness - 7) * 0x10);
-	}
-
-	i2c_w(gspca_dev, set6, sizeof set6);
-}
-
 static void setflip(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
@@ -651,14 +754,15 @@
 	if (sd->mirror)
 		flipcmd[3] = 0x01;
 
-	i2c_w(gspca_dev, flipcmd, sizeof flipcmd);
+	reg_w_buf(gspca_dev, flipcmd, sizeof flipcmd);
 }
 
 static void seteffect(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	i2c_w(gspca_dev, effects_table[sd->effect], sizeof effects_table[0]);
+	reg_w_buf(gspca_dev, effects_table[sd->effect],
+				sizeof effects_table[0]);
 	if (sd->effect == 1 || sd->effect == 5) {
 		PDEBUG(D_CONF,
 		       "This effect have been disabled for webcam \"safety\"");
@@ -671,19 +775,6 @@
 		reg_w(gspca_dev, 0xfaa6);
 }
 
-static void setwhitebalance(struct gspca_dev *gspca_dev)
-{
-	struct sd *sd = (struct sd *) gspca_dev;
-
-	__u8 white_balance[8] =
-	    { 0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x80, 0x38 };
-
-	if (sd->whitebalance == 1)
-		white_balance[7] = 0x3c;
-
-	i2c_w(gspca_dev, white_balance, sizeof white_balance);
-}
-
 static void setlightfreq(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
@@ -692,52 +783,46 @@
 	if (sd->freq == 2)	/* 60hz */
 		freq[1] = 0x00;
 
-	i2c_w(gspca_dev, freq, sizeof freq);
+	reg_w_buf(gspca_dev, freq, sizeof freq);
 }
 
-static void setcontrast(struct gspca_dev *gspca_dev)
+/* Is this really needed?
+ * I added some module parameters for testing with some users */
+static void poll_sensor(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
-	unsigned int contrast = sd->contrast;
-	__u16 reg_to_write;
+	static const __u8 poll1[] =
+		{0x67, 0x05, 0x68, 0x81, 0x69, 0x80, 0x6a, 0x82,
+		 0x6b, 0x68, 0x6c, 0x69, 0x72, 0xd9, 0x73, 0x34,
+		 0x74, 0x32, 0x75, 0x92, 0x76, 0x00, 0x09, 0x01,
+		 0x60, 0x14};
+	static const __u8 poll2[] =
+		{0x67, 0x02, 0x68, 0x71, 0x69, 0x72, 0x72, 0xa9,
+		 0x73, 0x02, 0x73, 0x02, 0x60, 0x14};
+	static const __u8 poll3[] =
+		{0x87, 0x3f, 0x88, 0x20, 0x89, 0x2d};
+	static const __u8 poll4[] =
+		{0xa6, 0x0a, 0xea, 0xcf, 0xbe, 0x26, 0xb1, 0x5f,
+		 0xa1, 0xb1, 0xda, 0x6b, 0xdb, 0x98, 0xdf, 0x0c,
+		 0xc2, 0x80, 0xc3, 0x10};
 
-	if (contrast < 7)
-		reg_to_write = 0x8ea9 - (0x200 * contrast);
-	else
-		reg_to_write = (0x00a9 + ((contrast - 7) * 0x200));
-
-	reg_w(gspca_dev, reg_to_write);
-}
-
-static void setcolors(struct gspca_dev *gspca_dev)
-{
-	struct sd *sd = (struct sd *) gspca_dev;
-	__u16 reg_to_write;
-
-	reg_to_write = 0xc0bb + sd->colors * 0x100;
-	reg_w(gspca_dev, reg_to_write);
-}
-
-static void setsharpness(struct gspca_dev *gspca_dev)
-{
-	struct sd *sd = (struct sd *) gspca_dev;
-	__u16 reg_to_write;
-
-	reg_to_write = 0x0aa6 + 0x1000 * sd->sharpness;
-
-	reg_w(gspca_dev, reg_to_write);
+	if (sd->sensor != SENSOR_TAS5130A) {
+		PDEBUG(D_STREAM, "[Sensor requires polling]");
+		reg_w_buf(gspca_dev, poll1, sizeof poll1);
+		reg_w_buf(gspca_dev, poll2, sizeof poll2);
+		reg_w_buf(gspca_dev, poll3, sizeof poll3);
+		reg_w_buf(gspca_dev, poll4, sizeof poll4);
+	}
 }
 
 static int sd_start(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	int i, mode;
-	static const __u8 t1[] = { 0x66, 0x00, 0xa8, 0xe8 };
 	__u8 t2[] = { 0x07, 0x00, 0x0d, 0x60, 0x0e, 0x80 };
 	static const __u8 t3[] =
 		{ 0xb3, 0x07, 0xb4, 0x00, 0xb5, 0x88, 0xb6, 0x02, 0xb7, 0x06,
 		  0xb8, 0x00, 0xb9, 0xe7, 0xba, 0x01 };
-	static const __u8 t4[] = { 0x0b, 0x04, 0x0a, 0x40 };
 
 	mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode]. priv;
 	switch (mode) {
@@ -760,25 +845,29 @@
 	if (sd->sensor == SENSOR_TAS5130A) {
 		i = 0;
 		while (tas5130a_sensor_init[i][0] != 0) {
-			i2c_w(gspca_dev, tas5130a_sensor_init[i],
+			reg_w_buf(gspca_dev, tas5130a_sensor_init[i],
 					 sizeof tas5130a_sensor_init[0]);
 			i++;
 		}
 		reg_w(gspca_dev, 0x3c80);
 		/* just in case and to keep sync with logs (for mine) */
-		i2c_w(gspca_dev, tas5130a_sensor_init[3],
+		reg_w_buf(gspca_dev, tas5130a_sensor_init[3],
 				 sizeof tas5130a_sensor_init[0]);
 		reg_w(gspca_dev, 0x3c80);
 	} else {
-		other_sensor_init(gspca_dev);
+		om6802_sensor_init(gspca_dev);
 	}
-	/* just in case and to keep sync with logs  (for mine) */
-	i2c_w(gspca_dev, t1, sizeof t1);
-	i2c_w(gspca_dev, t2, sizeof t2);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].data4,
+			sizeof sensor_data[sd->sensor].data4);
 	reg_r(gspca_dev, 0x0012);
-	i2c_w(gspca_dev, t3, sizeof t3);
+	reg_w_buf(gspca_dev, t2, sizeof t2);
+	reg_w_buf(gspca_dev, t3, sizeof t3);
 	reg_w(gspca_dev, 0x0013);
-	i2c_w(gspca_dev, t4, sizeof t4);
+	msleep(15);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream,
+			sizeof sensor_data[sd->sensor].stream);
+	poll_sensor(gspca_dev);
+
 	/* restart on each start, just in case; sometimes regs go wrong
 	 * when using controls from the app */
 	setbrightness(gspca_dev);
@@ -787,6 +876,19 @@
 	return 0;
 }
 
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+	struct sd *sd = (struct sd *) gspca_dev;
+
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream,
+			sizeof sensor_data[sd->sensor].stream);
+	msleep(20);
+	reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream,
+			sizeof sensor_data[sd->sensor].stream);
+	msleep(20);
+	reg_w(gspca_dev, 0x0309);
+}
+
 static void sd_pkt_scan(struct gspca_dev *gspca_dev,
 			struct gspca_frame *frame,	/* target */
 			__u8 *data,			/* isoc packet */
@@ -1036,6 +1138,7 @@
 	.config = sd_config,
 	.init = sd_init,
 	.start = sd_start,
+	.stopN = sd_stopN,
 	.pkt_scan = sd_pkt_scan,
 	.querymenu = sd_querymenu,
 };
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index aeaa13f..d364850 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1211,6 +1211,10 @@
 
 	if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
 		ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std);
+		/* Turn off the output signal. The mpeg decoder is not yet
+		   active so without this you would get a green image until the
+		   mpeg decoder becomes active. */
+		ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
 	}
 
 	/* clear interrupt mask, effectively disabling interrupts */
@@ -1330,6 +1334,10 @@
 	ivtv_s_frequency(NULL, &fh, &vf);
 
 	if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
+		/* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
+		   the mpeg decoder so now the saa7127 receives a proper
+		   signal. */
+		ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
 		ivtv_init_mpeg_decoder(itv);
 	}
 	ivtv_s_std(NULL, &fh, &itv->tuner_std);
@@ -1366,6 +1374,10 @@
 
 		/* Stop all decoding */
 		IVTV_DEBUG_INFO("Stopping decoding\n");
+
+		/* Turn off the TV-out */
+		if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
+			ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
 		if (atomic_read(&itv->decoding) > 0) {
 			int type;
 
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index bc29436..3733b2a 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -55,6 +55,7 @@
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#include <asm/byteorder.h>
 
 #include <linux/dvb/video.h>
 #include <linux/dvb/audio.h>
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index b7457fc..1c404e4 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -600,13 +600,14 @@
 	since we may get here before the stream has been fully set-up */
 	if (mode == OUT_YUV && s->q_full.length == 0 && itv->dma_data_req_size) {
 		while (count >= itv->dma_data_req_size) {
-			if (!ivtv_yuv_udma_stream_frame (itv, (void __user *)user_buf)) {
-				bytes_written += itv->dma_data_req_size;
-				user_buf += itv->dma_data_req_size;
-				count -= itv->dma_data_req_size;
-			} else {
-				break;
-			}
+			rc = ivtv_yuv_udma_stream_frame(itv, (void __user *)user_buf);
+
+			if (rc < 0)
+				return rc;
+
+			bytes_written += itv->dma_data_req_size;
+			user_buf += itv->dma_data_req_size;
+			count -= itv->dma_data_req_size;
 		}
 		if (count == 0) {
 			IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 24700c2..41dbbe9 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -726,6 +726,7 @@
 {
 	return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg);
 }
+EXPORT_SYMBOL(ivtv_saa7127);
 
 int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg)
 {
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 8696527..4bae38d 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -509,7 +509,6 @@
 static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
 {
 	struct ivtv_open_id *id = fh;
-	struct ivtv *itv = id->itv;
 	s32 w = fmt->fmt.pix.width;
 	s32 h = fmt->fmt.pix.height;
 	int field = fmt->fmt.pix.field;
@@ -517,7 +516,22 @@
 
 	w = min(w, 720);
 	w = max(w, 2);
-	h = min(h, itv->is_out_50hz ? 576 : 480);
+	/* Why can the height be 576 even when the output is NTSC?
+
+	   Internally the buffers of the PVR350 are always set to 720x576. The
+	   decoded video frame will always be placed in the top left corner of
+	   this buffer. For any video which is not 720x576, the buffer will
+	   then be cropped to remove the unused right and lower areas, with
+	   the remaining image being scaled by the hardware to fit the display
+	   area. The video can be scaled both up and down, so a 720x480 video
+	   can be displayed full-screen on PAL and a 720x576 video can be
+	   displayed without cropping on NTSC.
+
+	   Note that the scaling only occurs on the video stream; the OSD
+	   resolution is locked to the broadcast standard and not scaled.
+
+	   Thanks to Ian Armstrong for this explanation. */
+	h = min(h, 576);
 	h = max(h, 2);
 	if (id->type == IVTV_DEC_STREAM_TYPE_YUV)
 		fmt->fmt.pix.field = field;
@@ -1742,12 +1756,12 @@
 	return 0;
 }
 
-static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct file *filp,
+static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
 		unsigned int cmd, unsigned long arg)
 {
 	struct video_device *vfd = video_devdata(filp);
 	struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
-	int ret;
+	long ret;
 
 	/* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
 	switch (cmd) {
@@ -1816,20 +1830,19 @@
 
 	if (ivtv_debug & IVTV_DBGFLG_IOCTL)
 		vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
-	ret = video_ioctl2(inode, filp, cmd, arg);
+	ret = __video_ioctl2(filp, cmd, arg);
 	vfd->debug = 0;
 	return ret;
 }
 
-int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-		    unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
 	struct ivtv *itv = id->itv;
-	int res;
+	long res;
 
 	mutex_lock(&itv->serialize_lock);
-	res = ivtv_serialized_ioctl(itv, inode, filp, cmd, arg);
+	res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
 	mutex_unlock(&itv->serialize_lock);
 	return res;
 }
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 7018858..58f0034 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -30,7 +30,6 @@
 int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
 int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
 int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
-int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-		    unsigned long arg);
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 
 #endif
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 5bbf31e..9b7aa79 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -48,7 +48,7 @@
 	.read = ivtv_v4l2_read,
 	.write = ivtv_v4l2_write,
 	.open = ivtv_v4l2_open,
-	.ioctl = ivtv_v4l2_ioctl,
+	.unlocked_ioctl = ivtv_v4l2_ioctl,
 	.compat_ioctl = v4l_compat_ioctl32,
 	.release = ivtv_v4l2_close,
 	.poll = ivtv_v4l2_enc_poll,
@@ -59,7 +59,7 @@
 	.read = ivtv_v4l2_read,
 	.write = ivtv_v4l2_write,
 	.open = ivtv_v4l2_open,
-	.ioctl = ivtv_v4l2_ioctl,
+	.unlocked_ioctl = ivtv_v4l2_ioctl,
 	.compat_ioctl = v4l_compat_ioctl32,
 	.release = ivtv_v4l2_close,
 	.poll = ivtv_v4l2_dec_poll,
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 8a4a150..921e281 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -48,6 +48,7 @@
 #endif
 
 #include "ivtv-driver.h"
+#include "ivtv-i2c.h"
 #include "ivtv-udma.h"
 #include "ivtv-mailbox.h"
 
@@ -894,11 +895,16 @@
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
 		ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
+		ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
 		break;
 	case FB_BLANK_NORMAL:
 	case FB_BLANK_HSYNC_SUSPEND:
 	case FB_BLANK_VSYNC_SUSPEND:
+		ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
+		ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
+		break;
 	case FB_BLANK_POWERDOWN:
+		ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
 		ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
 		break;
 	}
diff --git a/drivers/media/video/ks0127.c b/drivers/media/video/ks0127.c
index 2fd4b4a..bae2d2b 100644
--- a/drivers/media/video/ks0127.c
+++ b/drivers/media/video/ks0127.c
@@ -33,27 +33,20 @@
  * V1.1 Gerard v.d. Horst  Added some debugoutput, reset the video-standard
  */
 
-#ifndef __KERNEL__
-#define __KERNEL__
-#endif
-
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include "ks0127.h"
-
 #include <linux/i2c.h>
 #include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
+#include "ks0127.h"
 
-#define dprintk     if (debug) printk
-
-/* i2c identification */
-#define I2C_KS0127_ADDON   0xD8
-#define I2C_KS0127_ONBOARD 0xDA
+MODULE_DESCRIPTION("KS0127 video decoder driver");
+MODULE_AUTHOR("Ryan Drake");
+MODULE_LICENSE("GPL");
 
 #define KS_TYPE_UNKNOWN	0
 #define KS_TYPE_0122S	1
@@ -204,8 +197,6 @@
 };
 
 struct ks0127 {
-	struct i2c_client *client;
-	unsigned char	addr;
 	int		format_width;
 	int		format_height;
 	int		cap_width;
@@ -220,16 +211,18 @@
 
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug output");
-MODULE_LICENSE("GPL");
 
 static u8 reg_defaults[64];
 
-
-
 static void init_reg_defaults(void)
 {
+	static int initialized;
 	u8 *table = reg_defaults;
 
+	if (initialized)
+		return;
+	initialized = 1;
+
 	table[KS_CMDA]     = 0x2c;  /* VSE=0, CCIR 601, autodetect standard */
 	table[KS_CMDB]     = 0x12;  /* VALIGN=0, AGC control and input */
 	table[KS_CMDC]     = 0x00;  /* Test options */
@@ -308,50 +301,53 @@
  * An explanation from kayork@mail.utexas.edu:
  *
  * During I2C reads, the KS0127 only samples for a stop condition
- * during the place where the acknoledge bit should be. Any standard
+ * during the place where the acknowledge bit should be. Any standard
  * I2C implementation (correctly) throws in another clock transition
  * at the 9th bit, and the KS0127 will not recognize the stop condition
  * and will continue to clock out data.
  *
  * So we have to do the read ourself.  Big deal.
-	   workaround in i2c-algo-bit
+ *	   workaround in i2c-algo-bit
  */
 
 
-static u8 ks0127_read(struct ks0127 *ks, u8 reg)
+static u8 ks0127_read(struct i2c_client *c, u8 reg)
 {
-	struct i2c_client *c = ks->client;
 	char val = 0;
 	struct i2c_msg msgs[] = {
-		{c->addr, 0, sizeof(reg), &reg},
-		{c->addr, I2C_M_RD | I2C_M_NO_RD_ACK, sizeof(val), &val}};
+		{ c->addr, 0, sizeof(reg), &reg },
+		{ c->addr, I2C_M_RD | I2C_M_NO_RD_ACK, sizeof(val), &val }
+	};
 	int ret;
 
 	ret = i2c_transfer(c->adapter, msgs, ARRAY_SIZE(msgs));
 	if (ret != ARRAY_SIZE(msgs))
-		dprintk("ks0127_write error\n");
+		v4l_dbg(1, debug, c, "read error\n");
 
 	return val;
 }
 
 
-static void ks0127_write(struct ks0127 *ks, u8 reg, u8 val)
+static void ks0127_write(struct i2c_client *c, u8 reg, u8 val)
 {
-	char msg[] = {reg, val};
+	struct ks0127 *ks = i2c_get_clientdata(c);
+	char msg[] = { reg, val };
 
-	if (i2c_master_send(ks->client, msg, sizeof(msg)) != sizeof(msg))
-		dprintk("ks0127_write error\n");
+	if (i2c_master_send(c, msg, sizeof(msg)) != sizeof(msg))
+		v4l_dbg(1, debug, c, "write error\n");
 
 	ks->regs[reg] = val;
 }
 
 
 /* generic bit-twiddling */
-static void ks0127_and_or(struct ks0127 *ks, u8 reg, u8 and_v, u8 or_v)
+static void ks0127_and_or(struct i2c_client *client, u8 reg, u8 and_v, u8 or_v)
 {
+	struct ks0127 *ks = i2c_get_clientdata(client);
+
 	u8 val = ks->regs[reg];
 	val = (val & and_v) | or_v;
-	ks0127_write(ks, reg, val);
+	ks0127_write(client, reg, val);
 }
 
 
@@ -359,73 +355,69 @@
 /****************************************************************************
 * ks0127 private api
 ****************************************************************************/
-static void ks0127_reset(struct ks0127* ks)
+static void ks0127_reset(struct i2c_client *c)
 {
-	int i;
+	struct ks0127 *ks = i2c_get_clientdata(c);
 	u8 *table = reg_defaults;
+	int i;
 
 	ks->ks_type = KS_TYPE_UNKNOWN;
 
-	dprintk("ks0127: reset\n");
+	v4l_dbg(1, debug, c, "reset\n");
 	msleep(1);
 
 	/* initialize all registers to known values */
 	/* (except STAT, 0x21, 0x22, TEST and 0x38,0x39) */
 
-	for(i = 1; i < 33; i++)
-		ks0127_write(ks, i, table[i]);
+	for (i = 1; i < 33; i++)
+		ks0127_write(c, i, table[i]);
 
-	for(i = 35; i < 40; i++)
-		ks0127_write(ks, i, table[i]);
+	for (i = 35; i < 40; i++)
+		ks0127_write(c, i, table[i]);
 
-	for(i = 41; i < 56; i++)
-		ks0127_write(ks, i, table[i]);
+	for (i = 41; i < 56; i++)
+		ks0127_write(c, i, table[i]);
 
-	for(i = 58; i < 64; i++)
-		ks0127_write(ks, i, table[i]);
+	for (i = 58; i < 64; i++)
+		ks0127_write(c, i, table[i]);
 
 
-	if ((ks0127_read(ks, KS_STAT) & 0x80) == 0) {
+	if ((ks0127_read(c, KS_STAT) & 0x80) == 0) {
 		ks->ks_type = KS_TYPE_0122S;
-		dprintk("ks0127: ks0122s Found\n");
+		v4l_dbg(1, debug, c, "ks0122s found\n");
 		return;
 	}
 
-	switch(ks0127_read(ks, KS_CMDE) & 0x0f) {
-
+	switch (ks0127_read(c, KS_CMDE) & 0x0f) {
 	case 0:
 		ks->ks_type = KS_TYPE_0127;
-		dprintk("ks0127: ks0127 found\n");
+		v4l_dbg(1, debug, c, "ks0127 found\n");
 		break;
 
 	case 9:
 		ks->ks_type = KS_TYPE_0127B;
-		dprintk("ks0127: ks0127B Revision A found\n");
+		v4l_dbg(1, debug, c, "ks0127B Revision A found\n");
 		break;
 
 	default:
-		dprintk("ks0127: unknown revision\n");
+		v4l_dbg(1, debug, c, "unknown revision\n");
 		break;
 	}
 }
 
-static int ks0127_command(struct i2c_client *client,
-			  unsigned int cmd, void *arg)
+static int ks0127_command(struct i2c_client *c, unsigned cmd, void *arg)
 {
-	struct ks0127 *ks = i2c_get_clientdata(client);
-
-	int		*iarg = (int*)arg;
-
+	struct ks0127 *ks = i2c_get_clientdata(c);
+	int		*iarg = (int *)arg;
 	int		status;
 
 	if (!ks)
 		return -ENODEV;
 
 	switch (cmd) {
-
 	case DECODER_INIT:
-		dprintk("ks0127: command DECODER_INIT\n");
-		ks0127_reset(ks);
+		v4l_dbg(1, debug, c, "DECODER_INIT\n");
+		ks0127_reset(c);
 		break;
 
 	case DECODER_SET_INPUT:
@@ -436,161 +428,160 @@
 		case KS_INPUT_COMPOSITE_4:
 		case KS_INPUT_COMPOSITE_5:
 		case KS_INPUT_COMPOSITE_6:
-			dprintk("ks0127: command DECODER_SET_INPUT %d: "
-				"Composite\n", *iarg);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_INPUT %d: Composite\n", *iarg);
 			/* autodetect 50/60 Hz */
-			ks0127_and_or(ks, KS_CMDA,   0xfc, 0x00);
+			ks0127_and_or(c, KS_CMDA,   0xfc, 0x00);
 			/* VSE=0 */
-			ks0127_and_or(ks, KS_CMDA,   ~0x40, 0x00);
+			ks0127_and_or(c, KS_CMDA,   ~0x40, 0x00);
 			/* set input line */
-			ks0127_and_or(ks, KS_CMDB,   0xb0, *iarg);
+			ks0127_and_or(c, KS_CMDB,   0xb0, *iarg);
 			/* non-freerunning mode */
-			ks0127_and_or(ks, KS_CMDC,   0x70, 0x0a);
+			ks0127_and_or(c, KS_CMDC,   0x70, 0x0a);
 			/* analog input */
-			ks0127_and_or(ks, KS_CMDD,   0x03, 0x00);
+			ks0127_and_or(c, KS_CMDD,   0x03, 0x00);
 			/* enable chroma demodulation */
-			ks0127_and_or(ks, KS_CTRACK, 0xcf, 0x00);
+			ks0127_and_or(c, KS_CTRACK, 0xcf, 0x00);
 			/* chroma trap, HYBWR=1 */
-			ks0127_and_or(ks, KS_LUMA,   0x00,
+			ks0127_and_or(c, KS_LUMA,   0x00,
 				       (reg_defaults[KS_LUMA])|0x0c);
 			/* scaler fullbw, luma comb off */
-			ks0127_and_or(ks, KS_VERTIA, 0x08, 0x81);
+			ks0127_and_or(c, KS_VERTIA, 0x08, 0x81);
 			/* manual chroma comb .25 .5 .25 */
-			ks0127_and_or(ks, KS_VERTIC, 0x0f, 0x90);
+			ks0127_and_or(c, KS_VERTIC, 0x0f, 0x90);
 
 			/* chroma path delay */
-			ks0127_and_or(ks, KS_CHROMB, 0x0f, 0x90);
+			ks0127_and_or(c, KS_CHROMB, 0x0f, 0x90);
 
-			ks0127_write(ks, KS_UGAIN, reg_defaults[KS_UGAIN]);
-			ks0127_write(ks, KS_VGAIN, reg_defaults[KS_VGAIN]);
-			ks0127_write(ks, KS_UVOFFH, reg_defaults[KS_UVOFFH]);
-			ks0127_write(ks, KS_UVOFFL, reg_defaults[KS_UVOFFL]);
+			ks0127_write(c, KS_UGAIN, reg_defaults[KS_UGAIN]);
+			ks0127_write(c, KS_VGAIN, reg_defaults[KS_VGAIN]);
+			ks0127_write(c, KS_UVOFFH, reg_defaults[KS_UVOFFH]);
+			ks0127_write(c, KS_UVOFFL, reg_defaults[KS_UVOFFL]);
 			break;
 
 		case KS_INPUT_SVIDEO_1:
 		case KS_INPUT_SVIDEO_2:
 		case KS_INPUT_SVIDEO_3:
-			dprintk("ks0127: command DECODER_SET_INPUT %d: "
-				"S-Video\n", *iarg);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_INPUT %d: S-Video\n", *iarg);
 			/* autodetect 50/60 Hz */
-			ks0127_and_or(ks, KS_CMDA,   0xfc, 0x00);
+			ks0127_and_or(c, KS_CMDA,   0xfc, 0x00);
 			/* VSE=0 */
-			ks0127_and_or(ks, KS_CMDA,   ~0x40, 0x00);
+			ks0127_and_or(c, KS_CMDA,   ~0x40, 0x00);
 			/* set input line */
-			ks0127_and_or(ks, KS_CMDB,   0xb0, *iarg);
+			ks0127_and_or(c, KS_CMDB,   0xb0, *iarg);
 			/* non-freerunning mode */
-			ks0127_and_or(ks, KS_CMDC,   0x70, 0x0a);
+			ks0127_and_or(c, KS_CMDC,   0x70, 0x0a);
 			/* analog input */
-			ks0127_and_or(ks, KS_CMDD,   0x03, 0x00);
+			ks0127_and_or(c, KS_CMDD,   0x03, 0x00);
 			/* enable chroma demodulation */
-			ks0127_and_or(ks, KS_CTRACK, 0xcf, 0x00);
-			ks0127_and_or(ks, KS_LUMA, 0x00,
+			ks0127_and_or(c, KS_CTRACK, 0xcf, 0x00);
+			ks0127_and_or(c, KS_LUMA, 0x00,
 				       reg_defaults[KS_LUMA]);
 			/* disable luma comb */
-			ks0127_and_or(ks, KS_VERTIA, 0x08,
+			ks0127_and_or(c, KS_VERTIA, 0x08,
 				       (reg_defaults[KS_VERTIA]&0xf0)|0x01);
-			ks0127_and_or(ks, KS_VERTIC, 0x0f,
+			ks0127_and_or(c, KS_VERTIC, 0x0f,
 				       reg_defaults[KS_VERTIC]&0xf0);
 
-			ks0127_and_or(ks, KS_CHROMB, 0x0f,
+			ks0127_and_or(c, KS_CHROMB, 0x0f,
 				       reg_defaults[KS_CHROMB]&0xf0);
 
-			ks0127_write(ks, KS_UGAIN, reg_defaults[KS_UGAIN]);
-			ks0127_write(ks, KS_VGAIN, reg_defaults[KS_VGAIN]);
-			ks0127_write(ks, KS_UVOFFH, reg_defaults[KS_UVOFFH]);
-			ks0127_write(ks, KS_UVOFFL, reg_defaults[KS_UVOFFL]);
+			ks0127_write(c, KS_UGAIN, reg_defaults[KS_UGAIN]);
+			ks0127_write(c, KS_VGAIN, reg_defaults[KS_VGAIN]);
+			ks0127_write(c, KS_UVOFFH, reg_defaults[KS_UVOFFH]);
+			ks0127_write(c, KS_UVOFFL, reg_defaults[KS_UVOFFL]);
 			break;
 
 		case KS_INPUT_YUV656:
-			dprintk("ks0127: command DECODER_SET_INPUT 15: "
-				"YUV656\n");
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_INPUT 15: YUV656\n");
 			if (ks->norm == VIDEO_MODE_NTSC ||
 			    ks->norm == KS_STD_PAL_M)
 				/* force 60 Hz */
-				ks0127_and_or(ks, KS_CMDA,   0xfc, 0x03);
+				ks0127_and_or(c, KS_CMDA,   0xfc, 0x03);
 			else
 				/* force 50 Hz */
-				ks0127_and_or(ks, KS_CMDA,   0xfc, 0x02);
+				ks0127_and_or(c, KS_CMDA,   0xfc, 0x02);
 
-			ks0127_and_or(ks, KS_CMDA,   0xff, 0x40); /* VSE=1 */
+			ks0127_and_or(c, KS_CMDA,   0xff, 0x40); /* VSE=1 */
 			/* set input line and VALIGN */
-			ks0127_and_or(ks, KS_CMDB,   0xb0, (*iarg | 0x40));
+			ks0127_and_or(c, KS_CMDB,   0xb0, (*iarg | 0x40));
 			/* freerunning mode, */
 			/* TSTGEN = 1 TSTGFR=11 TSTGPH=0 TSTGPK=0  VMEM=1*/
-			ks0127_and_or(ks, KS_CMDC,   0x70, 0x87);
+			ks0127_and_or(c, KS_CMDC,   0x70, 0x87);
 			/* digital input, SYNDIR = 0 INPSL=01 CLKDIR=0 EAV=0 */
-			ks0127_and_or(ks, KS_CMDD,   0x03, 0x08);
+			ks0127_and_or(c, KS_CMDD,   0x03, 0x08);
 			/* disable chroma demodulation */
-			ks0127_and_or(ks, KS_CTRACK, 0xcf, 0x30);
+			ks0127_and_or(c, KS_CTRACK, 0xcf, 0x30);
 			/* HYPK =01 CTRAP = 0 HYBWR=0 PED=1 RGBH=1 UNIT=1 */
-			ks0127_and_or(ks, KS_LUMA,   0x00, 0x71);
-			ks0127_and_or(ks, KS_VERTIC, 0x0f,
+			ks0127_and_or(c, KS_LUMA,   0x00, 0x71);
+			ks0127_and_or(c, KS_VERTIC, 0x0f,
 				       reg_defaults[KS_VERTIC]&0xf0);
 
 			/* scaler fullbw, luma comb off */
-			ks0127_and_or(ks, KS_VERTIA, 0x08, 0x81);
+			ks0127_and_or(c, KS_VERTIA, 0x08, 0x81);
 
-			ks0127_and_or(ks, KS_CHROMB, 0x0f,
+			ks0127_and_or(c, KS_CHROMB, 0x0f,
 				       reg_defaults[KS_CHROMB]&0xf0);
 
-			ks0127_and_or(ks, KS_CON, 0x00, 0x00);
-			ks0127_and_or(ks, KS_BRT, 0x00, 32);	/* spec: 34 */
+			ks0127_and_or(c, KS_CON, 0x00, 0x00);
+			ks0127_and_or(c, KS_BRT, 0x00, 32);	/* spec: 34 */
 				/* spec: 229 (e5) */
-			ks0127_and_or(ks, KS_SAT, 0x00, 0xe8);
-			ks0127_and_or(ks, KS_HUE, 0x00, 0);
+			ks0127_and_or(c, KS_SAT, 0x00, 0xe8);
+			ks0127_and_or(c, KS_HUE, 0x00, 0);
 
-			ks0127_and_or(ks, KS_UGAIN, 0x00, 238);
-			ks0127_and_or(ks, KS_VGAIN, 0x00, 0x00);
+			ks0127_and_or(c, KS_UGAIN, 0x00, 238);
+			ks0127_and_or(c, KS_VGAIN, 0x00, 0x00);
 
 			/*UOFF:0x30, VOFF:0x30, TSTCGN=1 */
-			ks0127_and_or(ks, KS_UVOFFH, 0x00, 0x4f);
-			ks0127_and_or(ks, KS_UVOFFL, 0x00, 0x00);
+			ks0127_and_or(c, KS_UVOFFH, 0x00, 0x4f);
+			ks0127_and_or(c, KS_UVOFFL, 0x00, 0x00);
 			break;
 
 		default:
-			dprintk("ks0127: command DECODER_SET_INPUT: "
-				"Unknown input %d\n", *iarg);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_INPUT: Unknown input %d\n", *iarg);
 			break;
 		}
 
 		/* hack: CDMLPF sometimes spontaneously switches on; */
 		/* force back off */
-		ks0127_write(ks, KS_DEMOD, reg_defaults[KS_DEMOD]);
+		ks0127_write(c, KS_DEMOD, reg_defaults[KS_DEMOD]);
 		break;
 
 	case DECODER_SET_OUTPUT:
 		switch(*iarg) {
 		case KS_OUTPUT_YUV656E:
-			dprintk("ks0127: command DECODER_SET_OUTPUT: "
-				"OUTPUT_YUV656E (Missing)\n");
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_OUTPUT: OUTPUT_YUV656E (Missing)\n");
 			return -EINVAL;
-			break;
 
 		case KS_OUTPUT_EXV:
-			dprintk("ks0127: command DECODER_SET_OUTPUT: "
-				"OUTPUT_EXV\n");
-			ks0127_and_or(ks, KS_OFMTA, 0xf0, 0x09);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_OUTPUT: OUTPUT_EXV\n");
+			ks0127_and_or(c, KS_OFMTA, 0xf0, 0x09);
 			break;
 		}
 		break;
 
-	case DECODER_SET_NORM: //sam This block mixes old and new norm names...
+	case DECODER_SET_NORM: /* sam This block mixes old and new norm names... */
 		/* Set to automatic SECAM/Fsc mode */
-		ks0127_and_or(ks, KS_DEMOD, 0xf0, 0x00);
+		ks0127_and_or(c, KS_DEMOD, 0xf0, 0x00);
 
 		ks->norm = *iarg;
-		switch(*iarg)
-		{
+		switch (*iarg) {
 		/* this is untested !! */
 		/* It just detects PAL_N/NTSC_M (no special frequencies) */
 		/* And you have to set the standard a second time afterwards */
 		case VIDEO_MODE_AUTO:
-			dprintk("ks0127: command DECODER_SET_NORM: AUTO\n");
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_NORM: AUTO\n");
 
 			/* The chip determines the format */
 			/* based on the current field rate */
-			ks0127_and_or(ks, KS_CMDA,   0xfc, 0x00);
-			ks0127_and_or(ks, KS_CHROMA, 0x9f, 0x20);
+			ks0127_and_or(c, KS_CMDA,   0xfc, 0x00);
+			ks0127_and_or(c, KS_CHROMA, 0x9f, 0x20);
 			/* This is wrong for PAL ! As I said, */
 			/* you need to set the standard once again !! */
 			ks->format_height = 240;
@@ -598,84 +589,86 @@
 			break;
 
 		case VIDEO_MODE_NTSC:
-			dprintk("ks0127: command DECODER_SET_NORM: NTSC_M\n");
-			ks0127_and_or(ks, KS_CHROMA, 0x9f, 0x20);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_NORM: NTSC_M\n");
+			ks0127_and_or(c, KS_CHROMA, 0x9f, 0x20);
 			ks->format_height = 240;
 			ks->format_width = 704;
 			break;
 
 		case KS_STD_NTSC_N:
-			dprintk("ks0127: command KS0127_SET_STANDARD: "
-				"NTSC_N (fixme)\n");
-			ks0127_and_or(ks, KS_CHROMA, 0x9f, 0x40);
+			v4l_dbg(1, debug, c,
+				"KS0127_SET_NORM: NTSC_N (fixme)\n");
+			ks0127_and_or(c, KS_CHROMA, 0x9f, 0x40);
 			ks->format_height = 240;
 			ks->format_width = 704;
 			break;
 
 		case VIDEO_MODE_PAL:
-			dprintk("ks0127: command DECODER_SET_NORM: PAL_N\n");
-			ks0127_and_or(ks, KS_CHROMA, 0x9f, 0x20);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_NORM: PAL_N\n");
+			ks0127_and_or(c, KS_CHROMA, 0x9f, 0x20);
 			ks->format_height = 290;
 			ks->format_width = 704;
 			break;
 
 		case KS_STD_PAL_M:
-			dprintk("ks0127: command KS0127_SET_STANDARD: "
-				"PAL_M (fixme)\n");
-			ks0127_and_or(ks, KS_CHROMA, 0x9f, 0x40);
+			v4l_dbg(1, debug, c,
+				"KS0127_SET_NORM: PAL_M (fixme)\n");
+			ks0127_and_or(c, KS_CHROMA, 0x9f, 0x40);
 			ks->format_height = 290;
 			ks->format_width = 704;
 			break;
 
 		case VIDEO_MODE_SECAM:
-			dprintk("ks0127: command KS0127_SET_STANDARD: "
-				"SECAM\n");
+			v4l_dbg(1, debug, c,
+				"KS0127_SET_NORM: SECAM\n");
 			ks->format_height = 290;
 			ks->format_width = 704;
 
 			/* set to secam autodetection */
-			ks0127_and_or(ks, KS_CHROMA, 0xdf, 0x20);
-			ks0127_and_or(ks, KS_DEMOD, 0xf0, 0x00);
+			ks0127_and_or(c, KS_CHROMA, 0xdf, 0x20);
+			ks0127_and_or(c, KS_DEMOD, 0xf0, 0x00);
 			schedule_timeout_interruptible(HZ/10+1);
 
 			/* did it autodetect? */
-			if (ks0127_read(ks, KS_DEMOD) & 0x40)
+			if (ks0127_read(c, KS_DEMOD) & 0x40)
 				break;
 
 			/* force to secam mode */
-			ks0127_and_or(ks, KS_DEMOD, 0xf0, 0x0f);
+			ks0127_and_or(c, KS_DEMOD, 0xf0, 0x0f);
 			break;
 
 		default:
-			dprintk("ks0127: command DECODER_SET_NORM: "
-				"Unknown norm %d\n", *iarg);
+			v4l_dbg(1, debug, c,
+				"DECODER_SET_NORM: Unknown norm %d\n", *iarg);
 			break;
 		}
 		break;
 
 	case DECODER_SET_PICTURE:
-		dprintk("ks0127: command DECODER_SET_PICTURE "
-			"not yet supported (fixme)\n");
+		v4l_dbg(1, debug, c,
+			"DECODER_SET_PICTURE: not yet supported\n");
 		return -EINVAL;
 
-	//sam todo: KS0127_SET_BRIGHTNESS: Merge into DECODER_SET_PICTURE
-	//sam todo: KS0127_SET_CONTRAST: Merge into DECODER_SET_PICTURE
-	//sam todo: KS0127_SET_HUE: Merge into DECODER_SET_PICTURE?
-	//sam todo: KS0127_SET_SATURATION: Merge into DECODER_SET_PICTURE
-	//sam todo: KS0127_SET_AGC_MODE:
-	//sam todo: KS0127_SET_AGC:
-	//sam todo: KS0127_SET_CHROMA_MODE:
-	//sam todo: KS0127_SET_PIXCLK_MODE:
-	//sam todo: KS0127_SET_GAMMA_MODE:
-	//sam todo: KS0127_SET_UGAIN:
-	//sam todo: KS0127_SET_VGAIN:
-	//sam todo: KS0127_SET_INVALY:
-	//sam todo: KS0127_SET_INVALU:
-	//sam todo: KS0127_SET_INVALV:
-	//sam todo: KS0127_SET_UNUSEY:
-	//sam todo: KS0127_SET_UNUSEU:
-	//sam todo: KS0127_SET_UNUSEV:
-	//sam todo: KS0127_SET_VSALIGN_MODE:
+	/* sam todo: KS0127_SET_BRIGHTNESS: Merge into DECODER_SET_PICTURE */
+	/* sam todo: KS0127_SET_CONTRAST: Merge into DECODER_SET_PICTURE */
+	/* sam todo: KS0127_SET_HUE: Merge into DECODER_SET_PICTURE? */
+	/* sam todo: KS0127_SET_SATURATION: Merge into DECODER_SET_PICTURE */
+	/* sam todo: KS0127_SET_AGC_MODE: */
+	/* sam todo: KS0127_SET_AGC: */
+	/* sam todo: KS0127_SET_CHROMA_MODE: */
+	/* sam todo: KS0127_SET_PIXCLK_MODE: */
+	/* sam todo: KS0127_SET_GAMMA_MODE: */
+	/* sam todo: KS0127_SET_UGAIN: */
+	/* sam todo: KS0127_SET_VGAIN: */
+	/* sam todo: KS0127_SET_INVALY: */
+	/* sam todo: KS0127_SET_INVALU: */
+	/* sam todo: KS0127_SET_INVALV: */
+	/* sam todo: KS0127_SET_UNUSEY: */
+	/* sam todo: KS0127_SET_UNUSEU: */
+	/* sam todo: KS0127_SET_UNUSEV: */
+	/* sam todo: KS0127_SET_VSALIGN_MODE: */
 
 	case DECODER_ENABLE_OUTPUT:
 	{
@@ -684,34 +677,32 @@
 		iarg = arg;
 		enable = (*iarg != 0);
 		if (enable) {
-			dprintk("ks0127: command "
-					"DECODER_ENABLE_OUTPUT on "
-					"(%d)\n", enable);
+			v4l_dbg(1, debug, c,
+				"DECODER_ENABLE_OUTPUT on\n");
 			/* All output pins on */
-			ks0127_and_or(ks, KS_OFMTA, 0xcf, 0x30);
+			ks0127_and_or(c, KS_OFMTA, 0xcf, 0x30);
 			/* Obey the OEN pin */
-			ks0127_and_or(ks, KS_CDEM, 0x7f, 0x00);
+			ks0127_and_or(c, KS_CDEM, 0x7f, 0x00);
 		} else {
-			dprintk("ks0127: command "
-					"DECODER_ENABLE_OUTPUT off "
-					"(%d)\n", enable);
+			v4l_dbg(1, debug, c,
+				"DECODER_ENABLE_OUTPUT off\n");
 			/* Video output pins off */
-			ks0127_and_or(ks, KS_OFMTA, 0xcf, 0x00);
+			ks0127_and_or(c, KS_OFMTA, 0xcf, 0x00);
 			/* Ignore the OEN pin */
-			ks0127_and_or(ks, KS_CDEM, 0x7f, 0x80);
+			ks0127_and_or(c, KS_CDEM, 0x7f, 0x80);
 		}
-	}
 		break;
+	}
 
-	//sam todo: KS0127_SET_OUTPUT_MODE:
-	//sam todo: KS0127_SET_WIDTH:
-	//sam todo: KS0127_SET_HEIGHT:
-	//sam todo: KS0127_SET_HSCALE:
+	/* sam todo: KS0127_SET_OUTPUT_MODE: */
+	/* sam todo: KS0127_SET_WIDTH: */
+	/* sam todo: KS0127_SET_HEIGHT: */
+	/* sam todo: KS0127_SET_HSCALE: */
 
 	case DECODER_GET_STATUS:
-		dprintk("ks0127: command DECODER_GET_STATUS\n");
+		v4l_dbg(1, debug, c, "DECODER_GET_STATUS\n");
 		*iarg = 0;
-		status = ks0127_read(ks, KS_STAT);
+		status = ks0127_read(c, KS_STAT);
 		if (!(status & 0x20))		 /* NOVID not set */
 			*iarg = (*iarg | DECODER_STATUS_GOOD);
 		if ((status & 0x01))		      /* CLOCK set */
@@ -722,124 +713,81 @@
 			*iarg = (*iarg | DECODER_STATUS_NTSC);
 		break;
 
-	//Catch any unknown command
+	/* Catch any unknown command */
 	default:
-		dprintk("ks0127: command unknown: %04X\n", cmd);
+		v4l_dbg(1, debug, c, "unknown: 0x%08x\n", cmd);
 		return -EINVAL;
 	}
 	return 0;
 }
 
 
-
-
-static int ks0127_probe(struct i2c_adapter *adapter);
-static int ks0127_detach(struct i2c_client *client);
-static int ks0127_command(struct i2c_client *client,
-			  unsigned int cmd, void *arg);
-
-
-
 /* Addresses to scan */
-static unsigned short normal_i2c[] = {I2C_KS0127_ADDON>>1,
-				       I2C_KS0127_ONBOARD>>1, I2C_CLIENT_END};
-static unsigned short probe[2] =	{I2C_CLIENT_END, I2C_CLIENT_END};
-static unsigned short ignore[2] = 	{I2C_CLIENT_END, I2C_CLIENT_END};
-static struct i2c_client_address_data addr_data = {
-	normal_i2c,
-	probe,
-	ignore,
+#define I2C_KS0127_ADDON   0xD8
+#define I2C_KS0127_ONBOARD 0xDA
+
+static unsigned short normal_i2c[] = {
+	I2C_KS0127_ADDON >> 1,
+	I2C_KS0127_ONBOARD >> 1,
+	I2C_CLIENT_END
 };
 
-static struct i2c_driver i2c_driver_ks0127 = {
-	.driver.name = "ks0127",
-	.id             = I2C_DRIVERID_KS0127,
-	.attach_adapter = ks0127_probe,
-	.detach_client  = ks0127_detach,
-	.command        = ks0127_command
-};
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client ks0127_client_tmpl =
-{
-	.name = "(ks0127 unset)",
-	.addr = 0,
-	.adapter = NULL,
-	.driver = &i2c_driver_ks0127,
-};
-
-static int ks0127_found_proc(struct i2c_adapter *adapter, int addr, int kind)
+static int ks0127_probe(struct i2c_client *c, const struct i2c_device_id *id)
 {
 	struct ks0127 *ks;
-	struct i2c_client *client;
 
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (client == NULL)
-		return -ENOMEM;
-	memcpy(client, &ks0127_client_tmpl, sizeof(*client));
+	v4l_info(c, "%s chip found @ 0x%x (%s)\n",
+		c->addr == (I2C_KS0127_ADDON >> 1) ? "addon" : "on-board",
+		c->addr << 1, c->adapter->name);
 
 	ks = kzalloc(sizeof(*ks), GFP_KERNEL);
-	if (ks == NULL) {
-		kfree(client);
+	if (ks == NULL)
 		return -ENOMEM;
-	}
 
-	i2c_set_clientdata(client, ks);
-	client->adapter = adapter;
-	client->addr = addr;
-	sprintf(client->name, "ks0127-%02x", adapter->id);
+	i2c_set_clientdata(c, ks);
 
-	ks->client = client;
-	ks->addr = addr;
 	ks->ks_type = KS_TYPE_UNKNOWN;
 
 	/* power up */
-	ks0127_write(ks, KS_CMDA, 0x2c);
+	init_reg_defaults();
+	ks0127_write(c, KS_CMDA, 0x2c);
 	mdelay(10);
 
 	/* reset the device */
-	ks0127_reset(ks);
-	printk(KERN_INFO "ks0127: attach: %s video decoder\n",
-	       ks->addr==(I2C_KS0127_ADDON>>1) ? "addon" : "on-board");
-
-	i2c_attach_client(client);
+	ks0127_reset(c);
 	return 0;
 }
 
-
-static int ks0127_probe(struct i2c_adapter *adapter)
+static int ks0127_remove(struct i2c_client *c)
 {
-	if (adapter->id == I2C_HW_B_ZR36067)
-		return i2c_probe(adapter, &addr_data, ks0127_found_proc);
-	return 0;
-}
+	struct ks0127 *ks = i2c_get_clientdata(c);
 
-static int ks0127_detach(struct i2c_client *client)
-{
-	struct ks0127 *ks = i2c_get_clientdata(client);
+	ks0127_write(c, KS_OFMTA, 0x20); /* tristate */
+	ks0127_write(c, KS_CMDA, 0x2c | 0x80); /* power down */
 
-	ks0127_write(ks, KS_OFMTA, 0x20); /*tristate*/
-	ks0127_write(ks, KS_CMDA, 0x2c | 0x80); /* power down */
-
-	i2c_detach_client(client);
 	kfree(ks);
-	kfree(client);
-
-	dprintk("ks0127: detach\n");
 	return 0;
 }
 
-
-static int __devinit ks0127_init_module(void)
+static int ks0127_legacy_probe(struct i2c_adapter *adapter)
 {
-	init_reg_defaults();
-	return i2c_add_driver(&i2c_driver_ks0127);
+	return adapter->id == I2C_HW_B_ZR36067;
 }
 
-static void __devexit ks0127_cleanup_module(void)
-{
-	i2c_del_driver(&i2c_driver_ks0127);
-}
+static const struct i2c_device_id ks0127_id[] = {
+	{ "ks0127", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ks0127_id);
 
-
-module_init(ks0127_init_module);
-module_exit(ks0127_cleanup_module);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "ks0127",
+	.driverid = I2C_DRIVERID_KS0127,
+	.command = ks0127_command,
+	.probe = ks0127_probe,
+	.remove = ks0127_remove,
+	.legacy_probe = ks0127_legacy_probe,
+	.id_table = ks0127_id,
+};
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 935d73d..210f124 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -1098,9 +1098,10 @@
 		reg_w(ov, R51x_SYS_SNAP, 0x02);
 		reg_w(ov, R51x_SYS_SNAP, 0x00);
 	} else if (ov->bclass == BCL_OV518) {
-		warn("snapshot reset not supported yet on OV518(+)");
+		dev_warn(&ov->dev->dev,
+			 "snapshot reset not supported yet on OV518(+)\n");
 	} else {
-		err("clear snap: invalid bridge type");
+		dev_err(&ov->dev->dev, "clear snap: invalid bridge type\n");
 	}
 }
 
@@ -1115,14 +1116,16 @@
 	if (ov->bclass == BCL_OV511) {
 		ret = reg_r(ov, R51x_SYS_SNAP);
 		if (ret < 0) {
-			err("Error checking snspshot status (%d)", ret);
+			dev_err(&ov->dev->dev,
+				"Error checking snspshot status (%d)\n", ret);
 		} else if (ret & 0x08) {
 			status = 1;
 		}
 	} else if (ov->bclass == BCL_OV518) {
-		warn("snapshot check not supported yet on OV518(+)");
+		dev_warn(&ov->dev->dev,
+			 "snapshot check not supported yet on OV518(+)\n");
 	} else {
-		err("check snap: invalid bridge type");
+		dev_err(&ov->dev->dev, "clear snap: invalid bridge type\n");
 	}
 
 	return status;
@@ -5217,7 +5220,8 @@
 	if (ov->bclass == BCL_OV511)
 		reg_w(ov, 0x11, 0x00);
 	else
-		warn("SAA7111A not yet supported with OV518/OV518+");
+		dev_warn(&ov->dev->dev,
+			 "SAA7111A not yet supported with OV518/OV518+\n");
 
 	return 0;
 }
@@ -5456,7 +5460,8 @@
 	 * required. OV518 has no uncompressed mode, to save RAM. */
 	if (!dumppix && !ov->compress) {
 		ov->compress = 1;
-		warn("Compression required with OV518...enabling");
+		dev_warn(&ov->dev->dev,
+			 "Compression required with OV518...enabling\n");
 	}
 
 	if (ov->bridge == BRG_OV518) {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index a1252d6..273d2a1 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -402,6 +402,10 @@
 	ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0);
 	ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0);
 
+	/* prevent the PTSs from slowly drifting away in the generated
+	   MPEG stream */
+	ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC, 2, 4, 1);
+
 	return ret;
 }
 
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 94265bd..5b81ba4 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -60,7 +60,6 @@
 static DEFINE_MUTEX(pvr2_unit_mtx);
 
 static int ctlchg;
-static int initusbreset = 1;
 static int procreload;
 static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
 static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
@@ -71,8 +70,6 @@
 MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
 module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
-module_param(initusbreset, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
 module_param(procreload, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(procreload,
 		 "Attempt init failure recovery with firmware reload");
@@ -1967,9 +1964,6 @@
 	}
 	hdw->fw1_state = FW1_STATE_OK;
 
-	if (initusbreset) {
-		pvr2_hdw_device_reset(hdw);
-	}
 	if (!pvr2_hdw_dev_ok(hdw)) return;
 
 	for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index f048d80..97ed959 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -168,7 +168,7 @@
  * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
  *
  */
-static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
+static int __pvr2_v4l2_do_ioctl(struct file *file,
 			      unsigned int cmd, void *arg)
 {
 	struct pvr2_v4l2_fh *fh = file->private_data;
@@ -863,8 +863,8 @@
 #endif
 
 	default :
-		ret = v4l_compat_translate_ioctl(inode,file,cmd,
-						 arg,pvr2_v4l2_do_ioctl);
+		ret = v4l_compat_translate_ioctl(file, cmd,
+						 arg, __pvr2_v4l2_do_ioctl);
 	}
 
 	pvr2_hdw_commit_ctl(hdw);
@@ -890,10 +890,15 @@
 	return ret;
 }
 
+static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
+			      unsigned int cmd, void *arg)
+{
+	return __pvr2_v4l2_do_ioctl(file, cmd, arg);
+}
 
 static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
 {
-	int minor_id = dip->devbase.minor;
+	int num = dip->devbase.num;
 	struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw;
 	enum pvr2_config cfg = dip->config;
 	int v4l_type = dip->v4l_type;
@@ -909,7 +914,7 @@
 	video_unregister_device(&dip->devbase);
 
 	printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n",
-	       get_v4l_name(v4l_type),minor_id & 0x1f,
+	       get_v4l_name(v4l_type), num,
 	       pvr2_config_get_name(cfg));
 
 }
@@ -1310,7 +1315,7 @@
 	}
 
 	printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n",
-	       get_v4l_name(dip->v4l_type),dip->devbase.minor & 0x1f,
+	       get_v4l_name(dip->v4l_type), dip->devbase.num,
 	       pvr2_config_get_name(dip->config));
 
 	pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index ab28389..f3897a3 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1795,7 +1795,7 @@
 		goto err;
 	}
 	else {
-		PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->minor & 0x3F);
+		PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->num);
 	}
 
 	/* occupy slot */
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index 4aa82b3..adf2ba7 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -31,36 +31,24 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
-#include <asm/io.h>
 #include <asm/uaccess.h>
+#include <linux/i2c.h>
+#include <linux/videodev.h>
+#include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Philips SAA7110 video decoder driver");
 MODULE_AUTHOR("Pauline Middelink");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
-
-#define I2C_NAME(s) (s)->name
-
-#include <linux/videodev.h>
-#include <media/v4l2-common.h>
-#include <linux/video_decoder.h>
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 #define SAA7110_MAX_INPUT	9	/* 6 CVBS, 3 SVHS */
 #define SAA7110_MAX_OUTPUT	0	/* its a decoder only */
 
-#define	I2C_SAA7110		0x9C	/* or 0x9E */
-
 #define SAA7110_NR_REG		0x35
 
 struct saa7110 {
@@ -81,10 +69,7 @@
 /* I2C support functions						   */
 /* ----------------------------------------------------------------------- */
 
-static int
-saa7110_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+static int saa7110_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct saa7110 *decoder = i2c_get_clientdata(client);
 
@@ -92,10 +77,7 @@
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static int
-saa7110_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int saa7110_write_block(struct i2c_client *client, const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg = *data;		/* first register to write to */
@@ -115,8 +97,8 @@
 		memcpy(decoder->reg + reg, data + 1, len - 1);
 	} else {
 		for (++data, --len; len; len--) {
-			if ((ret = saa7110_write(client, reg++,
-						 *data++)) < 0)
+			ret = saa7110_write(client, reg++, *data++);
+			if (ret < 0)
 				break;
 		}
 	}
@@ -124,8 +106,7 @@
 	return ret;
 }
 
-static inline int
-saa7110_read (struct i2c_client *client)
+static inline int saa7110_read(struct i2c_client *client)
 {
 	return i2c_smbus_read_byte(client);
 }
@@ -138,9 +119,7 @@
 #define FRESP_06H_SVIDEO 0x83	//0xC0
 
 
-static int
-saa7110_selmux (struct i2c_client *client,
-		int                chan)
+static int saa7110_selmux(struct i2c_client *client, int chan)
 {
 	static const unsigned char modes[9][8] = {
 		/* mode 0 */
@@ -197,8 +176,7 @@
 	/* 0x30 */ 0x44, 0x71, 0x02, 0x8C, 0x02
 };
 
-static int
-determine_norm (struct i2c_client *client)
+static int determine_norm(struct i2c_client *client)
 {
 	DEFINE_WAIT(wait);
 	struct saa7110 *decoder = i2c_get_clientdata(client);
@@ -212,29 +190,23 @@
 	finish_wait(&decoder->wq, &wait);
 	status = saa7110_read(client);
 	if (status & 0x40) {
-		dprintk(1, KERN_INFO "%s: status=0x%02x (no signal)\n",
-			I2C_NAME(client), status);
+		v4l_dbg(1, debug, client, "status=0x%02x (no signal)\n", status);
 		return decoder->norm;	// no change
 	}
 	if ((status & 3) == 0) {
 		saa7110_write(client, 0x06, 0x83);
 		if (status & 0x20) {
-			dprintk(1,
-				KERN_INFO
-				"%s: status=0x%02x (NTSC/no color)\n",
-				I2C_NAME(client), status);
+			v4l_dbg(1, debug, client, "status=0x%02x (NTSC/no color)\n", status);
 			//saa7110_write(client,0x2E,0x81);
 			return VIDEO_MODE_NTSC;
 		}
-		dprintk(1, KERN_INFO "%s: status=0x%02x (PAL/no color)\n",
-			I2C_NAME(client), status);
+		v4l_dbg(1, debug, client, "status=0x%02x (PAL/no color)\n", status);
 		//saa7110_write(client,0x2E,0x9A);
 		return VIDEO_MODE_PAL;
 	}
 	//saa7110_write(client,0x06,0x03);
 	if (status & 0x20) {	/* 60Hz */
-		dprintk(1, KERN_INFO "%s: status=0x%02x (NTSC)\n",
-			I2C_NAME(client), status);
+		v4l_dbg(1, debug, client, "status=0x%02x (NTSC)\n", status);
 		saa7110_write(client, 0x0D, 0x86);
 		saa7110_write(client, 0x0F, 0x50);
 		saa7110_write(client, 0x11, 0x2C);
@@ -254,13 +226,11 @@
 
 	status = saa7110_read(client);
 	if ((status & 0x03) == 0x01) {
-		dprintk(1, KERN_INFO "%s: status=0x%02x (SECAM)\n",
-			I2C_NAME(client), status);
+		v4l_dbg(1, debug, client, "status=0x%02x (SECAM)\n", status);
 		saa7110_write(client, 0x0D, 0x87);
 		return VIDEO_MODE_SECAM;
 	}
-	dprintk(1, KERN_INFO "%s: status=0x%02x (PAL)\n", I2C_NAME(client),
-		status);
+	v4l_dbg(1, debug, client, "status=0x%02x (PAL)\n", status);
 	return VIDEO_MODE_PAL;
 }
 
@@ -286,8 +256,8 @@
 		    VIDEO_DECODER_SECAM | VIDEO_DECODER_AUTO;
 		dc->inputs = SAA7110_MAX_INPUT;
 		dc->outputs = SAA7110_MAX_OUTPUT;
-	}
 		break;
+	}
 
 	case DECODER_GET_STATUS:
 	{
@@ -295,8 +265,8 @@
 		int res = 0;
 
 		status = saa7110_read(client);
-		dprintk(1, KERN_INFO "%s: status=0x%02x norm=%d\n",
-			I2C_NAME(client), status, decoder->norm);
+		v4l_dbg(1, debug, client, "status=0x%02x norm=%d\n",
+			       status, decoder->norm);
 		if (!(status & 0x40))
 			res |= DECODER_STATUS_GOOD;
 		if (status & 0x03)
@@ -314,8 +284,8 @@
 			break;
 		}
 		*(int *) arg = res;
-	}
 		break;
+	}
 
 	case DECODER_SET_NORM:
 		v = *(int *) arg;
@@ -328,34 +298,24 @@
 				saa7110_write(client, 0x0F, 0x50);
 				saa7110_write(client, 0x11, 0x2C);
 				//saa7110_write(client, 0x2E, 0x81);
-				dprintk(1,
-					KERN_INFO "%s: switched to NTSC\n",
-					I2C_NAME(client));
+				v4l_dbg(1, debug, client, "switched to NTSC\n");
 				break;
 			case VIDEO_MODE_PAL:
 				saa7110_write(client, 0x0D, 0x86);
 				saa7110_write(client, 0x0F, 0x10);
 				saa7110_write(client, 0x11, 0x59);
 				//saa7110_write(client, 0x2E, 0x9A);
-				dprintk(1,
-					KERN_INFO "%s: switched to PAL\n",
-					I2C_NAME(client));
+				v4l_dbg(1, debug, client, "switched to PAL\n");
 				break;
 			case VIDEO_MODE_SECAM:
 				saa7110_write(client, 0x0D, 0x87);
 				saa7110_write(client, 0x0F, 0x10);
 				saa7110_write(client, 0x11, 0x59);
 				//saa7110_write(client, 0x2E, 0x9A);
-				dprintk(1,
-					KERN_INFO
-					"%s: switched to SECAM\n",
-					I2C_NAME(client));
+				v4l_dbg(1, debug, client, "switched to SECAM\n");
 				break;
 			case VIDEO_MODE_AUTO:
-				dprintk(1,
-					KERN_INFO
-					"%s: TV standard detection...\n",
-					I2C_NAME(client));
+				v4l_dbg(1, debug, client, "switched to AUTO\n");
 				decoder->norm = determine_norm(client);
 				*(int *) arg = decoder->norm;
 				break;
@@ -368,15 +328,12 @@
 	case DECODER_SET_INPUT:
 		v = *(int *) arg;
 		if (v < 0 || v > SAA7110_MAX_INPUT) {
-			dprintk(1,
-				KERN_INFO "%s: input=%d not available\n",
-				I2C_NAME(client), v);
+			v4l_dbg(1, debug, client, "input=%d not available\n", v);
 			return -EINVAL;
 		}
 		if (decoder->input != v) {
 			saa7110_selmux(client, v);
-			dprintk(1, KERN_INFO "%s: switched to input=%d\n",
-				I2C_NAME(client), v);
+			v4l_dbg(1, debug, client, "switched to input=%d\n", v);
 		}
 		break;
 
@@ -392,8 +349,7 @@
 		if (decoder->enable != v) {
 			decoder->enable = v;
 			saa7110_write(client, 0x0E, v ? 0x18 : 0x80);
-			dprintk(1, KERN_INFO "%s: YUV %s\n", I2C_NAME(client),
-				v ? "on" : "off");
+			v4l_dbg(1, debug, client, "YUV %s\n", v ? "on" : "off");
 		}
 		break;
 
@@ -423,23 +379,23 @@
 			saa7110_write(client, 0x07,
 				      (decoder->hue >> 8) - 128);
 		}
-	}
 		break;
+	}
 
 	case DECODER_DUMP:
+		if (!debug)
+			break;
 		for (v = 0; v < SAA7110_NR_REG; v += 16) {
 			int j;
-			dprintk(1, KERN_DEBUG "%s: %02x:", I2C_NAME(client),
-				v);
+			v4l_dbg(1, debug, client, "%02x:", v);
 			for (j = 0; j < 16 && v + j < SAA7110_NR_REG; j++)
-				dprintk(1, " %02x", decoder->reg[v + j]);
-			dprintk(1, "\n");
+				printk(KERN_CONT " %02x", decoder->reg[v + j]);
+			printk(KERN_CONT "\n");
 		}
 		break;
 
 	default:
-		dprintk(1, KERN_INFO "unknown saa7110_command??(%d)\n",
-			cmd);
+		v4l_dbg(1, debug, client, "unknown command %08x\n", cmd);
 		return -EINVAL;
 	}
 	return 0;
@@ -451,55 +407,28 @@
  * Generic i2c probe
  * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
  */
-static unsigned short normal_i2c[] = {
-	I2C_SAA7110 >> 1,
-	(I2C_SAA7110 >> 1) + 1,
-	I2C_CLIENT_END
-};
 
-static unsigned short ignore = I2C_CLIENT_END;
+static unsigned short normal_i2c[] = { 0x9c >> 1, 0x9e >> 1, I2C_CLIENT_END };
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
+I2C_CLIENT_INSMOD;
 
-static struct i2c_driver i2c_driver_saa7110;
-
-static int
-saa7110_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
+static int saa7110_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
-	struct i2c_client *client;
 	struct saa7110 *decoder;
 	int rv;
 
-	dprintk(1,
-		KERN_INFO
-		"saa7110.c: detecting saa7110 client on address 0x%x\n",
-		address << 1);
-
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality
-	    (adapter,
-	     I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter,
+		I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_saa7110;
-	strlcpy(I2C_NAME(client), "saa7110", sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	decoder = kzalloc(sizeof(struct saa7110), GFP_KERNEL);
-	if (!decoder) {
-		kfree(client);
+	if (!decoder)
 		return -ENOMEM;
-	}
 	decoder->norm = VIDEO_MODE_PAL;
 	decoder->input = 0;
 	decoder->enable = 1;
@@ -510,18 +439,10 @@
 	init_waitqueue_head(&decoder->wq);
 	i2c_set_clientdata(client, decoder);
 
-	rv = i2c_attach_client(client);
-	if (rv) {
-		kfree(client);
-		kfree(decoder);
-		return rv;
-	}
-
 	rv = saa7110_write_block(client, initseq, sizeof(initseq));
-	if (rv < 0)
-		dprintk(1, KERN_ERR "%s_attach: init status %d\n",
-			I2C_NAME(client), rv);
-	else {
+	if (rv < 0) {
+		v4l_dbg(1, debug, client, "init status %d\n", rv);
+	} else {
 		int ver, status;
 		saa7110_write(client, 0x21, 0x10);
 		saa7110_write(client, 0x0e, 0x18);
@@ -530,10 +451,8 @@
 		saa7110_write(client, 0x0D, 0x06);
 		//mdelay(150);
 		status = saa7110_read(client);
-		dprintk(1,
-			KERN_INFO
-			"%s_attach: SAA7110A version %x at 0x%02x, status=0x%02x\n",
-			I2C_NAME(client), ver, client->addr << 1, status);
+		v4l_dbg(1, debug, client, "version %x, status=0x%02x\n",
+			       ver, status);
 		saa7110_write(client, 0x0D, 0x86);
 		saa7110_write(client, 0x0F, 0x10);
 		saa7110_write(client, 0x11, 0x59);
@@ -547,58 +466,25 @@
 	return 0;
 }
 
-static int
-saa7110_attach_adapter (struct i2c_adapter *adapter)
+static int saa7110_remove(struct i2c_client *client)
 {
-	dprintk(1,
-		KERN_INFO
-		"saa7110.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &saa7110_detect_client);
-}
-
-static int
-saa7110_detach_client (struct i2c_client *client)
-{
-	struct saa7110 *decoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(decoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_saa7110 = {
-	.driver = {
-		.name = "saa7110",
-	},
-
-	.id = I2C_DRIVERID_SAA7110,
-
-	.attach_adapter = saa7110_attach_adapter,
-	.detach_client = saa7110_detach_client,
-	.command = saa7110_command,
+static const struct i2c_device_id saa7110_id[] = {
+	{ "saa7110", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, saa7110_id);
 
-static int __init
-saa7110_init (void)
-{
-	return i2c_add_driver(&i2c_driver_saa7110);
-}
-
-static void __exit
-saa7110_exit (void)
-{
-	i2c_del_driver(&i2c_driver_saa7110);
-}
-
-module_init(saa7110_init);
-module_exit(saa7110_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "saa7110",
+	.driverid = I2C_DRIVERID_SAA7110,
+	.command = saa7110_command,
+	.probe = saa7110_probe,
+	.remove = saa7110_remove,
+	.id_table = saa7110_id,
+};
diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c
index 96c3d43..a4738a2 100644
--- a/drivers/media/video/saa7111.c
+++ b/drivers/media/video/saa7111.c
@@ -28,43 +28,24 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
 #include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Philips SAA7111 video decoder driver");
 MODULE_AUTHOR("Dave Perks");
 MODULE_LICENSE("GPL");
 
-
-#define I2C_NAME(s) (s)->name
-
-
 static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 #define SAA7111_NR_REG		0x18
@@ -77,14 +58,9 @@
 	int enable;
 };
 
-#define   I2C_SAA7111        0x48
-
 /* ----------------------------------------------------------------------- */
 
-static inline int
-saa7111_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+static inline int saa7111_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct saa7111 *decoder = i2c_get_clientdata(client);
 
@@ -92,8 +68,7 @@
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static inline void
-saa7111_write_if_changed(struct i2c_client *client, u8 reg, u8 value)
+static inline void saa7111_write_if_changed(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct saa7111 *decoder = i2c_get_clientdata(client);
 
@@ -103,10 +78,7 @@
 	}
 }
 
-static int
-saa7111_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int saa7111_write_block(struct i2c_client *client, const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg;
@@ -127,18 +99,17 @@
 				    decoder->reg[reg++] = data[1];
 				len -= 2;
 				data += 2;
-			} while (len >= 2 && data[0] == reg &&
-				 block_len < 32);
-			if ((ret = i2c_master_send(client, block_data,
-						   block_len)) < 0)
+			} while (len >= 2 && data[0] == reg && block_len < 32);
+			ret = i2c_master_send(client, block_data, block_len);
+			if (ret < 0)
 				break;
 		}
 	} else {
 		/* do some slow I2C emulation kind of thing */
 		while (len >= 2) {
 			reg = *data++;
-			if ((ret = saa7111_write(client, reg,
-						 *data++)) < 0)
+			ret = saa7111_write(client, reg, *data++);
+			if (ret < 0)
 				break;
 			len -= 2;
 		}
@@ -147,16 +118,13 @@
 	return ret;
 }
 
-static int
-saa7111_init_decoder (struct i2c_client *client,
-	      struct video_decoder_init *init)
+static int saa7111_init_decoder(struct i2c_client *client,
+		struct video_decoder_init *init)
 {
 	return saa7111_write_block(client, init->data, init->len);
 }
 
-static inline int
-saa7111_read (struct i2c_client *client,
-	      u8                 reg)
+static inline int saa7111_read(struct i2c_client *client, u8 reg)
 {
 	return i2c_smbus_read_byte_data(client, reg);
 }
@@ -203,28 +171,23 @@
 	0x17, 0x00,		/* 17 - VBI */
 };
 
-static int
-saa7111_command (struct i2c_client *client,
-		 unsigned int       cmd,
-		 void              *arg)
+static int saa7111_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct saa7111 *decoder = i2c_get_clientdata(client);
 
 	switch (cmd) {
-
 	case 0:
 		break;
 	case DECODER_INIT:
 	{
 		struct video_decoder_init *init = arg;
+		struct video_decoder_init vdi;
+
 		if (NULL != init)
 			return saa7111_init_decoder(client, init);
-		else {
-			struct video_decoder_init vdi;
-			vdi.data = saa7111_i2c_init;
-			vdi.len = sizeof(saa7111_i2c_init);
-			return saa7111_init_decoder(client, &vdi);
-		}
+		vdi.data = saa7111_i2c_init;
+		vdi.len = sizeof(saa7111_i2c_init);
+		return saa7111_init_decoder(client, &vdi);
 	}
 
 	case DECODER_DUMP:
@@ -234,15 +197,15 @@
 		for (i = 0; i < SAA7111_NR_REG; i += 16) {
 			int j;
 
-			printk(KERN_DEBUG "%s: %03x", I2C_NAME(client), i);
+			v4l_info(client, "%03x", i);
 			for (j = 0; j < 16 && i + j < SAA7111_NR_REG; ++j) {
-				printk(" %02x",
+				printk(KERN_CONT " %02x",
 				       saa7111_read(client, i + j));
 			}
-			printk("\n");
+			printk(KERN_CONT "\n");
 		}
-	}
 		break;
+	}
 
 	case DECODER_GET_CAPABILITIES:
 	{
@@ -255,8 +218,8 @@
 			     VIDEO_DECODER_CCIR;
 		cap->inputs = 8;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case DECODER_GET_STATUS:
 	{
@@ -265,8 +228,7 @@
 		int res;
 
 		status = saa7111_read(client, 0x1f);
-		dprintk(1, KERN_DEBUG "%s status: 0x%02x\n", I2C_NAME(client),
-			status);
+		v4l_dbg(1, debug, client, "status: 0x%02x\n", status);
 		res = 0;
 		if ((status & (1 << 6)) == 0) {
 			res |= DECODER_STATUS_GOOD;
@@ -294,8 +256,8 @@
 			res |= DECODER_STATUS_COLOR;
 		}
 		*iarg = res;
-	}
 		break;
+	}
 
 	case DECODER_SET_GPIO:
 	{
@@ -362,8 +324,8 @@
 
 		}
 		decoder->norm = *iarg;
-	}
 		break;
+	}
 
 	case DECODER_SET_INPUT:
 	{
@@ -387,8 +349,8 @@
 							     3) ? 0x80 :
 							    0));
 		}
-	}
 		break;
+	}
 
 	case DECODER_SET_OUTPUT:
 	{
@@ -398,8 +360,8 @@
 		if (*iarg != 0) {
 			return -EINVAL;
 		}
-	}
 		break;
+	}
 
 	case DECODER_ENABLE_OUTPUT:
 	{
@@ -439,8 +401,8 @@
 					      (decoder->reg[0x11] & 0xf3));
 			}
 		}
-	}
 		break;
+	}
 
 	case DECODER_SET_PICTURE:
 	{
@@ -454,8 +416,8 @@
 		saa7111_write(client, 0x0c, pic->colour >> 9);
 		/* We want -128 to 127 we get 0-65535 */
 		saa7111_write(client, 0x0d, (pic->hue - 32768) >> 8);
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -466,48 +428,23 @@
 
 /* ----------------------------------------------------------------------- */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] = { I2C_SAA7111 >> 1, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x48 >> 1, I2C_CLIENT_END };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_saa7111;
-
-static int
-saa7111_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
+static int saa7111_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	int i;
-	struct i2c_client *client;
 	struct saa7111 *decoder;
 	struct video_decoder_init vdi;
 
-	dprintk(1,
-		KERN_INFO
-		"saa7111.c: detecting saa7111 client on address 0x%x\n",
-		address << 1);
-
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_saa7111;
-	strlcpy(I2C_NAME(client), "saa7111", sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	decoder = kzalloc(sizeof(struct saa7111), GFP_KERNEL);
 	if (decoder == NULL) {
@@ -519,82 +456,37 @@
 	decoder->enable = 1;
 	i2c_set_clientdata(client, decoder);
 
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(decoder);
-		return i;
-	}
-
 	vdi.data = saa7111_i2c_init;
 	vdi.len = sizeof(saa7111_i2c_init);
 	i = saa7111_init_decoder(client, &vdi);
 	if (i < 0) {
-		dprintk(1, KERN_ERR "%s_attach error: init status %d\n",
-			I2C_NAME(client), i);
+		v4l_dbg(1, debug, client, "init status %d\n", i);
 	} else {
-		dprintk(1,
-			KERN_INFO
-			"%s_attach: chip version %x at address 0x%x\n",
-			I2C_NAME(client), saa7111_read(client, 0x00) >> 4,
-			client->addr << 1);
+		v4l_dbg(1, debug, client, "revision %x\n",
+			saa7111_read(client, 0x00) >> 4);
 	}
-
 	return 0;
 }
 
-static int
-saa7111_attach_adapter (struct i2c_adapter *adapter)
+static int saa7111_remove(struct i2c_client *client)
 {
-	dprintk(1,
-		KERN_INFO
-		"saa7111.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &saa7111_detect_client);
-}
-
-static int
-saa7111_detach_client (struct i2c_client *client)
-{
-	struct saa7111 *decoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(decoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_saa7111 = {
-	.driver = {
-		.name = "saa7111",
-	},
-
-	.id = I2C_DRIVERID_SAA7111A,
-
-	.attach_adapter = saa7111_attach_adapter,
-	.detach_client = saa7111_detach_client,
-	.command = saa7111_command,
+static const struct i2c_device_id saa7111_id[] = {
+	{ "saa7111_old", 0 },	/* "saa7111" maps to the saa7115 driver */
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, saa7111_id);
 
-static int __init
-saa7111_init (void)
-{
-	return i2c_add_driver(&i2c_driver_saa7111);
-}
-
-static void __exit
-saa7111_exit (void)
-{
-	i2c_del_driver(&i2c_driver_saa7111);
-}
-
-module_init(saa7111_init);
-module_exit(saa7111_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "saa7111",
+	.driverid = I2C_DRIVERID_SAA7111A,
+	.command = saa7111_command,
+	.probe = saa7111_probe,
+	.remove = saa7111_remove,
+	.id_table = saa7111_id,
+};
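
The hunks above replace saa7111's legacy attach_adapter/detach_client i2c binding (and its hand-rolled module_init/module_exit) with new-style probe()/remove() registration handed to the v4l2-i2c-drv-legacy helper through a v4l2_i2c_driver_data instance; the saa7114 and saa7185 conversions further down follow the same shape. As an illustrative aside (not part of the patch), here is a minimal skeleton of that pattern with hypothetical "foo" names standing in for a real decoder; the 0x42 address and the empty command handler are placeholders only.

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-i2c-drv-legacy.h>

MODULE_DESCRIPTION("skeleton of the new-style binding used above");
MODULE_LICENSE("GPL");

/* addresses probed by the legacy helper; 0x42 is only an example */
static unsigned short normal_i2c[] = { 0x42 >> 1, I2C_CLIENT_END };
I2C_CLIENT_INSMOD;

struct foo_state {
	int norm;
};

static int foo_command(struct i2c_client *client, unsigned cmd, void *arg)
{
	/* real drivers dispatch on the DECODER_ / ENCODER_ commands here */
	return -EINVAL;
}

static int foo_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct foo_state *state;

	/* same capability check the converted drivers do */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;
	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	i2c_set_clientdata(client, state);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	kfree(i2c_get_clientdata(client));
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

/* the v4l2-i2c-drv-legacy helper supplies module_init()/module_exit() */
static struct v4l2_i2c_driver_data v4l2_i2c_data = {
	.name = "foo",
	.command = foo_command,
	.probe = foo_probe,
	.remove = foo_remove,
	.id_table = foo_id,
};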
diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c
index e790755..7ca709f 100644
--- a/drivers/media/video/saa7114.c
+++ b/drivers/media/video/saa7114.c
@@ -29,43 +29,24 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
 #include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Philips SAA7114H video decoder driver");
 MODULE_AUTHOR("Maxim Yevtyushkin");
 MODULE_LICENSE("GPL");
 
-
-#define I2C_NAME(x) (x)->name
-
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 struct saa7114 {
@@ -81,9 +62,6 @@
 	int playback;
 };
 
-#define   I2C_SAA7114        0x42
-#define   I2C_SAA7114A       0x40
-
 #define   I2C_DELAY   10
 
 
@@ -129,18 +107,12 @@
 
 /* ----------------------------------------------------------------------- */
 
-static inline int
-saa7114_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+static inline int saa7114_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static int
-saa7114_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int saa7114_write_block(struct i2c_client *client, const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg;
@@ -160,18 +132,17 @@
 				reg++;
 				len -= 2;
 				data += 2;
-			} while (len >= 2 && data[0] == reg &&
-				 block_len < 32);
-			if ((ret = i2c_master_send(client, block_data,
-						   block_len)) < 0)
+			} while (len >= 2 && data[0] == reg && block_len < 32);
+			ret = i2c_master_send(client, block_data, block_len);
+			if (ret < 0)
 				break;
 		}
 	} else {
 		/* do some slow I2C emulation kind of thing */
 		while (len >= 2) {
 			reg = *data++;
-			if ((ret = saa7114_write(client, reg,
-						 *data++)) < 0)
+			ret = saa7114_write(client, reg, *data++);
+			if (ret < 0)
 				break;
 			len -= 2;
 		}
@@ -180,9 +151,7 @@
 	return ret;
 }
 
-static inline int
-saa7114_read (struct i2c_client *client,
-	      u8                 reg)
+static inline int saa7114_read(struct i2c_client *client, u8 reg)
 {
 	return i2c_smbus_read_byte_data(client, reg);
 }
@@ -452,15 +421,11 @@
 	0xef, 0x00
 };
 
-static int
-saa7114_command (struct i2c_client *client,
-		 unsigned int       cmd,
-		 void              *arg)
+static int saa7114_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct saa7114 *decoder = i2c_get_clientdata(client);
 
 	switch (cmd) {
-
 	case 0:
 		//dprintk(1, KERN_INFO "%s: writing init\n", I2C_NAME(client));
 		//saa7114_write_block(client, init, sizeof(init));
@@ -470,27 +435,28 @@
 	{
 		int i;
 
-		dprintk(1, KERN_INFO "%s: decoder dump\n", I2C_NAME(client));
+		if (!debug)
+			break;
+		v4l_info(client, "decoder dump\n");
 
 		for (i = 0; i < 32; i += 16) {
 			int j;
 
-			printk(KERN_DEBUG "%s: %03x", I2C_NAME(client), i);
+			v4l_info(client, "%03x", i);
 			for (j = 0; j < 16; ++j) {
-				printk(" %02x",
+				printk(KERN_CONT " %02x",
 				       saa7114_read(client, i + j));
 			}
-			printk("\n");
+			printk(KERN_CONT "\n");
 		}
-	}
 		break;
+	}
 
 	case DECODER_GET_CAPABILITIES:
 	{
 		struct video_decoder_capability *cap = arg;
 
-		dprintk(1, KERN_DEBUG "%s: decoder get capabilities\n",
-			I2C_NAME(client));
+		v4l_dbg(1, debug, client, "get capabilities\n");
 
 		cap->flags = VIDEO_DECODER_PAL |
 			     VIDEO_DECODER_NTSC |
@@ -498,8 +464,8 @@
 			     VIDEO_DECODER_CCIR;
 		cap->inputs = 8;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case DECODER_GET_STATUS:
 	{
@@ -509,8 +475,7 @@
 
 		status = saa7114_read(client, 0x1f);
 
-		dprintk(1, KERN_DEBUG "%s status: 0x%02x\n", I2C_NAME(client),
-			status);
+		v4l_dbg(1, debug, client, "status: 0x%02x\n", status);
 		res = 0;
 		if ((status & (1 << 6)) == 0) {
 			res |= DECODER_STATUS_GOOD;
@@ -538,8 +503,8 @@
 			res |= DECODER_STATUS_COLOR;
 		}
 		*iarg = res;
-	}
 		break;
+	}
 
 	case DECODER_SET_NORM:
 	{
@@ -547,12 +512,11 @@
 
 		short int hoff = 0, voff = 0, w = 0, h = 0;
 
-		dprintk(1, KERN_DEBUG "%s: decoder set norm ",
-			I2C_NAME(client));
-		switch (*iarg) {
+		v4l_dbg(1, debug, client, "set norm\n");
 
+		switch (*iarg) {
 		case VIDEO_MODE_NTSC:
-			dprintk(1, "NTSC\n");
+			v4l_dbg(1, debug, client, "NTSC\n");
 			decoder->reg[REG_ADDR(0x06)] =
 			    SAA_7114_NTSC_HSYNC_START;
 			decoder->reg[REG_ADDR(0x07)] =
@@ -571,7 +535,7 @@
 			break;
 
 		case VIDEO_MODE_PAL:
-			dprintk(1, "PAL\n");
+			v4l_dbg(1, debug, client, "PAL\n");
 			decoder->reg[REG_ADDR(0x06)] =
 			    SAA_7114_PAL_HSYNC_START;
 			decoder->reg[REG_ADDR(0x07)] =
@@ -590,9 +554,8 @@
 			break;
 
 		default:
-			dprintk(1, " Unknown video mode!!!\n");
+			v4l_dbg(1, debug, client, "Unknown video mode\n");
 			return -EINVAL;
-
 		}
 
 
@@ -644,22 +607,20 @@
 		saa7114_write(client, 0x80, 0x36);	// i-port and scaler back end clock selection
 
 		decoder->norm = *iarg;
-	}
 		break;
+	}
 
 	case DECODER_SET_INPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_DEBUG "%s: decoder set input (%d)\n",
-			I2C_NAME(client), *iarg);
+		v4l_dbg(1, debug, client, "set input (%d)\n", *iarg);
 		if (*iarg < 0 || *iarg > 7) {
 			return -EINVAL;
 		}
 
 		if (decoder->input != *iarg) {
-			dprintk(1, KERN_DEBUG "%s: now setting %s input\n",
-				I2C_NAME(client),
+			v4l_dbg(1, debug, client, "now setting %s input\n",
 				*iarg >= 6 ? "S-Video" : "Composite");
 			decoder->input = *iarg;
 
@@ -690,30 +651,29 @@
 			saa7114_write(client, 0x0e,
 				      decoder->reg[REG_ADDR(0x0e)]);
 		}
-	}
 		break;
+	}
 
 	case DECODER_SET_OUTPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_DEBUG "%s: decoder set output\n",
-			I2C_NAME(client));
+		v4l_dbg(1, debug, client, "set output\n");
 
 		/* not much choice of outputs */
 		if (*iarg != 0) {
 			return -EINVAL;
 		}
-	}
 		break;
+	}
 
 	case DECODER_ENABLE_OUTPUT:
 	{
 		int *iarg = arg;
 		int enable = (*iarg != 0);
 
-		dprintk(1, KERN_DEBUG "%s: decoder %s output\n",
-			I2C_NAME(client), enable ? "enable" : "disable");
+		v4l_dbg(1, debug, client, "%s output\n",
+			enable ? "enable" : "disable");
 
 		decoder->playback = !enable;
 
@@ -754,18 +714,16 @@
 			saa7114_write(client, 0x80, 0x36);
 
 		}
-	}
 		break;
+	}
 
 	case DECODER_SET_PICTURE:
 	{
 		struct video_picture *pic = arg;
 
-		dprintk(1,
-			KERN_DEBUG
-			"%s: decoder set picture bright=%d contrast=%d saturation=%d hue=%d\n",
-			I2C_NAME(client), pic->brightness, pic->contrast,
-			pic->colour, pic->hue);
+		v4l_dbg(1, debug, client,
+			"decoder set picture bright=%d contrast=%d saturation=%d hue=%d\n",
+			pic->brightness, pic->contrast, pic->colour, pic->hue);
 
 		if (decoder->bright != pic->brightness) {
 			/* We want 0 to 255 we get 0-65535 */
@@ -789,8 +747,8 @@
 			saa7114_write(client, 0x0d,
 				      (decoder->hue - 32768) >> 8);
 		}
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -801,58 +759,30 @@
 
 /* ----------------------------------------------------------------------- */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] =
-    { I2C_SAA7114 >> 1, I2C_SAA7114A >> 1, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x42 >> 1, 0x40 >> 1, I2C_CLIENT_END };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_saa7114;
-
-static int
-saa7114_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
+static int saa7114_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	int i, err[30];
 	short int hoff = SAA_7114_NTSC_HOFFSET;
 	short int voff = SAA_7114_NTSC_VOFFSET;
 	short int w = SAA_7114_NTSC_WIDTH;
 	short int h = SAA_7114_NTSC_HEIGHT;
-	struct i2c_client *client;
 	struct saa7114 *decoder;
 
-	dprintk(1,
-		KERN_INFO
-		"saa7114.c: detecting saa7114 client on address 0x%x\n",
-		address << 1);
-
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_saa7114;
-	strlcpy(I2C_NAME(client), "saa7114", sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	decoder = kzalloc(sizeof(struct saa7114), GFP_KERNEL);
-	if (decoder == NULL) {
-		kfree(client);
+	if (decoder == NULL)
 		return -ENOMEM;
-	}
 	decoder->norm = VIDEO_MODE_NTSC;
 	decoder->input = -1;
 	decoder->enable = 1;
@@ -937,8 +867,7 @@
 	decoder->reg[REG_ADDR(0x0e)] |= 1;	// combfilter on
 
 
-	dprintk(1, KERN_DEBUG "%s_attach: starting decoder init\n",
-		I2C_NAME(client));
+	v4l_dbg(1, debug, client, "starting init\n");
 
 	err[0] =
 	    saa7114_write_block(client, decoder->reg + (0x20 << 1),
@@ -962,28 +891,23 @@
 
 	for (i = 0; i <= 5; i++) {
 		if (err[i] < 0) {
-			dprintk(1,
-				KERN_ERR
-				"%s_attach: init error %d at stage %d, leaving attach.\n",
-				I2C_NAME(client), i, err[i]);
+			v4l_dbg(1, debug, client,
+				"init error %d at stage %d, leaving attach.\n",
+				i, err[i]);
 			kfree(decoder);
-			kfree(client);
-			return 0;
+			return -EIO;
 		}
 	}
 
 	for (i = 6; i < 8; i++) {
-		dprintk(1,
-			KERN_DEBUG
-			"%s_attach: reg[0x%02x] = 0x%02x (0x%02x)\n",
-			I2C_NAME(client), i, saa7114_read(client, i),
+		v4l_dbg(1, debug, client,
+			"reg[0x%02x] = 0x%02x (0x%02x)\n",
+			i, saa7114_read(client, i),
 			decoder->reg[REG_ADDR(i)]);
 	}
 
-	dprintk(1,
-		KERN_DEBUG
-		"%s_attach: performing decoder reset sequence\n",
-		I2C_NAME(client));
+	v4l_dbg(1, debug, client,
+		"performing decoder reset sequence\n");
 
 	err[6] = saa7114_write(client, 0x80, 0x06);	// i-port and scaler backend clock selection, task A&B off
 	err[7] = saa7114_write(client, 0x88, 0xd8);	// sw reset scaler
@@ -991,19 +915,15 @@
 
 	for (i = 6; i <= 8; i++) {
 		if (err[i] < 0) {
-			dprintk(1,
-				KERN_ERR
-				"%s_attach: init error %d at stage %d, leaving attach.\n",
-				I2C_NAME(client), i, err[i]);
+			v4l_dbg(1, debug, client,
+				"init error %d at stage %d, leaving attach.\n",
+				i, err[i]);
 			kfree(decoder);
-			kfree(client);
-			return 0;
+			return -EIO;
 		}
 	}
 
-	dprintk(1, KERN_INFO "%s_attach: performing the rest of init\n",
-		I2C_NAME(client));
-
+	v4l_dbg(1, debug, client, "performing the rest of init\n");
 
 	err[9] = saa7114_write(client, 0x01, decoder->reg[REG_ADDR(0x01)]);
 	err[10] = saa7114_write_block(client, decoder->reg + (0x03 << 1), (0x1e + 1 - 0x03) << 1);	// big seq
@@ -1039,37 +959,32 @@
 
 	for (i = 9; i <= 18; i++) {
 		if (err[i] < 0) {
-			dprintk(1,
-				KERN_ERR
-				"%s_attach: init error %d at stage %d, leaving attach.\n",
-				I2C_NAME(client), i, err[i]);
+			v4l_dbg(1, debug, client,
+				"init error %d at stage %d, leaving attach.\n",
+				i, err[i]);
 			kfree(decoder);
-			kfree(client);
-			return 0;
+			return -EIO;
 		}
 	}
 
 
 	for (i = 6; i < 8; i++) {
-		dprintk(1,
-			KERN_DEBUG
-			"%s_attach: reg[0x%02x] = 0x%02x (0x%02x)\n",
-			I2C_NAME(client), i, saa7114_read(client, i),
+		v4l_dbg(1, debug, client,
+			"reg[0x%02x] = 0x%02x (0x%02x)\n",
+			i, saa7114_read(client, i),
 			decoder->reg[REG_ADDR(i)]);
 	}
 
 
 	for (i = 0x11; i <= 0x13; i++) {
-		dprintk(1,
-			KERN_DEBUG
-			"%s_attach: reg[0x%02x] = 0x%02x (0x%02x)\n",
-			I2C_NAME(client), i, saa7114_read(client, i),
+		v4l_dbg(1, debug, client,
+			"reg[0x%02x] = 0x%02x (0x%02x)\n",
+			i, saa7114_read(client, i),
 			decoder->reg[REG_ADDR(i)]);
 	}
 
 
-	dprintk(1, KERN_DEBUG "%s_attach: setting video input\n",
-		I2C_NAME(client));
+	v4l_dbg(1, debug, client, "setting video input\n");
 
 	err[19] =
 	    saa7114_write(client, 0x02, decoder->reg[REG_ADDR(0x02)]);
@@ -1080,20 +995,15 @@
 
 	for (i = 19; i <= 21; i++) {
 		if (err[i] < 0) {
-			dprintk(1,
-				KERN_ERR
-				"%s_attach: init error %d at stage %d, leaving attach.\n",
-				I2C_NAME(client), i, err[i]);
+			v4l_dbg(1, debug, client,
+				"init error %d at stage %d, leaving attach.\n",
+				i, err[i]);
 			kfree(decoder);
-			kfree(client);
-			return 0;
+			return -EIO;
 		}
 	}
 
-	dprintk(1,
-		KERN_DEBUG
-		"%s_attach: performing decoder reset sequence\n",
-		I2C_NAME(client));
+	v4l_dbg(1, debug, client, "performing decoder reset sequence\n");
 
 	err[22] = saa7114_write(client, 0x88, 0xd8);	// sw reset scaler
 	err[23] = saa7114_write(client, 0x88, 0xf8);	// sw reset scaler release
@@ -1102,13 +1012,11 @@
 
 	for (i = 22; i <= 24; i++) {
 		if (err[i] < 0) {
-			dprintk(1,
-				KERN_ERR
-				"%s_attach: init error %d at stage %d, leaving attach.\n",
-				I2C_NAME(client), i, err[i]);
+			v4l_dbg(1, debug, client,
+				"init error %d at stage %d, leaving attach.\n",
+				i, err[i]);
 			kfree(decoder);
-			kfree(client);
-			return 0;
+			return -EIO;
 		}
 	}
 
@@ -1116,101 +1024,45 @@
 	err[26] = saa7114_write(client, 0x07, init[REG_ADDR(0x07)]);
 	err[27] = saa7114_write(client, 0x10, init[REG_ADDR(0x10)]);
 
-	dprintk(1,
-		KERN_INFO
-		"%s_attach: chip version %x, decoder status 0x%02x\n",
-		I2C_NAME(client), saa7114_read(client, 0x00) >> 4,
+	v4l_dbg(1, debug, client, "chip version %x, decoder status 0x%02x\n",
+		saa7114_read(client, 0x00) >> 4,
 		saa7114_read(client, 0x1f));
-	dprintk(1,
-		KERN_DEBUG
-		"%s_attach: power save control: 0x%02x, scaler status: 0x%02x\n",
-		I2C_NAME(client), saa7114_read(client, 0x88),
+	v4l_dbg(1, debug, client,
+		"power save control: 0x%02x, scaler status: 0x%02x\n",
+		saa7114_read(client, 0x88),
 		saa7114_read(client, 0x8f));
 
 
 	for (i = 0x94; i < 0x96; i++) {
-		dprintk(1,
-			KERN_DEBUG
-			"%s_attach: reg[0x%02x] = 0x%02x (0x%02x)\n",
-			I2C_NAME(client), i, saa7114_read(client, i),
+		v4l_dbg(1, debug, client,
+			"reg[0x%02x] = 0x%02x (0x%02x)\n",
+			i, saa7114_read(client, i),
 			decoder->reg[REG_ADDR(i)]);
 	}
 
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(decoder);
-		return i;
-	}
-
 	//i = saa7114_write_block(client, init, sizeof(init));
-	i = 0;
-	if (i < 0) {
-		dprintk(1, KERN_ERR "%s_attach error: init status %d\n",
-			I2C_NAME(client), i);
-	} else {
-		dprintk(1,
-			KERN_INFO
-			"%s_attach: chip version %x at address 0x%x\n",
-			I2C_NAME(client), saa7114_read(client, 0x00) >> 4,
-			client->addr << 1);
-	}
-
 	return 0;
 }
 
-static int
-saa7114_attach_adapter (struct i2c_adapter *adapter)
+static int saa7114_remove(struct i2c_client *client)
 {
-	dprintk(1,
-		KERN_INFO
-		"saa7114.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &saa7114_detect_client);
-}
-
-static int
-saa7114_detach_client (struct i2c_client *client)
-{
-	struct saa7114 *decoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(decoder);
-	kfree(client);
-
+	kfree(i2c_get_clientdata(client));
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_saa7114 = {
-	.driver = {
-		.name = "saa7114",
-	},
-
-	.id = I2C_DRIVERID_SAA7114,
-
-	.attach_adapter = saa7114_attach_adapter,
-	.detach_client = saa7114_detach_client,
-	.command = saa7114_command,
+static const struct i2c_device_id saa7114_id[] = {
+	{ "saa7114_old", 0 },	/* "saa7114" maps to the saa7115 driver */
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, saa7114_id);
 
-static int __init
-saa7114_init (void)
-{
-	return i2c_add_driver(&i2c_driver_saa7114);
-}
-
-static void __exit
-saa7114_exit (void)
-{
-	i2c_del_driver(&i2c_driver_saa7114);
-}
-
-module_init(saa7114_init);
-module_exit(saa7114_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "saa7114",
+	.driverid = I2C_DRIVERID_SAA7114,
+	.command = saa7114_command,
+	.probe = saa7114_probe,
+	.remove = saa7114_remove,
+	.id_table = saa7114_id,
+};
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index d0e83fe..cc02fb1 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -29,7 +29,7 @@
  * Note: the saa7126 is identical to the saa7127, and the saa7128 is
  * identical to the saa7129, except that the saa7126 and saa7128 have
  * macrovision anti-taping support. This driver will almost certainly
- * work find for those chips, except of course for the missing anti-taping
+ * work fine for those chips, except of course for the missing anti-taping
  * support.
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index b686bfa..2491844 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -996,7 +996,7 @@
 		goto fail4;
 	}
 	printk(KERN_INFO "%s: registered device video%d [v4l2]\n",
-	       dev->name,dev->video_dev->minor & 0x1f);
+	       dev->name, dev->video_dev->num);
 
 	dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
 
@@ -1005,7 +1005,7 @@
 	if (err < 0)
 		goto fail4;
 	printk(KERN_INFO "%s: registered device vbi%d\n",
-	       dev->name,dev->vbi_dev->minor & 0x1f);
+	       dev->name, dev->vbi_dev->num);
 
 	if (card_has_radio(dev)) {
 		dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
@@ -1014,7 +1014,7 @@
 		if (err < 0)
 			goto fail4;
 		printk(KERN_INFO "%s: registered device radio%d\n",
-		       dev->name,dev->radio_dev->minor & 0x1f);
+		       dev->name, dev->radio_dev->num);
 	}
 
 	/* everything worked */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 87c1098..8c46115 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -535,11 +535,16 @@
 				struct tda1004x_config *cdec_conf,
 				struct tda827x_config *tuner_conf)
 {
-	dev->dvb.frontend = dvb_attach(tda10046_attach, cdec_conf, &dev->i2c_adap);
-	if (dev->dvb.frontend) {
+	struct videobuf_dvb_frontend *fe0;
+
+	/* Get the first frontend */
+	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+
+	fe0->dvb.frontend = dvb_attach(tda10046_attach, cdec_conf, &dev->i2c_adap);
+	if (fe0->dvb.frontend) {
 		if (cdec_conf->i2c_gate)
-			dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
-		if (dvb_attach(tda827x_attach, dev->dvb.frontend,
+			fe0->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
+		if (dvb_attach(tda827x_attach, fe0->dvb.frontend,
 			       cdec_conf->tuner_address,
 			       &dev->i2c_adap, tuner_conf))
 			return 0;
@@ -944,12 +949,30 @@
 {
 	int ret;
 	int attach_xc3028 = 0;
+	struct videobuf_dvb_frontend *fe0;
+
+	/* FIXME: add support for multi-frontend */
+	mutex_init(&dev->frontends.lock);
+	INIT_LIST_HEAD(&dev->frontends.felist);
+	dev->frontends.active_fe_id = 0;
+
+	printk(KERN_INFO "%s() allocating 1 frontend\n", __func__);
+
+	if (videobuf_dvb_alloc_frontend(&dev->frontends, 1) == NULL) {
+		printk(KERN_ERR "%s() failed to alloc\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Get the first frontend */
+	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+	if (!fe0)
+		return -EINVAL;
 
 	/* init struct videobuf_dvb */
 	dev->ts.nr_bufs    = 32;
 	dev->ts.nr_packets = 32*4;
-	dev->dvb.name = dev->name;
-	videobuf_queue_sg_init(&dev->dvb.dvbq, &saa7134_ts_qops,
+	fe0->dvb.name = dev->name;
+	videobuf_queue_sg_init(&fe0->dvb.dvbq, &saa7134_ts_qops,
 			    &dev->pci->dev, &dev->slock,
 			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 			    V4L2_FIELD_ALTERNATE,
@@ -959,47 +982,47 @@
 	switch (dev->board) {
 	case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
 		dprintk("pinnacle 300i dvb setup\n");
-		dev->dvb.frontend = dvb_attach(mt352_attach, &pinnacle_300i,
+		fe0->dvb.frontend = dvb_attach(mt352_attach, &pinnacle_300i,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.tuner_ops.set_params = mt352_pinnacle_tuner_set_params;
+		if (fe0->dvb.frontend) {
+			fe0->dvb.frontend->ops.tuner_ops.set_params = mt352_pinnacle_tuner_set_params;
 		}
 		break;
 	case SAA7134_BOARD_AVERMEDIA_777:
 	case SAA7134_BOARD_AVERMEDIA_A16AR:
 		dprintk("avertv 777 dvb setup\n");
-		dev->dvb.frontend = dvb_attach(mt352_attach, &avermedia_777,
+		fe0->dvb.frontend = dvb_attach(mt352_attach, &avermedia_777,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend) {
+			dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &dev->i2c_adap, 0x61,
 				   TUNER_PHILIPS_TD1316);
 		}
 		break;
 	case SAA7134_BOARD_AVERMEDIA_A16D:
 		dprintk("AverMedia A16D dvb setup\n");
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 						&avermedia_xc3028_mt352_dev,
 						&dev->i2c_adap);
 		attach_xc3028 = 1;
 		break;
 	case SAA7134_BOARD_MD7134:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 					       &medion_cardbus,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend) {
+			dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &dev->i2c_adap, medion_cardbus.tuner_address,
 				   TUNER_PHILIPS_FMD1216ME_MK3);
 		}
 		break;
 	case SAA7134_BOARD_PHILIPS_TOUGH:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 					       &philips_tu1216_60_config,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.tuner_ops.init = philips_tu1216_init;
-			dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda6651_pll_set;
+		if (fe0->dvb.frontend) {
+			fe0->dvb.frontend->ops.tuner_ops.init = philips_tu1216_init;
+			fe0->dvb.frontend->ops.tuner_ops.set_params = philips_tda6651_pll_set;
 		}
 		break;
 	case SAA7134_BOARD_FLYDVBTDUO:
@@ -1010,24 +1033,24 @@
 		break;
 	case SAA7134_BOARD_PHILIPS_EUROPA:
 	case SAA7134_BOARD_VIDEOMATE_DVBT_300:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 					       &philips_europa_config,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->original_demod_sleep = dev->dvb.frontend->ops.sleep;
-			dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
-			dev->dvb.frontend->ops.tuner_ops.init = philips_europa_tuner_init;
-			dev->dvb.frontend->ops.tuner_ops.sleep = philips_europa_tuner_sleep;
-			dev->dvb.frontend->ops.tuner_ops.set_params = philips_td1316_tuner_set_params;
+		if (fe0->dvb.frontend) {
+			dev->original_demod_sleep = fe0->dvb.frontend->ops.sleep;
+			fe0->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
+			fe0->dvb.frontend->ops.tuner_ops.init = philips_europa_tuner_init;
+			fe0->dvb.frontend->ops.tuner_ops.sleep = philips_europa_tuner_sleep;
+			fe0->dvb.frontend->ops.tuner_ops.set_params = philips_td1316_tuner_set_params;
 		}
 		break;
 	case SAA7134_BOARD_VIDEOMATE_DVBT_200:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 					       &philips_tu1216_61_config,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.tuner_ops.init = philips_tu1216_init;
-			dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda6651_pll_set;
+		if (fe0->dvb.frontend) {
+			fe0->dvb.frontend->ops.tuner_ops.init = philips_tu1216_init;
+			fe0->dvb.frontend->ops.tuner_ops.set_params = philips_tda6651_pll_set;
 		}
 		break;
 	case SAA7134_BOARD_KWORLD_DVBT_210:
@@ -1066,14 +1089,14 @@
 						 &tda827x_cfg_0) < 0)
 				goto dettach_frontend;
 		} else {  		/* satellite */
-			dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
-			if (dev->dvb.frontend) {
-				if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63,
+			fe0->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
+			if (fe0->dvb.frontend) {
+				if (dvb_attach(tda826x_attach, fe0->dvb.frontend, 0x63,
 									&dev->i2c_adap, 0) == NULL) {
 					wprintk("%s: Lifeview Trio, No tda826x found!\n", __func__);
 					goto dettach_frontend;
 				}
-				if (dvb_attach(isl6421_attach, dev->dvb.frontend, &dev->i2c_adap,
+				if (dvb_attach(isl6421_attach, fe0->dvb.frontend, &dev->i2c_adap,
 										0x08, 0, 0) == NULL) {
 					wprintk("%s: Lifeview Trio, No ISL6421 found!\n", __func__);
 					goto dettach_frontend;
@@ -1083,11 +1106,11 @@
 		break;
 	case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
 	case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 					       &ads_tech_duo_config,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			if (dvb_attach(tda827x_attach,dev->dvb.frontend,
+		if (fe0->dvb.frontend) {
+			if (dvb_attach(tda827x_attach,fe0->dvb.frontend,
 				   ads_tech_duo_config.tuner_address, &dev->i2c_adap,
 								&ads_duo_cfg) == NULL) {
 				wprintk("no tda827x tuner found at addr: %02x\n",
@@ -1108,15 +1131,15 @@
 						 &tda827x_cfg_0) < 0)
 				goto dettach_frontend;
 		} else {        /* satellite */
-			dev->dvb.frontend = dvb_attach(tda10086_attach,
+			fe0->dvb.frontend = dvb_attach(tda10086_attach,
 							&flydvbs, &dev->i2c_adap);
-			if (dev->dvb.frontend) {
-				struct dvb_frontend *fe = dev->dvb.frontend;
+			if (fe0->dvb.frontend) {
+				struct dvb_frontend *fe = fe0->dvb.frontend;
 				u8 dev_id = dev->eedata[2];
 				u8 data = 0xc4;
 				struct i2c_msg msg = {.addr = 0x08, .flags = 0, .len = 1};
 
-				if (dvb_attach(tda826x_attach, dev->dvb.frontend,
+				if (dvb_attach(tda826x_attach, fe0->dvb.frontend,
 						0x60, &dev->i2c_adap, 0) == NULL) {
 					wprintk("%s: Medion Quadro, no tda826x "
 						"found !\n", __func__);
@@ -1150,31 +1173,31 @@
 		}
 		break;
 	case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180:
-		dev->dvb.frontend = dvb_attach(nxt200x_attach, &avertvhda180,
+		fe0->dvb.frontend = dvb_attach(nxt200x_attach, &avertvhda180,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend)
-			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
+		if (fe0->dvb.frontend)
+			dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x61,
 				   NULL, DVB_PLL_TDHU2);
 		break;
 	case SAA7134_BOARD_ADS_INSTANT_HDTV_PCI:
 	case SAA7134_BOARD_KWORLD_ATSC110:
-		dev->dvb.frontend = dvb_attach(nxt200x_attach, &kworldatsc110,
+		fe0->dvb.frontend = dvb_attach(nxt200x_attach, &kworldatsc110,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend)
-			dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+		if (fe0->dvb.frontend)
+			dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &dev->i2c_adap, 0x61,
 				   TUNER_PHILIPS_TUV1236D);
 		break;
 	case SAA7134_BOARD_FLYDVBS_LR300:
-		dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
+		fe0->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x60,
+		if (fe0->dvb.frontend) {
+			if (dvb_attach(tda826x_attach, fe0->dvb.frontend, 0x60,
 				       &dev->i2c_adap, 0) == NULL) {
 				wprintk("%s: No tda826x found!\n", __func__);
 				goto dettach_frontend;
 			}
-			if (dvb_attach(isl6421_attach, dev->dvb.frontend,
+			if (dvb_attach(isl6421_attach, fe0->dvb.frontend,
 				       &dev->i2c_adap, 0x08, 0, 0) == NULL) {
 				wprintk("%s: No ISL6421 found!\n", __func__);
 				goto dettach_frontend;
@@ -1182,25 +1205,25 @@
 		}
 		break;
 	case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 					       &medion_cardbus,
 					       &dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->original_demod_sleep = dev->dvb.frontend->ops.sleep;
-			dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
+		if (fe0->dvb.frontend) {
+			dev->original_demod_sleep = fe0->dvb.frontend->ops.sleep;
+			fe0->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
 
-			dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+			dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 				   &dev->i2c_adap, medion_cardbus.tuner_address,
 				   TUNER_PHILIPS_FMD1216ME_MK3);
 		}
 		break;
 	case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
-		dev->dvb.frontend = dvb_attach(tda10046_attach,
+		fe0->dvb.frontend = dvb_attach(tda10046_attach,
 				&philips_europa_config,
 				&dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.tuner_ops.init = philips_td1316_tuner_init;
-			dev->dvb.frontend->ops.tuner_ops.set_params = philips_td1316_tuner_set_params;
+		if (fe0->dvb.frontend) {
+			fe0->dvb.frontend->ops.tuner_ops.init = philips_td1316_tuner_init;
+			fe0->dvb.frontend->ops.tuner_ops.set_params = philips_td1316_tuner_set_params;
 		}
 		break;
 	case SAA7134_BOARD_CINERGY_HT_PCMCIA:
@@ -1239,15 +1262,15 @@
 			goto dettach_frontend;
 		break;
 	case SAA7134_BOARD_PHILIPS_SNAKE:
-		dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
+		fe0->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
 						&dev->i2c_adap);
-		if (dev->dvb.frontend) {
-			if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x60,
+		if (fe0->dvb.frontend) {
+			if (dvb_attach(tda826x_attach, fe0->dvb.frontend, 0x60,
 					&dev->i2c_adap, 0) == NULL) {
 				wprintk("%s: No tda826x found!\n", __func__);
 				goto dettach_frontend;
 			}
-			if (dvb_attach(lnbp21_attach, dev->dvb.frontend,
+			if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
 					&dev->i2c_adap, 0, 0) == NULL) {
 				wprintk("%s: No lnbp21 found!\n", __func__);
 				goto dettach_frontend;
@@ -1269,24 +1292,24 @@
 		saa7134_set_gpio(dev, 25, 0);
 		msleep(10);
 		saa7134_set_gpio(dev, 25, 1);
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 						&avermedia_xc3028_mt352_dev,
 						&dev->i2c_adap);
 		attach_xc3028 = 1;
 		break;
 	case SAA7134_BOARD_MD7134_BRIDGE_2:
-		dev->dvb.frontend = dvb_attach(tda10086_attach,
+		fe0->dvb.frontend = dvb_attach(tda10086_attach,
 						&sd1878_4m, &dev->i2c_adap);
-		if (dev->dvb.frontend) {
+		if (fe0->dvb.frontend) {
 			struct dvb_frontend *fe;
-			if (dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
+			if (dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
 				  &dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL) {
 				wprintk("%s: MD7134 DVB-S, no SD1878 "
 					"found !\n", __func__);
 				goto dettach_frontend;
 			}
 			/* we need to open the i2c gate (we know it exists) */
-			fe = dev->dvb.frontend;
+			fe = fe0->dvb.frontend;
 			fe->ops.i2c_gate_ctrl(fe, 1);
 			if (dvb_attach(isl6405_attach, fe,
 					&dev->i2c_adap, 0x08, 0, 0) == NULL) {
@@ -1305,7 +1328,7 @@
 		saa7134_set_gpio(dev, 25, 0);
 		msleep(10);
 		saa7134_set_gpio(dev, 25, 1);
-		dev->dvb.frontend = dvb_attach(mt352_attach,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
 						&avermedia_xc3028_mt352_dev,
 						&dev->i2c_adap);
 		attach_xc3028 = 1;
@@ -1316,17 +1339,17 @@
 							&tda827x_cfg_2) < 0)
 				goto dettach_frontend;
 		} else {  		/* satellite */
-			dev->dvb.frontend = dvb_attach(tda10086_attach,
+			fe0->dvb.frontend = dvb_attach(tda10086_attach,
 						&flydvbs, &dev->i2c_adap);
-			if (dev->dvb.frontend) {
+			if (fe0->dvb.frontend) {
 				if (dvb_attach(tda826x_attach,
-						dev->dvb.frontend, 0x60,
+						fe0->dvb.frontend, 0x60,
 						&dev->i2c_adap, 0) == NULL) {
 					wprintk("%s: Asus Tiger 3in1, no "
 						"tda826x found!\n", __func__);
 					goto dettach_frontend;
 				}
-				if (dvb_attach(lnbp21_attach, dev->dvb.frontend,
+				if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
 						&dev->i2c_adap, 0, 0) == NULL) {
 					wprintk("%s: Asus Tiger 3in1, no lnbp21"
 						" found!\n", __func__);
@@ -1352,10 +1375,10 @@
 			.i2c_addr  = 0x61,
 		};
 
-		if (!dev->dvb.frontend)
+		if (!fe0->dvb.frontend)
 			return -1;
 
-		fe = dvb_attach(xc2028_attach, dev->dvb.frontend, &cfg);
+		fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
 		if (!fe) {
 			printk(KERN_ERR "%s/2: xc3028 attach failed\n",
 			       dev->name);
@@ -1363,40 +1386,47 @@
 		}
 	}
 
-	if (NULL == dev->dvb.frontend) {
+	if (NULL == fe0->dvb.frontend) {
 		printk(KERN_ERR "%s/dvb: frontend initialization failed\n", dev->name);
 		return -1;
 	}
 	/* define general-purpose callback pointer */
-	dev->dvb.frontend->callback = saa7134_tuner_callback;
+	fe0->dvb.frontend->callback = saa7134_tuner_callback;
 
 	/* register everything else */
-	ret = videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev, &dev->pci->dev,
-				    adapter_nr);
+	ret = videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
+		&dev->pci->dev, adapter_nr, 0);
 
 	/* this sequence is necessary to make the tda1004x load its firmware
 	 * and to enter analog mode of hybrid boards
 	 */
 	if (!ret) {
-		if (dev->dvb.frontend->ops.init)
-			dev->dvb.frontend->ops.init(dev->dvb.frontend);
-		if (dev->dvb.frontend->ops.sleep)
-			dev->dvb.frontend->ops.sleep(dev->dvb.frontend);
-		if (dev->dvb.frontend->ops.tuner_ops.sleep)
-			dev->dvb.frontend->ops.tuner_ops.sleep(dev->dvb.frontend);
+		if (fe0->dvb.frontend->ops.init)
+			fe0->dvb.frontend->ops.init(fe0->dvb.frontend);
+		if (fe0->dvb.frontend->ops.sleep)
+			fe0->dvb.frontend->ops.sleep(fe0->dvb.frontend);
+		if (fe0->dvb.frontend->ops.tuner_ops.sleep)
+			fe0->dvb.frontend->ops.tuner_ops.sleep(fe0->dvb.frontend);
 	}
 	return ret;
 
 dettach_frontend:
-	if (dev->dvb.frontend)
-		dvb_frontend_detach(dev->dvb.frontend);
-	dev->dvb.frontend = NULL;
+	if (fe0->dvb.frontend)
+		dvb_frontend_detach(fe0->dvb.frontend);
+	fe0->dvb.frontend = NULL;
 
 	return -1;
 }
 
 static int dvb_fini(struct saa7134_dev *dev)
 {
+	struct videobuf_dvb_frontend *fe0;
+
+	/* Get the first frontend */
+	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+	if (!fe0)
+		return -EINVAL;
+
 	/* FIXME: I suspect that this code is bogus, since the entry for
 	   Pinnacle 300I DVB-T PAL already defines the proper init to allow
 	   the detection of mt2032 (TDA9887_PORT2_INACTIVE)
@@ -1416,7 +1446,7 @@
 			u8 data = 0x80;
 			struct i2c_msg msg = {.addr = 0x08, .buf = &data, .flags = 0, .len = 1};
 			struct dvb_frontend *fe;
-			fe = dev->dvb.frontend;
+			fe = fe0->dvb.frontend;
 			if (fe->ops.i2c_gate_ctrl) {
 				fe->ops.i2c_gate_ctrl(fe, 1);
 				i2c_transfer(&dev->i2c_adap, &msg, 1);
@@ -1424,8 +1454,8 @@
 			}
 		}
 	}
-	if (dev->dvb.frontend)
-		videobuf_dvb_unregister(&dev->dvb);
+	if (fe0->dvb.frontend)
+		videobuf_dvb_unregister_bus(&dev->frontends);
 	return 0;
 }
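
The saa7134-dvb hunks above move the driver from a single embedded struct videobuf_dvb to the videobuf_dvb_frontends list: allocate frontend 1, look it up with videobuf_dvb_get_frontend(), attach the board-specific demod and tuner to fe0->dvb.frontend, then register with videobuf_dvb_register_bus(). As an illustrative aside (not part of the patch), the sequence condensed into one hypothetical function; it relies on declarations already present in saa7134-dvb.c (struct saa7134_dev, pinnacle_300i, adapter_nr, saa7134_tuner_callback), uses one mt352 attach in place of the full board switch, and omits the videobuf TS queue setup and the tda1004x firmware/sleep sequence.

static int dvb_init_sketch(struct saa7134_dev *dev)
{
	struct videobuf_dvb_frontend *fe0;

	mutex_init(&dev->frontends.lock);
	INIT_LIST_HEAD(&dev->frontends.felist);
	dev->frontends.active_fe_id = 0;

	/* only one frontend for now (multi-frontend is still a FIXME) */
	if (videobuf_dvb_alloc_frontend(&dev->frontends, 1) == NULL)
		return -ENOMEM;
	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
	if (!fe0)
		return -EINVAL;
	fe0->dvb.name = dev->name;

	/* board-specific demod/tuner attach, e.g. the Pinnacle 300i case */
	fe0->dvb.frontend = dvb_attach(mt352_attach, &pinnacle_300i,
				       &dev->i2c_adap);
	if (NULL == fe0->dvb.frontend)
		return -1;
	fe0->dvb.frontend->callback = saa7134_tuner_callback;

	return videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
					 &dev->pci->dev, adapter_nr, 0);
}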
 
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 9a8766a..7f40511 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -534,7 +534,7 @@
 		return err;
 	}
 	printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
-	       dev->name,dev->empress_dev->minor & 0x1f);
+	       dev->name, dev->empress_dev->num);
 
 	videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
 			    &dev->pci->dev, &dev->slock,
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 491ab1f..24096d6 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -581,7 +581,7 @@
 
 #if defined(CONFIG_VIDEO_SAA7134_DVB) || defined(CONFIG_VIDEO_SAA7134_DVB_MODULE)
 	/* SAA7134_MPEG_DVB only */
-	struct videobuf_dvb        dvb;
+	struct videobuf_dvb_frontends frontends;
 	int (*original_demod_sleep)(struct dvb_frontend *fe);
 	int (*original_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
 	int (*original_set_high_voltage)(struct dvb_frontend *fe, long arg);
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 02fda4e..6debb65 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -25,43 +25,25 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/signal.h>
 #include <linux/types.h>
-#include <linux/i2c.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <linux/ioctl.h>
 #include <asm/uaccess.h>
-
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/videodev.h>
 #include <linux/video_encoder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
 
 MODULE_DESCRIPTION("Philips SAA7185 video encoder driver");
 MODULE_AUTHOR("Dave Perks");
 MODULE_LICENSE("GPL");
 
 
-#define I2C_NAME(s) (s)->name
-
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 /* ----------------------------------------------------------------------- */
 
 struct saa7185 {
@@ -75,32 +57,24 @@
 	int sat;
 };
 
-#define   I2C_SAA7185        0x88
-
 /* ----------------------------------------------------------------------- */
 
-static inline int
-saa7185_read (struct i2c_client *client)
+static inline int saa7185_read(struct i2c_client *client)
 {
 	return i2c_smbus_read_byte(client);
 }
 
-static int
-saa7185_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+static int saa7185_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct saa7185 *encoder = i2c_get_clientdata(client);
 
-	dprintk(1, KERN_DEBUG "SAA7185: %02x set to %02x\n", reg, value);
+	v4l_dbg(1, debug, client, "%02x set to %02x\n", reg, value);
 	encoder->reg[reg] = value;
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static int
-saa7185_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int saa7185_write_block(struct i2c_client *client,
+		const u8 *data, unsigned int len)
 {
 	int ret = -1;
 	u8 reg;
@@ -121,18 +95,17 @@
 				    encoder->reg[reg++] = data[1];
 				len -= 2;
 				data += 2;
-			} while (len >= 2 && data[0] == reg &&
-				 block_len < 32);
-			if ((ret = i2c_master_send(client, block_data,
-						   block_len)) < 0)
+			} while (len >= 2 && data[0] == reg && block_len < 32);
+			ret = i2c_master_send(client, block_data, block_len);
+			if (ret < 0)
 				break;
 		}
 	} else {
 		/* do some slow I2C emulation kind of thing */
 		while (len >= 2) {
 			reg = *data++;
-			if ((ret = saa7185_write(client, reg,
-						 *data++)) < 0)
+			ret = saa7185_write(client, reg, *data++);
+			if (ret < 0)
 				break;
 			len -= 2;
 		}
@@ -240,15 +213,11 @@
 	0x66, 0x21,		/* FSC3 */
 };
 
-static int
-saa7185_command (struct i2c_client *client,
-		 unsigned int       cmd,
-		 void              *arg)
+static int saa7185_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct saa7185 *encoder = i2c_get_clientdata(client);
 
 	switch (cmd) {
-
 	case 0:
 		saa7185_write_block(client, init_common,
 				    sizeof(init_common));
@@ -264,7 +233,6 @@
 					    sizeof(init_pal));
 			break;
 		}
-
 		break;
 
 	case ENCODER_GET_CAPABILITIES:
@@ -276,8 +244,8 @@
 		    VIDEO_ENCODER_SECAM | VIDEO_ENCODER_CCIR;
 		cap->inputs = 1;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case ENCODER_SET_NORM:
 	{
@@ -286,7 +254,6 @@
 		//saa7185_write_block(client, init_common, sizeof(init_common));
 
 		switch (*iarg) {
-
 		case VIDEO_MODE_NTSC:
 			saa7185_write_block(client, init_ntsc,
 					    sizeof(init_ntsc));
@@ -300,11 +267,10 @@
 		case VIDEO_MODE_SECAM:
 		default:
 			return -EINVAL;
-
 		}
 		encoder->norm = *iarg;
-	}
 		break;
+	}
 
 	case ENCODER_SET_INPUT:
 	{
@@ -314,7 +280,6 @@
 		 *iarg = 1: input is from ZR36060 */
 
 		switch (*iarg) {
-
 		case 0:
 			/* Switch RTCE to 1 */
 			saa7185_write(client, 0x61,
@@ -332,21 +297,19 @@
 
 		default:
 			return -EINVAL;
-
 		}
-	}
 		break;
+	}
 
 	case ENCODER_SET_OUTPUT:
 	{
 		int *iarg = arg;
 
 		/* not much choice of outputs */
-		if (*iarg != 0) {
+		if (*iarg != 0)
 			return -EINVAL;
-		}
-	}
 		break;
+	}
 
 	case ENCODER_ENABLE_OUTPUT:
 	{
@@ -356,8 +319,8 @@
 		saa7185_write(client, 0x61,
 			      (encoder->reg[0x61] & 0xbf) |
 			      (encoder->enable ? 0x00 : 0x40));
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -368,138 +331,65 @@
 
 /* ----------------------------------------------------------------------- */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] = { I2C_SAA7185 >> 1, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x88 >> 1, I2C_CLIENT_END };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver i2c_driver_saa7185;
-
-static int
-saa7185_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
+static int saa7185_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	int i;
-	struct i2c_client *client;
 	struct saa7185 *encoder;
 
-	dprintk(1,
-		KERN_INFO
-		"saa7185.c: detecting saa7185 client on address 0x%x\n",
-		address << 1);
-
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
 
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &i2c_driver_saa7185;
-	strlcpy(I2C_NAME(client), "saa7185", sizeof(I2C_NAME(client)));
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
 
 	encoder = kzalloc(sizeof(struct saa7185), GFP_KERNEL);
-	if (encoder == NULL) {
-		kfree(client);
+	if (encoder == NULL)
 		return -ENOMEM;
-	}
 	encoder->norm = VIDEO_MODE_NTSC;
 	encoder->enable = 1;
 	i2c_set_clientdata(client, encoder);
 
-	i = i2c_attach_client(client);
-	if (i) {
-		kfree(client);
-		kfree(encoder);
-		return i;
-	}
-
 	i = saa7185_write_block(client, init_common, sizeof(init_common));
-	if (i >= 0) {
-		i = saa7185_write_block(client, init_ntsc,
-					sizeof(init_ntsc));
-	}
-	if (i < 0) {
-		dprintk(1, KERN_ERR "%s_attach: init error %d\n",
-			I2C_NAME(client), i);
-	} else {
-		dprintk(1,
-			KERN_INFO
-			"%s_attach: chip version %d at address 0x%x\n",
-			I2C_NAME(client), saa7185_read(client) >> 5,
-			client->addr << 1);
-	}
-
+	if (i >= 0)
+		i = saa7185_write_block(client, init_ntsc, sizeof(init_ntsc));
+	if (i < 0)
+		v4l_dbg(1, debug, client, "init error %d\n", i);
+	else
+		v4l_dbg(1, debug, client, "revision 0x%x\n",
+				saa7185_read(client) >> 5);
 	return 0;
 }
 
-static int
-saa7185_attach_adapter (struct i2c_adapter *adapter)
-{
-	dprintk(1,
-		KERN_INFO
-		"saa7185.c: starting probe for adapter %s (0x%x)\n",
-		I2C_NAME(adapter), adapter->id);
-	return i2c_probe(adapter, &addr_data, &saa7185_detect_client);
-}
-
-static int
-saa7185_detach_client (struct i2c_client *client)
+static int saa7185_remove(struct i2c_client *client)
 {
 	struct saa7185 *encoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
 
 	saa7185_write(client, 0x61, (encoder->reg[0x61]) | 0x40);	/* SW: output off is active */
 	//saa7185_write(client, 0x3a, (encoder->reg[0x3a]) | 0x80); /* SW: color bar */
 
 	kfree(encoder);
-	kfree(client);
-
 	return 0;
 }
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_driver i2c_driver_saa7185 = {
-	.driver = {
-		.name = "saa7185",	/* name */
-	},
-
-	.id = I2C_DRIVERID_SAA7185B,
-
-	.attach_adapter = saa7185_attach_adapter,
-	.detach_client = saa7185_detach_client,
-	.command = saa7185_command,
+static const struct i2c_device_id saa7185_id[] = {
+	{ "saa7185", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, saa7185_id);
 
-static int __init
-saa7185_init (void)
-{
-	return i2c_add_driver(&i2c_driver_saa7185);
-}
-
-static void __exit
-saa7185_exit (void)
-{
-	i2c_del_driver(&i2c_driver_saa7185);
-}
-
-module_init(saa7185_init);
-module_exit(saa7185_exit);
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "saa7185",
+	.driverid = I2C_DRIVERID_SAA7185B,
+	.command = saa7185_command,
+	.probe = saa7185_probe,
+	.remove = saa7185_remove,
+	.id_table = saa7185_id,
+};
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index ae39491..044a2e9 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1412,7 +1412,7 @@
 		return -EIO;
 	}
 	dev_info(&intf->dev, "registered new video device: video%d\n",
-		 se401->vdev.minor);
+		 se401->vdev.num);
 
 	usb_set_intfdata (intf, se401);
 	return 0;
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 7683809..2407607 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -40,39 +40,39 @@
 
 /* register offsets for sh7722 / sh7723 */
 
-#define CAPSR  0x00
-#define CAPCR  0x04
-#define CAMCR  0x08
-#define CMCYR  0x0c
-#define CAMOR  0x10
-#define CAPWR  0x14
-#define CAIFR  0x18
-#define CSTCR  0x20 /* not on sh7723 */
-#define CSECR  0x24 /* not on sh7723 */
-#define CRCNTR 0x28
-#define CRCMPR 0x2c
-#define CFLCR  0x30
-#define CFSZR  0x34
-#define CDWDR  0x38
-#define CDAYR  0x3c
-#define CDACR  0x40
-#define CDBYR  0x44
-#define CDBCR  0x48
-#define CBDSR  0x4c
-#define CFWCR  0x5c
-#define CLFCR  0x60
-#define CDOCR  0x64
-#define CDDCR  0x68
-#define CDDAR  0x6c
-#define CEIER  0x70
-#define CETCR  0x74
-#define CSTSR  0x7c
-#define CSRTR  0x80
-#define CDSSR  0x84
-#define CDAYR2 0x90
-#define CDACR2 0x94
-#define CDBYR2 0x98
-#define CDBCR2 0x9c
+#define CAPSR  0x00 /* Capture start register */
+#define CAPCR  0x04 /* Capture control register */
+#define CAMCR  0x08 /* Capture interface control register */
+#define CMCYR  0x0c /* Capture interface cycle register */
+#define CAMOR  0x10 /* Capture interface offset register */
+#define CAPWR  0x14 /* Capture interface width register */
+#define CAIFR  0x18 /* Capture interface input format register */
+#define CSTCR  0x20 /* Camera strobe control register (<= sh7722) */
+#define CSECR  0x24 /* Camera strobe emission count register (<= sh7722) */
+#define CRCNTR 0x28 /* CEU register control register */
+#define CRCMPR 0x2c /* CEU register forcible control register */
+#define CFLCR  0x30 /* Capture filter control register */
+#define CFSZR  0x34 /* Capture filter size clip register */
+#define CDWDR  0x38 /* Capture destination width register */
+#define CDAYR  0x3c /* Capture data address Y register */
+#define CDACR  0x40 /* Capture data address C register */
+#define CDBYR  0x44 /* Capture data bottom-field address Y register */
+#define CDBCR  0x48 /* Capture data bottom-field address C register */
+#define CBDSR  0x4c /* Capture bundle destination size register */
+#define CFWCR  0x5c /* Firewall operation control register */
+#define CLFCR  0x60 /* Capture low-pass filter control register */
+#define CDOCR  0x64 /* Capture data output control register */
+#define CDDCR  0x68 /* Capture data complexity level register */
+#define CDDAR  0x6c /* Capture data complexity level address register */
+#define CEIER  0x70 /* Capture event interrupt enable register */
+#define CETCR  0x74 /* Capture event flag clear register */
+#define CSTSR  0x7c /* Capture status register */
+#define CSRTR  0x80 /* Capture software reset register */
+#define CDSSR  0x84 /* Capture data size register */
+#define CDAYR2 0x90 /* Capture data address Y register 2 */
+#define CDACR2 0x94 /* Capture data address C register 2 */
+#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
+#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
 
 static DEFINE_MUTEX(camera_lock);
 
@@ -165,6 +165,7 @@
 	ceu_write(pcdev, CETCR, 0x0317f313 ^ 0x10);
 
 	if (pcdev->active) {
+		pcdev->active->state = VIDEOBUF_ACTIVE;
 		ceu_write(pcdev, CDAYR, videobuf_to_dma_contig(pcdev->active));
 		ceu_write(pcdev, CAPSR, 0x1); /* start capture */
 	}
@@ -236,7 +237,7 @@
 	dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
 		vb, vb->baddr, vb->bsize);
 
-	vb->state = VIDEOBUF_ACTIVE;
+	vb->state = VIDEOBUF_QUEUED;
 	spin_lock_irqsave(&pcdev->lock, flags);
 	list_add_tail(&vb->queue, &pcdev->capture);
 
@@ -323,12 +324,24 @@
 {
 	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
 	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	unsigned long flags;
 
 	BUG_ON(icd != pcdev->icd);
 
 	/* disable capture, disable interrupts */
 	ceu_write(pcdev, CEIER, 0);
 	ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
+
+	/* make sure active buffer is canceled */
+	spin_lock_irqsave(&pcdev->lock, flags);
+	if (pcdev->active) {
+		list_del(&pcdev->active->queue);
+		pcdev->active->state = VIDEOBUF_ERROR;
+		wake_up_all(&pcdev->active->done);
+		pcdev->active = NULL;
+	}
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+
 	icd->ops->release(icd);
 
 	dev_info(&icd->dev,
@@ -391,7 +404,20 @@
 	ceu_write(pcdev, CFLCR, 0); /* data fetch mode - no scaling */
 	ceu_write(pcdev, CFSZR, (icd->height << 16) | cfszr_width);
 	ceu_write(pcdev, CLFCR, 0); /* data fetch mode - no lowpass filter */
-	ceu_write(pcdev, CDOCR, 0x00000016);
+
+	/* A few words about byte order (observed in Big Endian mode)
+	 *
+	 * In data fetch mode bytes are received in chunks of 8 bytes.
+	 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
+	 *
+	 * The data is however by default written to memory in reverse order:
+	 * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
+	 *
+	 * The lowest three bits of CDOCR allows us to do swapping,
+	 * using 7 we swap the data bytes to match the incoming order:
+	 * D0, D1, D2, D3, D4, D5, D6, D7
+	 */
+	ceu_write(pcdev, CDOCR, 0x00000017);
 
 	ceu_write(pcdev, CDWDR, cdwdr_width);
 	ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
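
Besides the videobuf state fix above (buffers queue as VIDEOBUF_QUEUED, are promoted to VIDEOBUF_ACTIVE only when capture starts, and the active buffer is completed as VIDEOBUF_ERROR on release), the CEU hunk changes CDOCR from 0x16 to 0x17 so that data-fetch-mode bytes land in memory in arrival order. As an illustrative aside (not part of the patch), a tiny standalone program modelling the two orderings the new CDOCR comment describes for one 8-byte chunk.

#include <stdio.h>
#include <string.h>

/* default (swap bits clear): the chunk is written back byte-reversed */
static void store_default(unsigned char dst[8], const unsigned char src[8])
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = src[7 - i];	/* D7 lands at the lowest address */
}

/* low three CDOCR bits set to 7: memory matches arrival order D0..D7 */
static void store_swapped(unsigned char dst[8], const unsigned char src[8])
{
	memcpy(dst, src, 8);
}

int main(void)
{
	unsigned char chunk[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };	/* D0..D7 */
	unsigned char mem[8];
	int i;

	store_default(mem, chunk);
	for (i = 0; i < 8; i++)
		printf("D%d ", mem[i]);		/* prints D7 D6 ... D0 */
	printf("\n");

	store_swapped(mem, chunk);
	for (i = 0; i < 8; i++)
		printf("D%d ", mem[i]);		/* prints D0 D1 ... D7 */
	printf("\n");
	return 0;
}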
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 20e30bd..fcd2b62 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -1008,7 +1008,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "URB timeout reached. The camera is misconfigured. "
 		       "To use it, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -1734,7 +1734,7 @@
 
 	cam = container_of(kref, struct sn9c102_device, kref);
 
-	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
+	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
 	video_set_drvdata(cam->v4ldev, NULL);
 	video_unregister_device(cam->v4ldev);
 	usb_put_dev(cam->usbdev);
@@ -1792,7 +1792,7 @@
 
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is already in use",
-		       cam->v4ldev->minor);
+		       cam->v4ldev->num);
 		DBG(3, "Simultaneous opens are not supported");
 		/*
 		   open() must follow the open flags and should block
@@ -1845,7 +1845,7 @@
 	cam->frame_count = 0;
 	sn9c102_empty_framequeues(cam);
 
-	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
+	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
 
 out:
 	mutex_unlock(&cam->open_mutex);
@@ -1870,7 +1870,7 @@
 	cam->users--;
 	wake_up_interruptible_nr(&cam->wait_open, 1);
 
-	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
+	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
 
 	kref_put(&cam->kref, sn9c102_release_resources);
 
@@ -2432,7 +2432,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -2445,7 +2445,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -ENOMEM;
 	}
 
@@ -2689,7 +2689,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -2701,7 +2701,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -ENOMEM;
 	}
 
@@ -2748,7 +2748,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
 		       "problems. To use the camera, close and open "
-		       "/dev/video%d again.", cam->v4ldev->minor);
+		       "/dev/video%d again.", cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -3348,7 +3348,7 @@
 		goto fail;
 	}
 
-	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
+	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
 
 	video_set_drvdata(cam->v4ldev, cam);
 	cam->module_param.force_munmap = force_munmap[dev_nr];
@@ -3402,7 +3402,7 @@
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is open! Deregistration and "
 		       "memory deallocation are deferred.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		cam->state |= DEV_MISCONFIGURED;
 		sn9c102_stop_transfer(cam);
 		cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index 1adc257..bb7a9d4 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -18,15 +18,7 @@
 #include <linux/videodev2.h>
 #include <media/v4l2-common.h>
 #include <media/soc_camera.h>
-
-struct soc_camera_platform_info {
-	int iface;
-	char *format_name;
-	unsigned long format_depth;
-	struct v4l2_pix_format format;
-	unsigned long bus_param;
-	int (*set_capture)(struct soc_camera_platform_info *info, int enable);
-};
+#include <media/soc_camera_platform.h>
 
 struct soc_camera_platform_priv {
 	struct soc_camera_platform_info *info;
@@ -44,11 +36,21 @@
 
 static int soc_camera_platform_init(struct soc_camera_device *icd)
 {
+	struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd);
+
+	if (p->power)
+		p->power(1);
+
 	return 0;
 }
 
 static int soc_camera_platform_release(struct soc_camera_device *icd)
 {
+	struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd);
+
+	if (p->power)
+		p->power(0);
+
 	return 0;
 }
 
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index db69bc5..e9eb6d7 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -27,7 +27,6 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/kref.h>
 
 #include <linux/usb.h>
 #include <linux/mm.h>
@@ -560,7 +559,7 @@
 
 		urb = dev->isobufs[i].urb;
 		if (urb) {
-			if (atomic_read(&dev->urbs_used))
+			if (atomic_read(&dev->urbs_used) && is_present(dev))
 				usb_kill_urb(urb);
 			usb_free_urb(urb);
 		}
@@ -689,18 +688,14 @@
 {
 	struct stk_camera *dev = fp->private_data;
 
-	if (dev->owner != fp) {
-		usb_autopm_put_interface(dev->interface);
-		return 0;
+	if (dev->owner == fp) {
+		stk_stop_stream(dev);
+		stk_free_buffers(dev);
+		dev->owner = NULL;
 	}
 
-	stk_stop_stream(dev);
-
-	stk_free_buffers(dev);
-
-	dev->owner = NULL;
-
-	usb_autopm_put_interface(dev->interface);
+	if (is_present(dev))
+		usb_autopm_put_interface(dev->interface);
 
 	return 0;
 }
@@ -714,9 +709,6 @@
 	struct stk_sio_buffer *sbuf;
 	struct stk_camera *dev = fp->private_data;
 
-	if (dev == NULL)
-		return -EIO;
-
 	if (!is_present(dev))
 		return -EIO;
 	if (dev->owner && dev->owner != fp)
@@ -773,9 +765,6 @@
 {
 	struct stk_camera *dev = fp->private_data;
 
-	if (dev == NULL)
-		return -ENODEV;
-
 	poll_wait(fp, &dev->wait_frame, wait);
 
 	if (!is_present(dev))
@@ -1342,7 +1331,7 @@
 		STK_ERROR("v4l registration failed\n");
 	else
 		STK_INFO("Syntek USB2.0 Camera is now controlling video device"
-			" /dev/video%d\n", dev->vdev.minor);
+			" /dev/video%d\n", dev->vdev.num);
 	return err;
 }
 
@@ -1436,8 +1425,8 @@
 	wake_up_interruptible(&dev->wait_frame);
 	stk_remove_sysfs_files(&dev->vdev);
 
-	STK_INFO("Syntek USB2.0 Camera release resources"
-		"video device /dev/video%d\n", dev->vdev.minor);
+	STK_INFO("Syntek USB2.0 Camera release resources "
+		"video device /dev/video%d\n", dev->vdev.num);
 
 	video_unregister_device(&dev->vdev);
 }
diff --git a/drivers/media/video/stk-webcam.h b/drivers/media/video/stk-webcam.h
index 084a85b..9f67366 100644
--- a/drivers/media/video/stk-webcam.h
+++ b/drivers/media/video/stk-webcam.h
@@ -122,7 +122,6 @@
 
 #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
 
-void stk_camera_delete(struct kref *);
 int stk_camera_write_reg(struct stk_camera *, u16, u8);
 int stk_camera_read_reg(struct stk_camera *, u16, int *);
 
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 9c549d9..328c41b 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1470,7 +1470,8 @@
 		retval = -EIO;
 		goto error_vdev;
 	}
-	PDEBUG (0, "STV(i): registered new video device: video%d", stv680->vdev->minor);
+	PDEBUG(0, "STV(i): registered new video device: video%d",
+		stv680->vdev->num);
 
 	usb_set_intfdata (intf, stv680);
 	retval = stv680_create_sysfs_files(stv680->vdev);
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index bcc32fa..3b0b84c 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -242,7 +242,7 @@
 	{ TUNER_ABSENT,        		"TCL M2523_3DBH_E"},
 	{ TUNER_ABSENT,        		"TCL M2523_3DIH_E"},
 	{ TUNER_ABSENT,        		"TCL MFPE05_2_U"},
-	{ TUNER_PHILIPS_FMD1216ME_MK3,	"Philips FMD1216MEX"},
+	{ TUNER_PHILIPS_FMD1216MEX_MK3,	"Philips FMD1216MEX"},
 	{ TUNER_ABSENT,        		"Philips FRH2036B"},
 	{ TUNER_ABSENT,        		"Panasonic ENGF75_01GF"},
 	{ TUNER_ABSENT,        		"MaxLinear MXL5005"},
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index e986c28..da27a52 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -229,7 +229,8 @@
 
 	cam->input = input_dev = input_allocate_device();
 	if (!input_dev) {
-		warn("Not enough memory for camera's input device\n");
+		dev_warn(&dev->dev,
+			 "Not enough memory for camera's input device\n");
 		return;
 	}
 
@@ -243,8 +244,9 @@
 
 	error = input_register_device(cam->input);
 	if (error) {
-		warn("Failed to register camera's input device, err: %d\n",
-		     error);
+		dev_warn(&dev->dev,
+			 "Failed to register camera's input device, err: %d\n",
+			 error);
 		input_free_device(cam->input);
 		cam->input = NULL;
 	}
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 05c61b5..4459b8a 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -93,7 +93,7 @@
 
 	cam->input = input_dev = input_allocate_device();
 	if (!input_dev) {
-		warn("insufficient mem for cam input device");
+		dev_warn(&dev->dev, "insufficient mem for cam input device\n");
 		return;
 	}
 
@@ -107,8 +107,9 @@
 
 	error = input_register_device(cam->input);
 	if (error) {
-		warn("Failed to register camera's input device, err: %d\n",
-		     error);
+		dev_warn(&dev->dev,
+			 "Failed to register camera's input device, err: %d\n",
+			 error);
 		input_free_device(cam->input);
 		cam->input = NULL;
 	}
@@ -587,8 +588,9 @@
 			dataurb->iso_frame_desc[i].offset;
 
 		if (st < 0) {
-			warn("Data error: packet=%d. len=%d. status=%d.",
-			      i, n, st);
+			dev_warn(&uvd->dev->dev,
+				 "Data error: packet=%d. len=%d. status=%d.\n",
+				 i, n, st);
 			uvd->stats.iso_err_count++;
 			continue;
 		}
@@ -699,7 +701,7 @@
 
 	ret = qcm_camera_off(uvd);
 	if (ret)
-		warn("couldn't turn the cam off.");
+		dev_warn(&uvd->dev->dev, "couldn't turn the cam off.\n");
 
 	uvd->streaming = 0;
 
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 07cd87d..7c575bb 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1059,7 +1059,7 @@
 
 	dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n",
 		 (uvd->handle != NULL) ? uvd->handle->drvName : "???",
-		 uvd->vdev.minor, tmp2, tmp1);
+		 uvd->vdev.num, tmp2, tmp1);
 
 	usb_get_dev(uvd->dev);
 	return 0;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 7a127d6..8e2d58b 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -877,7 +877,8 @@
 		return -EIO;
 	}
 
-	printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",cam->vdev.minor);
+	printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",
+			cam->vdev.num);
 
 	usb_set_intfdata (intf, cam);
 
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 92427fd..9907b9a 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -236,7 +236,7 @@
 	       sizeof(struct i2c_client));
 
 	sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name),
-		" #%d", usbvision->vdev->minor & 0x1f);
+		" #%d", usbvision->vdev->num);
 	PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);
 	usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
 
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 77aeb39..d185b57 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1440,7 +1440,7 @@
 	// vbi Device:
 	if (usbvision->vbi) {
 		PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
-		       usbvision->vbi->minor & 0x1f);
+		       usbvision->vbi->num);
 		if (usbvision->vbi->minor != -1) {
 			video_unregister_device(usbvision->vbi);
 		} else {
@@ -1452,7 +1452,7 @@
 	// Radio Device:
 	if (usbvision->rdev) {
 		PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
-		       usbvision->rdev->minor & 0x1f);
+		       usbvision->rdev->num);
 		if (usbvision->rdev->minor != -1) {
 			video_unregister_device(usbvision->rdev);
 		} else {
@@ -1464,7 +1464,7 @@
 	// Video Device:
 	if (usbvision->vdev) {
 		PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
-		       usbvision->vdev->minor & 0x1f);
+		       usbvision->vdev->num);
 		if (usbvision->vdev->minor != -1) {
 			video_unregister_device(usbvision->vdev);
 		} else {
@@ -1490,7 +1490,7 @@
 		goto err_exit;
 	}
 	printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
-	       usbvision->nr,usbvision->vdev->minor & 0x1f);
+	       usbvision->nr, usbvision->vdev->num);
 
 	// Radio Device:
 	if (usbvision_device_data[usbvision->DevModel].Radio) {
@@ -1507,7 +1507,7 @@
 			goto err_exit;
 		}
 		printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
-		       usbvision->nr, usbvision->rdev->minor & 0x1f);
+		       usbvision->nr, usbvision->rdev->num);
 	}
 	// vbi Device:
 	if (usbvision_device_data[usbvision->DevModel].vbi) {
@@ -1523,7 +1523,7 @@
 			goto err_exit;
 		}
 		printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
-		       usbvision->nr,usbvision->vbi->minor & 0x1f);
+		       usbvision->nr, usbvision->vbi->num);
 	}
 	// all done
 	return 0;
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 78e4c4e..758dfef 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -464,7 +464,7 @@
 	return 0;
 }
 
-static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
+static int __uvc_v4l2_do_ioctl(struct file *file,
 		     unsigned int cmd, void *arg)
 {
 	struct video_device *vdev = video_devdata(file);
@@ -978,8 +978,8 @@
 		return uvc_xu_ctrl_query(video, arg, 1);
 
 	default:
-		if ((ret = v4l_compat_translate_ioctl(inode, file, cmd, arg,
-			uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
+		if ((ret = v4l_compat_translate_ioctl(file, cmd, arg,
+			__uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
 			uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n",
 				  cmd);
 		return ret;
@@ -988,6 +988,12 @@
 	return ret;
 }
 
+static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
+			      unsigned int cmd, void *arg)
+{
+	return __uvc_v4l2_do_ioctl(file, cmd, arg);
+}
+
 static int uvc_v4l2_ioctl(struct inode *inode, struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 928cb40..f13c0a9 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -57,8 +57,7 @@
  */
 
 static int
-get_v4l_control(struct inode            *inode,
-		struct file             *file,
+get_v4l_control(struct file             *file,
 		int			cid,
 		v4l2_kioctl             drv)
 {
@@ -67,12 +66,12 @@
 	int			err;
 
 	qctrl2.id = cid;
-	err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2);
+	err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
 	if (err < 0)
 		dprintk("VIDIOC_QUERYCTRL: %d\n", err);
 	if (err == 0 && !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) {
 		ctrl2.id = qctrl2.id;
-		err = drv(inode, file, VIDIOC_G_CTRL, &ctrl2);
+		err = drv(file, VIDIOC_G_CTRL, &ctrl2);
 		if (err < 0) {
 			dprintk("VIDIOC_G_CTRL: %d\n", err);
 			return 0;
@@ -85,8 +84,7 @@
 }
 
 static int
-set_v4l_control(struct inode            *inode,
-		struct file             *file,
+set_v4l_control(struct file             *file,
 		int			cid,
 		int			value,
 		v4l2_kioctl             drv)
@@ -96,7 +94,7 @@
 	int			err;
 
 	qctrl2.id = cid;
-	err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2);
+	err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
 	if (err < 0)
 		dprintk("VIDIOC_QUERYCTRL: %d\n", err);
 	if (err == 0 &&
@@ -114,7 +112,7 @@
 			 + 32767)
 			/ 65535;
 		ctrl2.value += qctrl2.minimum;
-		err = drv(inode, file, VIDIOC_S_CTRL, &ctrl2);
+		err = drv(file, VIDIOC_S_CTRL, &ctrl2);
 		if (err < 0)
 			dprintk("VIDIOC_S_CTRL: %d\n", err);
 	}
@@ -222,7 +220,6 @@
 }
 
 static int count_inputs(
-			struct inode *inode,
 			struct file *file,
 			v4l2_kioctl drv)
 {
@@ -232,14 +229,13 @@
 	for (i = 0;; i++) {
 		memset(&input2, 0, sizeof(input2));
 		input2.index = i;
-		if (0 != drv(inode, file, VIDIOC_ENUMINPUT, &input2))
+		if (0 != drv(file, VIDIOC_ENUMINPUT, &input2))
 			break;
 	}
 	return i;
 }
 
 static int check_size(
-		struct inode *inode,
 		struct file *file,
 		v4l2_kioctl drv,
 		int *maxw,
@@ -252,14 +248,14 @@
 	memset(&fmt2, 0, sizeof(fmt2));
 
 	desc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	if (0 != drv(inode, file, VIDIOC_ENUM_FMT, &desc2))
+	if (0 != drv(file, VIDIOC_ENUM_FMT, &desc2))
 		goto done;
 
 	fmt2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	fmt2.fmt.pix.width       = 10000;
 	fmt2.fmt.pix.height      = 10000;
 	fmt2.fmt.pix.pixelformat = desc2.pixelformat;
-	if (0 != drv(inode, file, VIDIOC_TRY_FMT, &fmt2))
+	if (0 != drv(file, VIDIOC_TRY_FMT, &fmt2))
 		goto done;
 
 	*maxw = fmt2.fmt.pix.width;
@@ -273,7 +269,6 @@
 
 static noinline int v4l1_compat_get_capabilities(
 					struct video_capability *cap,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -289,13 +284,13 @@
 	memset(cap, 0, sizeof(*cap));
 	memset(&fbuf, 0, sizeof(fbuf));
 
-	err = drv(inode, file, VIDIOC_QUERYCAP, cap2);
+	err = drv(file, VIDIOC_QUERYCAP, cap2);
 	if (err < 0) {
 		dprintk("VIDIOCGCAP / VIDIOC_QUERYCAP: %d\n", err);
 		goto done;
 	}
 	if (cap2->capabilities & V4L2_CAP_VIDEO_OVERLAY) {
-		err = drv(inode, file, VIDIOC_G_FBUF, &fbuf);
+		err = drv(file, VIDIOC_G_FBUF, &fbuf);
 		if (err < 0) {
 			dprintk("VIDIOCGCAP / VIDIOC_G_FBUF: %d\n", err);
 			memset(&fbuf, 0, sizeof(fbuf));
@@ -317,8 +312,8 @@
 	if (fbuf.capability & V4L2_FBUF_CAP_LIST_CLIPPING)
 		cap->type |= VID_TYPE_CLIPPING;
 
-	cap->channels  = count_inputs(inode, file, drv);
-	check_size(inode, file, drv,
+	cap->channels  = count_inputs(file, drv);
+	check_size(file, drv,
 		   &cap->maxwidth, &cap->maxheight);
 	cap->audios    =  0; /* FIXME */
 	cap->minwidth  = 48; /* FIXME */
@@ -331,7 +326,6 @@
 
 static noinline int v4l1_compat_get_frame_buffer(
 					struct video_buffer *buffer,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -341,7 +335,7 @@
 	memset(buffer, 0, sizeof(*buffer));
 	memset(&fbuf, 0, sizeof(fbuf));
 
-	err = drv(inode, file, VIDIOC_G_FBUF, &fbuf);
+	err = drv(file, VIDIOC_G_FBUF, &fbuf);
 	if (err < 0) {
 		dprintk("VIDIOCGFBUF / VIDIOC_G_FBUF: %d\n", err);
 		goto done;
@@ -386,7 +380,6 @@
 
 static noinline int v4l1_compat_set_frame_buffer(
 					struct video_buffer *buffer,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -415,7 +408,7 @@
 		break;
 	}
 	fbuf.fmt.bytesperline = buffer->bytesperline;
-	err = drv(inode, file, VIDIOC_S_FBUF, &fbuf);
+	err = drv(file, VIDIOC_S_FBUF, &fbuf);
 	if (err < 0)
 		dprintk("VIDIOCSFBUF / VIDIOC_S_FBUF: %d\n", err);
 	return err;
@@ -423,7 +416,6 @@
 
 static noinline int v4l1_compat_get_win_cap_dimensions(
 					struct video_window *win,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -438,7 +430,7 @@
 	memset(win, 0, sizeof(*win));
 
 	fmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
-	err = drv(inode, file, VIDIOC_G_FMT, fmt);
+	err = drv(file, VIDIOC_G_FMT, fmt);
 	if (err < 0)
 		dprintk("VIDIOCGWIN / VIDIOC_G_WIN: %d\n", err);
 	if (err == 0) {
@@ -453,7 +445,7 @@
 	}
 
 	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	err = drv(inode, file, VIDIOC_G_FMT, fmt);
+	err = drv(file, VIDIOC_G_FMT, fmt);
 	if (err < 0) {
 		dprintk("VIDIOCGWIN / VIDIOC_G_FMT: %d\n", err);
 		goto done;
@@ -472,7 +464,6 @@
 
 static noinline int v4l1_compat_set_win_cap_dimensions(
 					struct video_window *win,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -485,8 +476,8 @@
 		return err;
 	}
 	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	drv(inode, file, VIDIOC_STREAMOFF, &fmt->type);
-	err1 = drv(inode, file, VIDIOC_G_FMT, fmt);
+	drv(file, VIDIOC_STREAMOFF, &fmt->type);
+	err1 = drv(file, VIDIOC_G_FMT, fmt);
 	if (err1 < 0)
 		dprintk("VIDIOCSWIN / VIDIOC_G_FMT: %d\n", err1);
 	if (err1 == 0) {
@@ -494,7 +485,7 @@
 		fmt->fmt.pix.height = win->height;
 		fmt->fmt.pix.field  = V4L2_FIELD_ANY;
 		fmt->fmt.pix.bytesperline = 0;
-		err = drv(inode, file, VIDIOC_S_FMT, fmt);
+		err = drv(file, VIDIOC_S_FMT, fmt);
 		if (err < 0)
 			dprintk("VIDIOCSWIN / VIDIOC_S_FMT #1: %d\n",
 				err);
@@ -511,7 +502,7 @@
 	fmt->fmt.win.chromakey = win->chromakey;
 	fmt->fmt.win.clips     = (void __user *)win->clips;
 	fmt->fmt.win.clipcount = win->clipcount;
-	err2 = drv(inode, file, VIDIOC_S_FMT, fmt);
+	err2 = drv(file, VIDIOC_S_FMT, fmt);
 	if (err2 < 0)
 		dprintk("VIDIOCSWIN / VIDIOC_S_FMT #2: %d\n", err2);
 
@@ -525,7 +516,6 @@
 
 static noinline int v4l1_compat_turn_preview_on_off(
 					int *on,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -536,9 +526,9 @@
 		/* dirty hack time.  But v4l1 has no STREAMOFF
 		 * equivalent in the API, and this one at
 		 * least comes close ... */
-		drv(inode, file, VIDIOC_STREAMOFF, &captype);
+		drv(file, VIDIOC_STREAMOFF, &captype);
 	}
-	err = drv(inode, file, VIDIOC_OVERLAY, on);
+	err = drv(file, VIDIOC_OVERLAY, on);
 	if (err < 0)
 		dprintk("VIDIOCCAPTURE / VIDIOC_PREVIEW: %d\n", err);
 	return err;
@@ -546,7 +536,6 @@
 
 static noinline int v4l1_compat_get_input_info(
 					struct video_channel *chan,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -556,7 +545,7 @@
 
 	memset(&input2, 0, sizeof(input2));
 	input2.index = chan->channel;
-	err = drv(inode, file, VIDIOC_ENUMINPUT, &input2);
+	err = drv(file, VIDIOC_ENUMINPUT, &input2);
 	if (err < 0) {
 		dprintk("VIDIOCGCHAN / VIDIOC_ENUMINPUT: "
 			"channel=%d err=%d\n", chan->channel, err);
@@ -578,7 +567,7 @@
 		break;
 	}
 	chan->norm = 0;
-	err = drv(inode, file, VIDIOC_G_STD, &sid);
+	err = drv(file, VIDIOC_G_STD, &sid);
 	if (err < 0)
 		dprintk("VIDIOCGCHAN / VIDIOC_G_STD: %d\n", err);
 	if (err == 0) {
@@ -595,14 +584,13 @@
 
 static noinline int v4l1_compat_set_input(
 					struct video_channel *chan,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
 	int err;
 	v4l2_std_id sid = 0;
 
-	err = drv(inode, file, VIDIOC_S_INPUT, &chan->channel);
+	err = drv(file, VIDIOC_S_INPUT, &chan->channel);
 	if (err < 0)
 		dprintk("VIDIOCSCHAN / VIDIOC_S_INPUT: %d\n", err);
 	switch (chan->norm) {
@@ -617,7 +605,7 @@
 		break;
 	}
 	if (0 != sid) {
-		err = drv(inode, file, VIDIOC_S_STD, &sid);
+		err = drv(file, VIDIOC_S_STD, &sid);
 		if (err < 0)
 			dprintk("VIDIOCSCHAN / VIDIOC_S_STD: %d\n", err);
 	}
@@ -626,7 +614,6 @@
 
 static noinline int v4l1_compat_get_picture(
 					struct video_picture *pict,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -639,19 +626,19 @@
 		return err;
 	}
 
-	pict->brightness = get_v4l_control(inode, file,
+	pict->brightness = get_v4l_control(file,
 					   V4L2_CID_BRIGHTNESS, drv);
-	pict->hue = get_v4l_control(inode, file,
+	pict->hue = get_v4l_control(file,
 				    V4L2_CID_HUE, drv);
-	pict->contrast = get_v4l_control(inode, file,
+	pict->contrast = get_v4l_control(file,
 					 V4L2_CID_CONTRAST, drv);
-	pict->colour = get_v4l_control(inode, file,
+	pict->colour = get_v4l_control(file,
 				       V4L2_CID_SATURATION, drv);
-	pict->whiteness = get_v4l_control(inode, file,
+	pict->whiteness = get_v4l_control(file,
 					  V4L2_CID_WHITENESS, drv);
 
 	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	err = drv(inode, file, VIDIOC_G_FMT, fmt);
+	err = drv(file, VIDIOC_G_FMT, fmt);
 	if (err < 0) {
 		dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n", err);
 		goto done;
@@ -669,7 +656,6 @@
 
 static noinline int v4l1_compat_set_picture(
 					struct video_picture *pict,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -685,15 +671,15 @@
 	}
 	memset(&fbuf, 0, sizeof(fbuf));
 
-	set_v4l_control(inode, file,
+	set_v4l_control(file,
 			V4L2_CID_BRIGHTNESS, pict->brightness, drv);
-	set_v4l_control(inode, file,
+	set_v4l_control(file,
 			V4L2_CID_HUE, pict->hue, drv);
-	set_v4l_control(inode, file,
+	set_v4l_control(file,
 			V4L2_CID_CONTRAST, pict->contrast, drv);
-	set_v4l_control(inode, file,
+	set_v4l_control(file,
 			V4L2_CID_SATURATION, pict->colour, drv);
-	set_v4l_control(inode, file,
+	set_v4l_control(file,
 			V4L2_CID_WHITENESS, pict->whiteness, drv);
 	/*
 	 * V4L1 uses this ioctl to set both memory capture and overlay
@@ -703,7 +689,7 @@
 	 */
 
 	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	err = drv(inode, file, VIDIOC_G_FMT, fmt);
+	err = drv(file, VIDIOC_G_FMT, fmt);
 	/* If VIDIOC_G_FMT failed, then the driver likely doesn't
 	   support memory capture.  Trying to set the memory capture
 	   parameters would be pointless.  */
@@ -714,13 +700,13 @@
 		 palette_to_pixelformat(pict->palette)) {
 		fmt->fmt.pix.pixelformat = palette_to_pixelformat(
 			pict->palette);
-		mem_err = drv(inode, file, VIDIOC_S_FMT, fmt);
+		mem_err = drv(file, VIDIOC_S_FMT, fmt);
 		if (mem_err < 0)
 			dprintk("VIDIOCSPICT / VIDIOC_S_FMT: %d\n",
 				mem_err);
 	}
 
-	err = drv(inode, file, VIDIOC_G_FBUF, &fbuf);
+	err = drv(file, VIDIOC_G_FBUF, &fbuf);
 	/* If VIDIOC_G_FBUF failed, then the driver likely doesn't
 	   support overlay.  Trying to set the overlay parameters
 	   would be quite pointless.  */
@@ -731,7 +717,7 @@
 		 palette_to_pixelformat(pict->palette)) {
 		fbuf.fmt.pixelformat = palette_to_pixelformat(
 			pict->palette);
-		ovl_err = drv(inode, file, VIDIOC_S_FBUF, &fbuf);
+		ovl_err = drv(file, VIDIOC_S_FBUF, &fbuf);
 		if (ovl_err < 0)
 			dprintk("VIDIOCSPICT / VIDIOC_S_FBUF: %d\n",
 				ovl_err);
@@ -752,7 +738,6 @@
 
 static noinline int v4l1_compat_get_tuner(
 					struct video_tuner *tun,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -762,7 +747,7 @@
 	v4l2_std_id    		sid;
 
 	memset(&tun2, 0, sizeof(tun2));
-	err = drv(inode, file, VIDIOC_G_TUNER, &tun2);
+	err = drv(file, VIDIOC_G_TUNER, &tun2);
 	if (err < 0) {
 		dprintk("VIDIOCGTUNER / VIDIOC_G_TUNER: %d\n", err);
 		goto done;
@@ -778,7 +763,7 @@
 	for (i = 0; i < 64; i++) {
 		memset(&std2, 0, sizeof(std2));
 		std2.index = i;
-		if (0 != drv(inode, file, VIDIOC_ENUMSTD, &std2))
+		if (0 != drv(file, VIDIOC_ENUMSTD, &std2))
 			break;
 		if (std2.id & V4L2_STD_PAL)
 			tun->flags |= VIDEO_TUNER_PAL;
@@ -788,7 +773,7 @@
 			tun->flags |= VIDEO_TUNER_SECAM;
 	}
 
-	err = drv(inode, file, VIDIOC_G_STD, &sid);
+	err = drv(file, VIDIOC_G_STD, &sid);
 	if (err < 0)
 		dprintk("VIDIOCGTUNER / VIDIOC_G_STD: %d\n", err);
 	if (err == 0) {
@@ -811,7 +796,6 @@
 
 static noinline int v4l1_compat_select_tuner(
 					struct video_tuner *tun,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -821,7 +805,7 @@
 
 	t.index = tun->tuner;
 
-	err = drv(inode, file, VIDIOC_S_INPUT, &t);
+	err = drv(file, VIDIOC_S_INPUT, &t);
 	if (err < 0)
 		dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n", err);
 	return err;
@@ -829,7 +813,6 @@
 
 static noinline int v4l1_compat_get_frequency(
 					unsigned long *freq,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -838,7 +821,7 @@
 	memset(&freq2, 0, sizeof(freq2));
 
 	freq2.tuner = 0;
-	err = drv(inode, file, VIDIOC_G_FREQUENCY, &freq2);
+	err = drv(file, VIDIOC_G_FREQUENCY, &freq2);
 	if (err < 0)
 		dprintk("VIDIOCGFREQ / VIDIOC_G_FREQUENCY: %d\n", err);
 	if (0 == err)
@@ -848,7 +831,6 @@
 
 static noinline int v4l1_compat_set_frequency(
 					unsigned long *freq,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -856,9 +838,9 @@
 	struct v4l2_frequency   freq2;
 	memset(&freq2, 0, sizeof(freq2));
 
-	drv(inode, file, VIDIOC_G_FREQUENCY, &freq2);
+	drv(file, VIDIOC_G_FREQUENCY, &freq2);
 	freq2.frequency = *freq;
-	err = drv(inode, file, VIDIOC_S_FREQUENCY, &freq2);
+	err = drv(file, VIDIOC_S_FREQUENCY, &freq2);
 	if (err < 0)
 		dprintk("VIDIOCSFREQ / VIDIOC_S_FREQUENCY: %d\n", err);
 	return err;
@@ -866,7 +848,6 @@
 
 static noinline int v4l1_compat_get_audio(
 					struct video_audio *aud,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -876,7 +857,7 @@
 	struct v4l2_tuner	tun2;
 	memset(&aud2, 0, sizeof(aud2));
 
-	err = drv(inode, file, VIDIOC_G_AUDIO, &aud2);
+	err = drv(file, VIDIOC_G_AUDIO, &aud2);
 	if (err < 0) {
 		dprintk("VIDIOCGAUDIO / VIDIOC_G_AUDIO: %d\n", err);
 		goto done;
@@ -886,27 +867,27 @@
 	aud->name[sizeof(aud->name) - 1] = 0;
 	aud->audio = aud2.index;
 	aud->flags = 0;
-	i = get_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME, drv);
+	i = get_v4l_control(file, V4L2_CID_AUDIO_VOLUME, drv);
 	if (i >= 0) {
 		aud->volume = i;
 		aud->flags |= VIDEO_AUDIO_VOLUME;
 	}
-	i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BASS, drv);
+	i = get_v4l_control(file, V4L2_CID_AUDIO_BASS, drv);
 	if (i >= 0) {
 		aud->bass = i;
 		aud->flags |= VIDEO_AUDIO_BASS;
 	}
-	i = get_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE, drv);
+	i = get_v4l_control(file, V4L2_CID_AUDIO_TREBLE, drv);
 	if (i >= 0) {
 		aud->treble = i;
 		aud->flags |= VIDEO_AUDIO_TREBLE;
 	}
-	i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE, drv);
+	i = get_v4l_control(file, V4L2_CID_AUDIO_BALANCE, drv);
 	if (i >= 0) {
 		aud->balance = i;
 		aud->flags |= VIDEO_AUDIO_BALANCE;
 	}
-	i = get_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE, drv);
+	i = get_v4l_control(file, V4L2_CID_AUDIO_MUTE, drv);
 	if (i >= 0) {
 		if (i)
 			aud->flags |= VIDEO_AUDIO_MUTE;
@@ -914,13 +895,13 @@
 	}
 	aud->step = 1;
 	qctrl2.id = V4L2_CID_AUDIO_VOLUME;
-	if (drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2) == 0 &&
+	if (drv(file, VIDIOC_QUERYCTRL, &qctrl2) == 0 &&
 	    !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED))
 		aud->step = qctrl2.step;
 	aud->mode = 0;
 
 	memset(&tun2, 0, sizeof(tun2));
-	err = drv(inode, file, VIDIOC_G_TUNER, &tun2);
+	err = drv(file, VIDIOC_G_TUNER, &tun2);
 	if (err < 0) {
 		dprintk("VIDIOCGAUDIO / VIDIOC_G_TUNER: %d\n", err);
 		err = 0;
@@ -939,7 +920,6 @@
 
 static noinline int v4l1_compat_set_audio(
 					struct video_audio *aud,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -951,24 +931,24 @@
 	memset(&tun2, 0, sizeof(tun2));
 
 	aud2.index = aud->audio;
-	err = drv(inode, file, VIDIOC_S_AUDIO, &aud2);
+	err = drv(file, VIDIOC_S_AUDIO, &aud2);
 	if (err < 0) {
 		dprintk("VIDIOCSAUDIO / VIDIOC_S_AUDIO: %d\n", err);
 		goto done;
 	}
 
-	set_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME,
+	set_v4l_control(file, V4L2_CID_AUDIO_VOLUME,
 			aud->volume, drv);
-	set_v4l_control(inode, file, V4L2_CID_AUDIO_BASS,
+	set_v4l_control(file, V4L2_CID_AUDIO_BASS,
 			aud->bass, drv);
-	set_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE,
+	set_v4l_control(file, V4L2_CID_AUDIO_TREBLE,
 			aud->treble, drv);
-	set_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE,
+	set_v4l_control(file, V4L2_CID_AUDIO_BALANCE,
 			aud->balance, drv);
-	set_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE,
+	set_v4l_control(file, V4L2_CID_AUDIO_MUTE,
 			!!(aud->flags & VIDEO_AUDIO_MUTE), drv);
 
-	err = drv(inode, file, VIDIOC_G_TUNER, &tun2);
+	err = drv(file, VIDIOC_G_TUNER, &tun2);
 	if (err < 0)
 		dprintk("VIDIOCSAUDIO / VIDIOC_G_TUNER: %d\n", err);
 	if (err == 0) {
@@ -985,7 +965,7 @@
 			tun2.audmode = V4L2_TUNER_MODE_LANG2;
 			break;
 		}
-		err = drv(inode, file, VIDIOC_S_TUNER, &tun2);
+		err = drv(file, VIDIOC_S_TUNER, &tun2);
 		if (err < 0)
 			dprintk("VIDIOCSAUDIO / VIDIOC_S_TUNER: %d\n", err);
 	}
@@ -996,7 +976,6 @@
 
 static noinline int v4l1_compat_capture_frame(
 					struct video_mmap *mm,
-					struct inode *inode,
 					struct file *file,
 					v4l2_kioctl drv)
 {
@@ -1013,7 +992,7 @@
 	memset(&buf, 0, sizeof(buf));
 
 	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	err = drv(inode, file, VIDIOC_G_FMT, fmt);
+	err = drv(file, VIDIOC_G_FMT, fmt);
 	if (err < 0) {
 		dprintk("VIDIOCMCAPTURE / VIDIOC_G_FMT: %d\n", err);
 		goto done;
@@ -1029,7 +1008,7 @@
 			palette_to_pixelformat(mm->format);
 		fmt->fmt.pix.field = V4L2_FIELD_ANY;
 		fmt->fmt.pix.bytesperline = 0;
-		err = drv(inode, file, VIDIOC_S_FMT, fmt);
+		err = drv(file, VIDIOC_S_FMT, fmt);
 		if (err < 0) {
 			dprintk("VIDIOCMCAPTURE / VIDIOC_S_FMT: %d\n", err);
 			goto done;
@@ -1037,17 +1016,17 @@
 	}
 	buf.index = mm->frame;
 	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	err = drv(inode, file, VIDIOC_QUERYBUF, &buf);
+	err = drv(file, VIDIOC_QUERYBUF, &buf);
 	if (err < 0) {
 		dprintk("VIDIOCMCAPTURE / VIDIOC_QUERYBUF: %d\n", err);
 		goto done;
 	}
-	err = drv(inode, file, VIDIOC_QBUF, &buf);
+	err = drv(file, VIDIOC_QBUF, &buf);
 	if (err < 0) {
 		dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n", err);
 		goto done;
 	}
-	err = drv(inode, file, VIDIOC_STREAMON, &captype);
+	err = drv(file, VIDIOC_STREAMON, &captype);
 	if (err < 0)
 		dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n", err);
 done:
@@ -1057,7 +1036,6 @@
 
 static noinline int v4l1_compat_sync(
 				int *i,
-				struct inode *inode,
 				struct file *file,
 				v4l2_kioctl drv)
 {
@@ -1069,7 +1047,7 @@
 	memset(&buf, 0, sizeof(buf));
 	buf.index = *i;
 	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	err = drv(inode, file, VIDIOC_QUERYBUF, &buf);
+	err = drv(file, VIDIOC_QUERYBUF, &buf);
 	if (err < 0) {
 		/*  No such buffer */
 		dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
@@ -1082,7 +1060,7 @@
 	}
 
 	/* make sure capture actually runs so we don't block forever */
-	err = drv(inode, file, VIDIOC_STREAMON, &captype);
+	err = drv(file, VIDIOC_STREAMON, &captype);
 	if (err < 0) {
 		dprintk("VIDIOCSYNC / VIDIOC_STREAMON: %d\n", err);
 		goto done;
@@ -1096,7 +1074,7 @@
 		if (err < 0 ||	/* error or sleep was interrupted  */
 		    err == 0)	/* timeout? Shouldn't occur.  */
 			break;
-		err = drv(inode, file, VIDIOC_QUERYBUF, &buf);
+		err = drv(file, VIDIOC_QUERYBUF, &buf);
 		if (err < 0)
 			dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
 	}
@@ -1104,7 +1082,7 @@
 	if (!(buf.flags & V4L2_BUF_FLAG_DONE)) /* not done */
 		goto done;
 	do {
-		err = drv(inode, file, VIDIOC_DQBUF, &buf);
+		err = drv(file, VIDIOC_DQBUF, &buf);
 		if (err < 0)
 			dprintk("VIDIOCSYNC / VIDIOC_DQBUF: %d\n", err);
 	} while (err == 0 && buf.index != *i);
@@ -1114,7 +1092,6 @@
 
 static noinline int v4l1_compat_get_vbi_format(
 				struct vbi_format *fmt,
-				struct inode *inode,
 				struct file *file,
 				v4l2_kioctl drv)
 {
@@ -1128,7 +1105,7 @@
 	}
 	fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 
-	err = drv(inode, file, VIDIOC_G_FMT, fmt2);
+	err = drv(file, VIDIOC_G_FMT, fmt2);
 	if (err < 0) {
 		dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err);
 		goto done;
@@ -1153,7 +1130,6 @@
 
 static noinline int v4l1_compat_set_vbi_format(
 				struct vbi_format *fmt,
-				struct inode *inode,
 				struct file *file,
 				v4l2_kioctl drv)
 {
@@ -1179,7 +1155,7 @@
 	fmt2->fmt.vbi.start[1]         = fmt->start[1];
 	fmt2->fmt.vbi.count[1]         = fmt->count[1];
 	fmt2->fmt.vbi.flags            = fmt->flags;
-	err = drv(inode, file, VIDIOC_TRY_FMT, fmt2);
+	err = drv(file, VIDIOC_TRY_FMT, fmt2);
 	if (err < 0) {
 		dprintk("VIDIOCSVBIFMT / VIDIOC_TRY_FMT: %d\n", err);
 		goto done;
@@ -1196,7 +1172,7 @@
 		err = -EINVAL;
 		goto done;
 	}
-	err = drv(inode, file, VIDIOC_S_FMT, fmt2);
+	err = drv(file, VIDIOC_S_FMT, fmt2);
 	if (err < 0)
 		dprintk("VIDIOCSVBIFMT / VIDIOC_S_FMT: %d\n", err);
 done:
@@ -1208,8 +1184,7 @@
  *	This function is exported.
  */
 int
-v4l_compat_translate_ioctl(struct inode         *inode,
-			   struct file		*file,
+v4l_compat_translate_ioctl(struct file		*file,
 			   int			cmd,
 			   void			*arg,
 			   v4l2_kioctl          drv)
@@ -1218,64 +1193,64 @@
 
 	switch (cmd) {
 	case VIDIOCGCAP:	/* capability */
-		err = v4l1_compat_get_capabilities(arg, inode, file, drv);
+		err = v4l1_compat_get_capabilities(arg, file, drv);
 		break;
 	case VIDIOCGFBUF: /*  get frame buffer  */
-		err = v4l1_compat_get_frame_buffer(arg, inode, file, drv);
+		err = v4l1_compat_get_frame_buffer(arg, file, drv);
 		break;
 	case VIDIOCSFBUF: /*  set frame buffer  */
-		err = v4l1_compat_set_frame_buffer(arg, inode, file, drv);
+		err = v4l1_compat_set_frame_buffer(arg, file, drv);
 		break;
 	case VIDIOCGWIN: /*  get window or capture dimensions  */
-		err = v4l1_compat_get_win_cap_dimensions(arg, inode, file, drv);
+		err = v4l1_compat_get_win_cap_dimensions(arg, file, drv);
 		break;
 	case VIDIOCSWIN: /*  set window and/or capture dimensions  */
-		err = v4l1_compat_set_win_cap_dimensions(arg, inode, file, drv);
+		err = v4l1_compat_set_win_cap_dimensions(arg, file, drv);
 		break;
 	case VIDIOCCAPTURE: /*  turn on/off preview  */
-		err = v4l1_compat_turn_preview_on_off(arg, inode, file, drv);
+		err = v4l1_compat_turn_preview_on_off(arg, file, drv);
 		break;
 	case VIDIOCGCHAN: /*  get input information  */
-		err = v4l1_compat_get_input_info(arg, inode, file, drv);
+		err = v4l1_compat_get_input_info(arg, file, drv);
 		break;
 	case VIDIOCSCHAN: /*  set input  */
-		err = v4l1_compat_set_input(arg, inode, file, drv);
+		err = v4l1_compat_set_input(arg, file, drv);
 		break;
 	case VIDIOCGPICT: /*  get tone controls & partial capture format  */
-		err = v4l1_compat_get_picture(arg, inode, file, drv);
+		err = v4l1_compat_get_picture(arg, file, drv);
 		break;
 	case VIDIOCSPICT: /*  set tone controls & partial capture format  */
-		err = v4l1_compat_set_picture(arg, inode, file, drv);
+		err = v4l1_compat_set_picture(arg, file, drv);
 		break;
 	case VIDIOCGTUNER: /*  get tuner information  */
-		err = v4l1_compat_get_tuner(arg, inode, file, drv);
+		err = v4l1_compat_get_tuner(arg, file, drv);
 		break;
 	case VIDIOCSTUNER: /*  select a tuner input  */
-		err = v4l1_compat_select_tuner(arg, inode, file, drv);
+		err = v4l1_compat_select_tuner(arg, file, drv);
 		break;
 	case VIDIOCGFREQ: /*  get frequency  */
-		err = v4l1_compat_get_frequency(arg, inode, file, drv);
+		err = v4l1_compat_get_frequency(arg, file, drv);
 		break;
 	case VIDIOCSFREQ: /*  set frequency  */
-		err = v4l1_compat_set_frequency(arg, inode, file, drv);
+		err = v4l1_compat_set_frequency(arg, file, drv);
 		break;
 	case VIDIOCGAUDIO: /*  get audio properties/controls  */
-		err = v4l1_compat_get_audio(arg, inode, file, drv);
+		err = v4l1_compat_get_audio(arg, file, drv);
 		break;
 	case VIDIOCSAUDIO: /*  set audio controls  */
-		err = v4l1_compat_set_audio(arg, inode, file, drv);
+		err = v4l1_compat_set_audio(arg, file, drv);
 		break;
 	case VIDIOCMCAPTURE: /*  capture a frame  */
-		err = v4l1_compat_capture_frame(arg, inode, file, drv);
+		err = v4l1_compat_capture_frame(arg, file, drv);
 		break;
 	case VIDIOCSYNC: /*  wait for a frame  */
-		err = v4l1_compat_sync(arg, inode, file, drv);
+		err = v4l1_compat_sync(arg, file, drv);
 		break;
 	case VIDIOCGVBIFMT: /* query VBI data capture format */
-		err = v4l1_compat_get_vbi_format(arg, inode, file, drv);
+		err = v4l1_compat_get_vbi_format(arg, file, drv);
 		break;
 	case VIDIOCSVBIFMT:
-		err = v4l1_compat_set_vbi_format(arg, inode, file, drv);
+		err = v4l1_compat_set_vbi_format(arg, file, drv);
 		break;
 	default:
 		err = -ENOIOCTLCMD;
diff --git a/drivers/media/video/v4l2-int-device.c b/drivers/media/video/v4l2-int-device.c
index 0e45499..a935bae 100644
--- a/drivers/media/video/v4l2-int-device.c
+++ b/drivers/media/video/v4l2-int-device.c
@@ -32,7 +32,7 @@
 static DEFINE_MUTEX(mutex);
 static LIST_HEAD(int_list);
 
-static void v4l2_int_device_try_attach_all(void)
+void v4l2_int_device_try_attach_all(void)
 {
 	struct v4l2_int_device *m, *s;
 
@@ -66,6 +66,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
 
 static int ioctl_sort_cmp(const void *a, const void *b)
 {
@@ -144,6 +145,7 @@
 		find_ioctl(d->u.slave, cmd,
 			   (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
 }
+EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
 
 static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
 {
@@ -156,5 +158,6 @@
 		find_ioctl(d->u.slave, cmd,
 			   (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
 }
+EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 155c9d7..710e1a4 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -625,13 +625,13 @@
 	return -EINVAL;
 }
 
-static int __video_do_ioctl(struct inode *inode, struct file *file,
+static int __video_do_ioctl(struct file *file,
 		unsigned int cmd, void *arg)
 {
 	struct video_device *vfd = video_devdata(file);
 	const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
-	void                 *fh = file->private_data;
-	int                  ret = -EINVAL;
+	void *fh = file->private_data;
+	int ret = -EINVAL;
 
 	if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
 				!(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
@@ -675,7 +675,7 @@
 	 V4L2 ioctls.
 	 ********************************************************/
 	if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
-		return v4l_compat_translate_ioctl(inode, file, cmd, arg,
+		return v4l_compat_translate_ioctl(file, cmd, arg,
 						__video_do_ioctl);
 #endif
 
@@ -1768,7 +1768,7 @@
 	return ret;
 }
 
-int video_ioctl2(struct inode *inode, struct file *file,
+int __video_ioctl2(struct file *file,
 	       unsigned int cmd, unsigned long arg)
 {
 	char	sbuf[128];
@@ -1832,7 +1832,7 @@
 	}
 
 	/* Handles IOCTL */
-	err = __video_do_ioctl(inode, file, cmd, parg);
+	err = __video_do_ioctl(file, cmd, parg);
 	if (err == -ENOIOCTLCMD)
 		err = -EINVAL;
 	if (is_ext_ctrl) {
@@ -1860,4 +1860,11 @@
 	kfree(mbuf);
 	return err;
 }
+EXPORT_SYMBOL(__video_ioctl2);
+
+int video_ioctl2(struct inode *inode, struct file *file,
+	       unsigned int cmd, unsigned long arg)
+{
+	return __video_ioctl2(file, cmd, arg);
+}
 EXPORT_SYMBOL(video_ioctl2);
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index b56cffc..0e7dcba 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -126,7 +126,6 @@
 	mutex_lock(&dvb->lock);
 	dvb->nfeeds--;
 	if (0 == dvb->nfeeds  &&  NULL != dvb->thread) {
-		// FIXME: cx8802_cancel_buffers(dev);
 		err = kthread_stop(dvb->thread);
 		dvb->thread = NULL;
 	}
@@ -134,30 +133,38 @@
 	return err;
 }
 
-/* ------------------------------------------------------------------ */
-
-int videobuf_dvb_register(struct videobuf_dvb *dvb,
+static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
 			  struct module *module,
 			  void *adapter_priv,
 			  struct device *device,
-			  short *adapter_nr)
+			  char *adapter_name,
+			  short *adapter_nr,
+			  int mfe_shared)
 {
 	int result;
 
-	mutex_init(&dvb->lock);
+	mutex_init(&fe->lock);
 
 	/* register adapter */
-	result = dvb_register_adapter(&dvb->adapter, dvb->name, module, device,
-				      adapter_nr);
+	result = dvb_register_adapter(&fe->adapter, adapter_name, module,
+		device, adapter_nr);
 	if (result < 0) {
 		printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
-		       dvb->name, result);
-		goto fail_adapter;
+		       adapter_name, result);
 	}
-	dvb->adapter.priv = adapter_priv;
+	fe->adapter.priv = adapter_priv;
+	fe->adapter.mfe_shared = mfe_shared;
+
+	return result;
+}
+
+static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
+	struct videobuf_dvb *dvb)
+{
+	int result;
 
 	/* register frontend */
-	result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
+	result = dvb_register_frontend(adapter, dvb->frontend);
 	if (result < 0) {
 		printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
 		       dvb->name, result);
@@ -183,7 +190,8 @@
 	dvb->dmxdev.filternum    = 256;
 	dvb->dmxdev.demux        = &dvb->demux.dmx;
 	dvb->dmxdev.capabilities = 0;
-	result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
+	result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
+
 	if (result < 0) {
 		printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
 		       dvb->name, result);
@@ -214,7 +222,11 @@
 	}
 
 	/* register network adapter */
-	dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+	dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+	if (dvb->net.dvbdev == NULL) {
+		result = -ENOMEM;
+		goto fail_fe_conn;
+	}
 	return 0;
 
 fail_fe_conn:
@@ -229,30 +241,157 @@
 	dvb_unregister_frontend(dvb->frontend);
 fail_frontend:
 	dvb_frontend_detach(dvb->frontend);
-	dvb_unregister_adapter(&dvb->adapter);
-fail_adapter:
+	dvb->frontend = NULL;
+
 	return result;
 }
 
-void videobuf_dvb_unregister(struct videobuf_dvb *dvb)
-{
-	dvb_net_release(&dvb->net);
-	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
-	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
-	dvb_dmxdev_release(&dvb->dmxdev);
-	dvb_dmx_release(&dvb->demux);
-	dvb_unregister_frontend(dvb->frontend);
-	dvb_frontend_detach(dvb->frontend);
-	dvb_unregister_adapter(&dvb->adapter);
-}
-
-EXPORT_SYMBOL(videobuf_dvb_register);
-EXPORT_SYMBOL(videobuf_dvb_unregister);
-
 /* ------------------------------------------------------------------ */
-/*
- * Local variables:
- * c-basic-offset: 8
- * compile-command: "make DVB=1"
- * End:
- */
+/* Register a single adapter and one or more frontends */
+int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
+			  struct module *module,
+			  void *adapter_priv,
+			  struct device *device,
+			  short *adapter_nr,
+			  int mfe_shared)
+{
+	struct list_head *list, *q;
+	struct videobuf_dvb_frontend *fe;
+	int res;
+
+	fe = videobuf_dvb_get_frontend(f, 1);
+	if (!fe) {
+		printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
+		return -EINVAL;
+	}
+
+	/* Bring up the adapter */
+	res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
+		fe->dvb.name, adapter_nr, mfe_shared);
+	if (res < 0) {
+		printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
+		return res;
+	}
+
+	/* Attach all of the frontends to the adapter */
+	mutex_lock(&f->lock);
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+		res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
+		if (res < 0) {
+			printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
+				fe->dvb.name, res);
+			goto err;
+		}
+	}
+	mutex_unlock(&f->lock);
+	return 0;
+
+err:
+	mutex_unlock(&f->lock);
+	videobuf_dvb_unregister_bus(f);
+	return res;
+}
+EXPORT_SYMBOL(videobuf_dvb_register_bus);
+
+void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
+{
+	videobuf_dvb_dealloc_frontends(f);
+
+	dvb_unregister_adapter(&f->adapter);
+}
+EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
+
+struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
+	struct videobuf_dvb_frontends *f, int id)
+{
+	struct list_head *list, *q;
+	struct videobuf_dvb_frontend *fe, *ret = NULL;
+
+	mutex_lock(&f->lock);
+
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+		if (fe->id == id) {
+			ret = fe;
+			break;
+		}
+	}
+
+	mutex_unlock(&f->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(videobuf_dvb_get_frontend);
+
+int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
+	struct dvb_frontend *p)
+{
+	struct list_head *list, *q;
+	struct videobuf_dvb_frontend *fe = NULL;
+	int ret = 0;
+
+	mutex_lock(&f->lock);
+
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+		if (fe->dvb.frontend == p) {
+			ret = fe->id;
+			break;
+		}
+	}
+
+	mutex_unlock(&f->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(videobuf_dvb_find_frontend);
+
+struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
+	struct videobuf_dvb_frontends *f, int id)
+{
+	struct videobuf_dvb_frontend *fe;
+
+	fe = kzalloc(sizeof(struct videobuf_dvb_frontend), GFP_KERNEL);
+	if (fe == NULL)
+		goto fail_alloc;
+
+	fe->id = id;
+	mutex_init(&fe->dvb.lock);
+
+	mutex_lock(&f->lock);
+	list_add_tail(&fe->felist, &f->felist);
+	mutex_unlock(&f->lock);
+
+fail_alloc:
+	return fe;
+}
+EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
+
+void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
+{
+	struct list_head *list, *q;
+	struct videobuf_dvb_frontend *fe;
+
+	mutex_lock(&f->lock);
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+		if (fe->dvb.net.dvbdev) {
+			dvb_net_release(&fe->dvb.net);
+			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+				&fe->dvb.fe_mem);
+			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+				&fe->dvb.fe_hw);
+			dvb_dmxdev_release(&fe->dvb.dmxdev);
+			dvb_dmx_release(&fe->dvb.demux);
+			dvb_unregister_frontend(fe->dvb.frontend);
+		}
+		if (fe->dvb.frontend)
+			/* always allocated, may have been reset */
+			dvb_frontend_detach(fe->dvb.frontend);
+		list_del(list); /* remove list entry */
+		kfree(fe);	/* free frontend allocation */
+	}
+	mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
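Illustrative only, not part of the patch: a minimal sketch of how a hypothetical bridge driver might use the multi-frontend entry points added above. mydev_dvb_init/mydev_dvb_fini and their parameters are invented names; the sketch assumes the caller has already initialized the frontends container (its mutex and list head, not shown in this patch) and fills in fe->dvb.name and fe->dvb.frontend before registering, since videobuf_dvb_register_bus() uses frontend id 1 to name the adapter.

	#include <media/videobuf-dvb.h>

	/* Hypothetical driver hook; f, owner, priv, dev and adapter_nr are
	 * supplied by the (invented) caller.  Error handling is trimmed. */
	static int mydev_dvb_init(struct videobuf_dvb_frontends *f,
				  struct module *owner, void *priv,
				  struct device *dev, short *adapter_nr)
	{
		struct videobuf_dvb_frontend *fe;

		/* one allocation per hardware frontend; id 1 also names the adapter */
		fe = videobuf_dvb_alloc_frontend(f, 1);
		if (!fe)
			return -ENOMEM;

		/* a real driver would attach its demod/tuner here and fill in
		 * fe->dvb.name and fe->dvb.frontend before registering */

		/* registers the adapter once, then every allocated frontend */
		return videobuf_dvb_register_bus(f, owner, priv, dev, adapter_nr, 0);
	}

	static void mydev_dvb_fini(struct videobuf_dvb_frontends *f)
	{
		/* tears down each frontend and unregisters the shared adapter */
		videobuf_dvb_unregister_bus(f);
	}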
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 65c8af1..e15e48f 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -128,12 +128,56 @@
 	int   depth;
 };
 
-static struct vivi_fmt format = {
-	.name     = "4:2:2, packed, YUYV",
-	.fourcc   = V4L2_PIX_FMT_YUYV,
-	.depth    = 16,
+static struct vivi_fmt formats[] = {
+	{
+		.name     = "4:2:2, packed, YUYV",
+		.fourcc   = V4L2_PIX_FMT_YUYV,
+		.depth    = 16,
+	},
+	{
+		.name     = "4:2:2, packed, UYVY",
+		.fourcc   = V4L2_PIX_FMT_UYVY,
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB565 (LE)",
+		.fourcc   = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB565 (BE)",
+		.fourcc   = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB555 (LE)",
+		.fourcc   = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB555 (BE)",
+		.fourcc   = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+		.depth    = 16,
+	},
 };
 
+static struct vivi_fmt *get_format(struct v4l2_format *f)
+{
+	struct vivi_fmt *fmt;
+	unsigned int k;
+
+	for (k = 0; k < ARRAY_SIZE(formats); k++) {
+		fmt = &formats[k];
+		if (fmt->fourcc == f->fmt.pix.pixelformat)
+			break;
+	}
+
+	if (k == ARRAY_SIZE(formats))
+		return NULL;
+
+	return &formats[k];
+}
+
 struct sg_to_addr {
 	int pos;
 	struct scatterlist *sg;
@@ -190,6 +234,7 @@
 	struct videobuf_queue      vb_vidq;
 
 	enum v4l2_buf_type         type;
+	unsigned char              bars[8][3];
 };
 
 /* ------------------------------------------------------------------
@@ -234,13 +279,107 @@
 #define TSTAMP_MAX_Y TSTAMP_MIN_Y+15
 #define TSTAMP_MIN_X 64
 
-static void gen_line(char *basep, int inipos, int wmax,
+static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
+{
+	unsigned char r_y, g_u, b_v;
+	unsigned char *p;
+	int color;
+
+	r_y = fh->bars[colorpos][0]; /* R or precalculated Y */
+	g_u = fh->bars[colorpos][1]; /* G or precalculated U */
+	b_v = fh->bars[colorpos][2]; /* B or precalculated V */
+
+	for (color = 0; color < 4; color++) {
+		p = buf + color;
+
+		switch (fh->fmt->fourcc) {
+		case V4L2_PIX_FMT_YUYV:
+			switch (color) {
+			case 0:
+			case 2:
+				*p = r_y;
+				break;
+			case 1:
+				*p = g_u;
+				break;
+			case 3:
+				*p = b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_UYVY:
+			switch (color) {
+			case 1:
+			case 3:
+				*p = r_y;
+				break;
+			case 0:
+				*p = g_u;
+				break;
+			case 2:
+				*p = b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB565:
+			switch (color) {
+			case 0:
+			case 2:
+				*p = (g_u << 5) | b_v;
+				break;
+			case 1:
+			case 3:
+				*p = (r_y << 3) | (g_u >> 3);
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB565X:
+			switch (color) {
+			case 0:
+			case 2:
+				*p = (r_y << 3) | (g_u >> 3);
+				break;
+			case 1:
+			case 3:
+				*p = (g_u << 5) | b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB555:
+			switch (color) {
+			case 0:
+			case 2:
+				*p = (g_u << 5) | b_v;
+				break;
+			case 1:
+			case 3:
+				*p = (r_y << 2) | (g_u >> 3);
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB555X:
+			switch (color) {
+			case 0:
+			case 2:
+				*p = (r_y << 2) | (g_u >> 3);
+				break;
+			case 1:
+			case 3:
+				*p = (g_u << 5) | b_v;
+				break;
+			}
+			break;
+		}
+	}
+}
+
+static void gen_line(struct vivi_fh *fh, char *basep, int inipos, int wmax,
 		int hmax, int line, int count, char *timestr)
 {
-	int  w, i, j, y;
+	int  w, i, j;
 	int pos = inipos;
-	char *p, *s;
-	u8   chr, r, g, b, color;
+	char *s;
+	u8 chr;
 
 	/* We will just duplicate the second pixel at the packet */
 	wmax /= 2;
@@ -248,27 +387,9 @@
 	/* Generate a standard color bar pattern */
 	for (w = 0; w < wmax; w++) {
 		int colorpos = ((w + count) * 8/(wmax + 1)) % 8;
-		r = bars[colorpos][0];
-		g = bars[colorpos][1];
-		b = bars[colorpos][2];
 
-		for (color = 0; color < 4; color++) {
-			p = basep + pos;
-
-			switch (color) {
-			case 0:
-			case 2:
-				*p = TO_Y(r, g, b);	/* Luma */
-				break;
-			case 1:
-				*p = TO_U(r, g, b);	/* Cb */
-				break;
-			case 3:
-				*p = TO_V(r, g, b);	/* Cr */
-				break;
-			}
-			pos++;
-		}
+		gen_twopix(fh, basep + pos, colorpos);
+		pos += 4; /* only 16 bpp supported for now */
 	}
 
 	/* Checks if it is possible to show timestamp */
@@ -283,38 +404,12 @@
 		for (s = timestr; *s; s++) {
 			chr = rom8x16_bits[(*s-0x30)*16+line-TSTAMP_MIN_Y];
 			for (i = 0; i < 7; i++) {
-				if (chr & 1 << (7 - i)) {
-					/* Font color*/
-					r = 0;
-					g = 198;
-					b = 0;
-				} else {
-					/* Background color */
-					r = bars[BLACK][0];
-					g = bars[BLACK][1];
-					b = bars[BLACK][2];
-				}
-
 				pos = inipos + j * 2;
-				for (color = 0; color < 4; color++) {
-					p = basep + pos;
-
-					y = TO_Y(r, g, b);
-
-					switch (color) {
-					case 0:
-					case 2:
-						*p = TO_Y(r, g, b); /* Luma */
-						break;
-					case 1:
-						*p = TO_U(r, g, b); /* Cb */
-						break;
-					case 3:
-						*p = TO_V(r, g, b); /* Cr */
-						break;
-					}
-					pos++;
-				}
+				/* Draw white font on black background */
+				if (chr & 1 << (7 - i))
+					gen_twopix(fh, basep + pos, WHITE);
+				else
+					gen_twopix(fh, basep + pos, BLACK);
 				j++;
 			}
 		}
@@ -324,8 +419,9 @@
 	return;
 }
 
-static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
+static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
 {
+	struct vivi_dev *dev = fh->dev;
 	int h , pos = 0;
 	int hmax  = buf->vb.height;
 	int wmax  = buf->vb.width;
@@ -341,7 +437,7 @@
 		return;
 
 	for (h = 0; h < hmax; h++) {
-		gen_line(tmpbuf, 0, wmax, hmax, h, dev->mv_count,
+		gen_line(fh, tmpbuf, 0, wmax, hmax, h, dev->mv_count,
 			 dev->timestr);
 		memcpy(vbuf + pos, tmpbuf, wmax * 2);
 		pos += wmax*2;
@@ -410,7 +506,7 @@
 	do_gettimeofday(&buf->vb.ts);
 
 	/* Fill buffer */
-	vivi_fillbuff(dev, buf);
+	vivi_fillbuff(fh, buf);
 	dprintk(dev, 1, "filled buffer %p\n", buf);
 
 	wake_up(&buf->vb.done);
@@ -636,11 +732,15 @@
 static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv,
 					struct v4l2_fmtdesc *f)
 {
-	if (f->index > 0)
+	struct vivi_fmt *fmt;
+
+	if (f->index >= ARRAY_SIZE(formats))
 		return -EINVAL;
 
-	strlcpy(f->description, format.name, sizeof(f->description));
-	f->pixelformat = format.fourcc;
+	fmt = &formats[f->index];
+
+	strlcpy(f->description, fmt->name, sizeof(f->description));
+	f->pixelformat = fmt->fourcc;
 	return 0;
 }
 
@@ -670,13 +770,12 @@
 	enum v4l2_field field;
 	unsigned int maxw, maxh;
 
-	if (format.fourcc != f->fmt.pix.pixelformat) {
-		dprintk(dev, 1, "Fourcc format (0x%08x) invalid. "
-			"Driver accepts only 0x%08x\n",
-			f->fmt.pix.pixelformat, format.fourcc);
+	fmt = get_format(f);
+	if (!fmt) {
+		dprintk(dev, 1, "Fourcc format (0x%08x) invalid.\n",
+			f->fmt.pix.pixelformat);
 		return -EINVAL;
 	}
-	fmt = &format;
 
 	field = f->fmt.pix.field;
 
@@ -714,6 +813,8 @@
 {
 	struct vivi_fh  *fh = priv;
 	struct videobuf_queue *q = &fh->vb_vidq;
+	unsigned char r, g, b;
+	int k, is_yuv;
 
 	int ret = vidioc_try_fmt_vid_cap(file, fh, f);
 	if (ret < 0)
@@ -727,12 +828,49 @@
 		goto out;
 	}
 
-	fh->fmt           = &format;
+	fh->fmt           = get_format(f);
 	fh->width         = f->fmt.pix.width;
 	fh->height        = f->fmt.pix.height;
 	fh->vb_vidq.field = f->fmt.pix.field;
 	fh->type          = f->type;
 
+	/* precalculate color bar values to speed up rendering */
+	for (k = 0; k < 8; k++) {
+		r = bars[k][0];
+		g = bars[k][1];
+		b = bars[k][2];
+		is_yuv = 0;
+
+		switch (fh->fmt->fourcc) {
+		case V4L2_PIX_FMT_YUYV:
+		case V4L2_PIX_FMT_UYVY:
+			is_yuv = 1;
+			break;
+		case V4L2_PIX_FMT_RGB565:
+		case V4L2_PIX_FMT_RGB565X:
+			r >>= 3;
+			g >>= 2;
+			b >>= 3;
+			break;
+		case V4L2_PIX_FMT_RGB555:
+		case V4L2_PIX_FMT_RGB555X:
+			r >>= 3;
+			g >>= 3;
+			b >>= 3;
+			break;
+		}
+
+		if (is_yuv) {
+			fh->bars[k][0] = TO_Y(r, g, b);	/* Luma */
+			fh->bars[k][1] = TO_U(r, g, b);	/* Cb */
+			fh->bars[k][2] = TO_V(r, g, b);	/* Cr */
+		} else {
+			fh->bars[k][0] = r;
+			fh->bars[k][1] = g;
+			fh->bars[k][2] = b;
+		}
+	}
+
 	ret = 0;
 out:
 	mutex_unlock(&q->vb_lock);
@@ -886,8 +1024,6 @@
 	File operations for the device
    ------------------------------------------------------------------*/
 
-#define line_buf_size(norm) (norm_maxw(norm)*(format.depth+7)/8)
-
 static int vivi_open(struct inode *inode, struct file *file)
 {
 	int minor = iminor(inode);
@@ -936,7 +1072,7 @@
 	fh->dev      = dev;
 
 	fh->type     = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	fh->fmt      = &format;
+	fh->fmt      = &formats[0];
 	fh->width    = 640;
 	fh->height   = 480;
 
@@ -1027,11 +1163,11 @@
 
 		if (-1 != dev->vfd->minor) {
 			printk(KERN_INFO "%s: unregistering /dev/video%d\n",
-				VIVI_MODULE_NAME, dev->vfd->minor);
+				VIVI_MODULE_NAME, dev->vfd->num);
 			video_unregister_device(dev->vfd);
 		} else {
 			printk(KERN_INFO "%s: releasing /dev/video%d\n",
-				VIVI_MODULE_NAME, dev->vfd->minor);
+				VIVI_MODULE_NAME, dev->vfd->num);
 			video_device_release(dev->vfd);
 		}
 
@@ -1171,7 +1307,7 @@
 
 		dev->vfd = vfd;
 		printk(KERN_INFO "%s: V4L2 device registered as /dev/video%d\n",
-			VIVI_MODULE_NAME, vfd->minor);
+			VIVI_MODULE_NAME, vfd->num);
 	}
 
 	if (ret < 0) {
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 45be9ec..67aa0db 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -22,32 +22,21 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/types.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
 #include <asm/uaccess.h>
-
 #include <linux/i2c.h>
-
-#define I2C_NAME(x) (x)->name
-
-#include <linux/videodev.h>
 #include <media/v4l2-common.h>
+#include <media/v4l2-i2c-drv-legacy.h>
+#include <linux/videodev.h>
 #include <linux/video_decoder.h>
 
-#define I2C_VPX3220        0x86
-#define VPX3220_DEBUG	KERN_DEBUG "vpx3220: "
+MODULE_DESCRIPTION("vpx3220a/vpx3216b/vpx3214c video decoder driver");
+MODULE_AUTHOR("Laurent Pinchart");
+MODULE_LICENSE("GPL");
 
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-#define dprintk(num, format, args...) \
-	do { \
-		if (debug >= num) \
-			printk(format, ##args); \
-	} while (0)
-
 #define VPX_TIMEOUT_COUNT  10
 
 /* ----------------------------------------------------------------------- */
@@ -67,10 +56,8 @@
 static char *inputs[] = { "internal", "composite", "svideo" };
 
 /* ----------------------------------------------------------------------- */
-static inline int
-vpx3220_write (struct i2c_client *client,
-	       u8                 reg,
-	       u8                 value)
+
+static inline int vpx3220_write(struct i2c_client *client, u8 reg, u8 value)
 {
 	struct vpx3220 *decoder = i2c_get_clientdata(client);
 
@@ -78,15 +65,12 @@
 	return i2c_smbus_write_byte_data(client, reg, value);
 }
 
-static inline int
-vpx3220_read (struct i2c_client *client,
-	      u8                 reg)
+static inline int vpx3220_read(struct i2c_client *client, u8 reg)
 {
 	return i2c_smbus_read_byte_data(client, reg);
 }
 
-static int
-vpx3220_fp_status (struct i2c_client *client)
+static int vpx3220_fp_status(struct i2c_client *client)
 {
 	unsigned char status;
 	unsigned int i;
@@ -106,14 +90,11 @@
 	return -1;
 }
 
-static int
-vpx3220_fp_write (struct i2c_client *client,
-		  u8                 fpaddr,
-		  u16                data)
+static int vpx3220_fp_write(struct i2c_client *client, u8 fpaddr, u16 data)
 {
 	/* Write the 16-bit address to the FPWR register */
 	if (i2c_smbus_write_word_data(client, 0x27, swab16(fpaddr)) == -1) {
-		dprintk(1, VPX3220_DEBUG "%s: failed\n", __func__);
+		v4l_dbg(1, debug, client, "%s: failed\n", __func__);
 		return -1;
 	}
 
@@ -122,22 +103,20 @@
 
 	/* Write the 16-bit data to the FPDAT register */
 	if (i2c_smbus_write_word_data(client, 0x28, swab16(data)) == -1) {
-		dprintk(1, VPX3220_DEBUG "%s: failed\n", __func__);
+		v4l_dbg(1, debug, client, "%s: failed\n", __func__);
 		return -1;
 	}
 
 	return 0;
 }
 
-static u16
-vpx3220_fp_read (struct i2c_client *client,
-		 u16                fpaddr)
+static u16 vpx3220_fp_read(struct i2c_client *client, u16 fpaddr)
 {
 	s16 data;
 
 	/* Write the 16-bit address to the FPRD register */
 	if (i2c_smbus_write_word_data(client, 0x26, swab16(fpaddr)) == -1) {
-		dprintk(1, VPX3220_DEBUG "%s: failed\n", __func__);
+		v4l_dbg(1, debug, client, "%s: failed\n", __func__);
 		return -1;
 	}
 
@@ -147,25 +126,22 @@
 	/* Read the 16-bit data from the FPDAT register */
 	data = i2c_smbus_read_word_data(client, 0x28);
 	if (data == -1) {
-		dprintk(1, VPX3220_DEBUG "%s: failed\n", __func__);
+		v4l_dbg(1, debug, client, "%s: failed\n", __func__);
 		return -1;
 	}
 
 	return swab16(data);
 }
 
-static int
-vpx3220_write_block (struct i2c_client *client,
-		     const u8          *data,
-		     unsigned int       len)
+static int vpx3220_write_block(struct i2c_client *client, const u8 *data, unsigned int len)
 {
 	u8 reg;
 	int ret = -1;
 
 	while (len >= 2) {
 		reg = *data++;
-		if ((ret =
-		     vpx3220_write(client, reg, *data++)) < 0)
+		ret = vpx3220_write(client, reg, *data++);
+		if (ret < 0)
 			break;
 		len -= 2;
 	}
@@ -173,10 +149,8 @@
 	return ret;
 }
 
-static int
-vpx3220_write_fp_block (struct i2c_client *client,
-			const u16         *data,
-			unsigned int       len)
+static int vpx3220_write_fp_block(struct i2c_client *client,
+		const u16 *data, unsigned int len)
 {
 	u8 reg;
 	int ret = 0;
@@ -285,25 +259,20 @@
 	0x4b, 0x298,		/* PLL gain */
 };
 
-static void
-vpx3220_dump_i2c (struct i2c_client *client)
+static void vpx3220_dump_i2c(struct i2c_client *client)
 {
 	int len = sizeof(init_common);
 	const unsigned char *data = init_common;
 
 	while (len > 1) {
-		dprintk(1,
-			KERN_DEBUG "vpx3216b i2c reg 0x%02x data 0x%02x\n",
+		v4l_dbg(1, debug, client, "i2c reg 0x%02x data 0x%02x\n",
 			*data, vpx3220_read(client, *data));
 		data += 2;
 		len -= 2;
 	}
 }
 
-static int
-vpx3220_command (struct i2c_client *client,
-		 unsigned int       cmd,
-		 void              *arg)
+static int vpx3220_command(struct i2c_client *client, unsigned cmd, void *arg)
 {
 	struct vpx3220 *decoder = i2c_get_clientdata(client);
 
@@ -315,7 +284,6 @@
 		vpx3220_write_fp_block(client, init_fp,
 				       sizeof(init_fp) >> 1);
 		switch (decoder->norm) {
-
 		case VIDEO_MODE_NTSC:
 			vpx3220_write_fp_block(client, init_ntsc,
 					       sizeof(init_ntsc) >> 1);
@@ -334,21 +302,20 @@
 					       sizeof(init_pal) >> 1);
 			break;
 		}
-	}
 		break;
+	}
 
 	case DECODER_DUMP:
 	{
 		vpx3220_dump_i2c(client);
-	}
 		break;
+	}
 
 	case DECODER_GET_CAPABILITIES:
 	{
 		struct video_decoder_capability *cap = arg;
 
-		dprintk(1, KERN_DEBUG "%s: DECODER_GET_CAPABILITIES\n",
-			I2C_NAME(client));
+		v4l_dbg(1, debug, client, "DECODER_GET_CAPABILITIES\n");
 
 		cap->flags = VIDEO_DECODER_PAL |
 			     VIDEO_DECODER_NTSC |
@@ -357,20 +324,18 @@
 			     VIDEO_DECODER_CCIR;
 		cap->inputs = 3;
 		cap->outputs = 1;
-	}
 		break;
+	}
 
 	case DECODER_GET_STATUS:
 	{
 		int res = 0, status;
 
-		dprintk(1, KERN_INFO "%s: DECODER_GET_STATUS\n",
-			I2C_NAME(client));
+		v4l_dbg(1, debug, client, "DECODER_GET_STATUS\n");
 
 		status = vpx3220_fp_read(client, 0x0f3);
 
-		dprintk(1, KERN_INFO "%s: status: 0x%04x\n", I2C_NAME(client),
-			status);
+		v4l_dbg(1, debug, client, "status: 0x%04x\n", status);
 
 		if (status < 0)
 			return status;
@@ -379,7 +344,6 @@
 			res |= DECODER_STATUS_GOOD | DECODER_STATUS_COLOR;
 
 			switch (status & 0x18) {
-
 			case 0x00:
 			case 0x10:
 			case 0x14:
@@ -400,8 +364,8 @@
 		}
 
 		*(int *) arg = res;
-	}
 		break;
+	}
 
 	case DECODER_SET_NORM:
 	{
@@ -413,50 +377,43 @@
 		   chosen video norm */
 		temp_input = vpx3220_fp_read(client, 0xf2);
 
-		dprintk(1, KERN_DEBUG "%s: DECODER_SET_NORM %d\n",
-			I2C_NAME(client), *iarg);
+		v4l_dbg(1, debug, client, "DECODER_SET_NORM %d\n", *iarg);
 		switch (*iarg) {
-
 		case VIDEO_MODE_NTSC:
 			vpx3220_write_fp_block(client, init_ntsc,
 					       sizeof(init_ntsc) >> 1);
-			dprintk(1, KERN_INFO "%s: norm switched to NTSC\n",
-				I2C_NAME(client));
+			v4l_dbg(1, debug, client, "norm switched to NTSC\n");
 			break;
 
 		case VIDEO_MODE_PAL:
 			vpx3220_write_fp_block(client, init_pal,
 					       sizeof(init_pal) >> 1);
-			dprintk(1, KERN_INFO "%s: norm switched to PAL\n",
-				I2C_NAME(client));
+			v4l_dbg(1, debug, client, "norm switched to PAL\n");
 			break;
 
 		case VIDEO_MODE_SECAM:
 			vpx3220_write_fp_block(client, init_secam,
 					       sizeof(init_secam) >> 1);
-			dprintk(1, KERN_INFO "%s: norm switched to SECAM\n",
-				I2C_NAME(client));
+			v4l_dbg(1, debug, client, "norm switched to SECAM\n");
 			break;
 
 		case VIDEO_MODE_AUTO:
 			/* FIXME This is only preliminary support */
 			data = vpx3220_fp_read(client, 0xf2) & 0x20;
 			vpx3220_fp_write(client, 0xf2, 0x00c0 | data);
-			dprintk(1, KERN_INFO "%s: norm switched to Auto\n",
-				I2C_NAME(client));
+			v4l_dbg(1, debug, client, "norm switched to AUTO\n");
 			break;
 
 		default:
 			return -EINVAL;
-
 		}
 		decoder->norm = *iarg;
 
 		/* And here we set the backed up video input again */
 		vpx3220_fp_write(client, 0xf2, temp_input | 0x0010);
 		udelay(10);
-	}
 		break;
+	}
 
 	case DECODER_SET_INPUT:
 	{
@@ -475,8 +432,7 @@
 		if (*iarg < 0 || *iarg > 2)
 			return -EINVAL;
 
-		dprintk(1, KERN_INFO "%s: input switched to %s\n",
-			I2C_NAME(client), inputs[*iarg]);
+		v4l_dbg(1, debug, client, "input switched to %s\n", inputs[*iarg]);
 
 		vpx3220_write(client, 0x33, input[*iarg][0]);
 
@@ -488,8 +444,8 @@
 				 data | (input[*iarg][1] << 5) | 0x0010);
 
 		udelay(10);
-	}
 		break;
+	}
 
 	case DECODER_SET_OUTPUT:
 	{
@@ -499,19 +455,18 @@
 		if (*iarg != 0) {
 			return -EINVAL;
 		}
-	}
 		break;
+	}
 
 	case DECODER_ENABLE_OUTPUT:
 	{
 		int *iarg = arg;
 
-		dprintk(1, KERN_DEBUG "%s: DECODER_ENABLE_OUTPUT %d\n",
-			I2C_NAME(client), *iarg);
+		v4l_dbg(1, debug, client, "DECODER_ENABLE_OUTPUT %d\n", *iarg);
 
 		vpx3220_write(client, 0xf2, (*iarg ? 0x1b : 0x00));
-	}
 		break;
+	}
 
 	case DECODER_SET_PICTURE:
 	{
@@ -542,8 +497,8 @@
 			vpx3220_fp_write(client, 0x1c,
 					 ((decoder->hue - 32768) >> 6) & 0xFFF);
 		}
-	}
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -552,8 +507,7 @@
 	return 0;
 }
 
-static int
-vpx3220_init_client (struct i2c_client *client)
+static int vpx3220_init_client(struct i2c_client *client)
 {
 	vpx3220_write_block(client, init_common, sizeof(init_common));
 	vpx3220_write_fp_block(client, init_fp, sizeof(init_fp) >> 1);
@@ -567,115 +521,26 @@
  * Client management code
  */
 
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-static unsigned short normal_i2c[] =
-    { I2C_VPX3220 >> 1, (I2C_VPX3220 >> 1) + 4,
-	I2C_CLIENT_END
-};
+static unsigned short normal_i2c[] = { 0x86 >> 1, 0x8e >> 1, I2C_CLIENT_END };
 
-static unsigned short ignore = I2C_CLIENT_END;
+I2C_CLIENT_INSMOD;
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c		= normal_i2c,
-	.probe			= &ignore,
-	.ignore			= &ignore,
-};
-
-static struct i2c_driver vpx3220_i2c_driver;
-
-static int
-vpx3220_detach_client (struct i2c_client *client)
+static int vpx3220_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
-	struct vpx3220 *decoder = i2c_get_clientdata(client);
-	int err;
-
-	err = i2c_detach_client(client);
-	if (err) {
-		return err;
-	}
-
-	kfree(decoder);
-	kfree(client);
-
-	return 0;
-}
-
-static int
-vpx3220_detect_client (struct i2c_adapter *adapter,
-		       int                 address,
-		       int                 kind)
-{
-	int err;
-	struct i2c_client *client;
 	struct vpx3220 *decoder;
-
-	dprintk(1, VPX3220_DEBUG "%s\n", __func__);
+	const char *name = NULL;
+	u8 ver;
+	u16 pn;
 
 	/* Check if the adapter supports the needed features */
-	if (!i2c_check_functionality
-	    (adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
-		return 0;
-
-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (client == NULL) {
-		return -ENOMEM;
-	}
-
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &vpx3220_i2c_driver;
-
-	/* Check for manufacture ID and part number */
-	if (kind < 0) {
-		u8 id;
-		u16 pn;
-
-		id = vpx3220_read(client, 0x00);
-		if (id != 0xec) {
-			dprintk(1,
-				KERN_INFO
-				"vpx3220_attach: Wrong manufacturer ID (0x%02x)\n",
-				id);
-			kfree(client);
-			return 0;
-		}
-
-		pn = (vpx3220_read(client, 0x02) << 8) +
-		    vpx3220_read(client, 0x01);
-		switch (pn) {
-		case 0x4680:
-			strlcpy(I2C_NAME(client), "vpx3220a",
-				sizeof(I2C_NAME(client)));
-			break;
-		case 0x4260:
-			strlcpy(I2C_NAME(client), "vpx3216b",
-				sizeof(I2C_NAME(client)));
-			break;
-		case 0x4280:
-			strlcpy(I2C_NAME(client), "vpx3214c",
-				sizeof(I2C_NAME(client)));
-			break;
-		default:
-			dprintk(1,
-				KERN_INFO
-				"%s: Wrong part number (0x%04x)\n",
-				__func__, pn);
-			kfree(client);
-			return 0;
-		}
-	} else {
-		strlcpy(I2C_NAME(client), "forced vpx32xx",
-			sizeof(I2C_NAME(client)));
-	}
+	if (!i2c_check_functionality(client->adapter,
+		I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+		return -ENODEV;
 
 	decoder = kzalloc(sizeof(struct vpx3220), GFP_KERNEL);
-	if (decoder == NULL) {
-		kfree(client);
+	if (decoder == NULL)
 		return -ENOMEM;
-	}
 	decoder->norm = VIDEO_MODE_PAL;
 	decoder->input = 0;
 	decoder->enable = 1;
@@ -685,63 +550,52 @@
 	decoder->sat = 32768;
 	i2c_set_clientdata(client, decoder);
 
-	err = i2c_attach_client(client);
-	if (err) {
-		kfree(client);
-		kfree(decoder);
-		return err;
+	ver = i2c_smbus_read_byte_data(client, 0x00);
+	pn = (i2c_smbus_read_byte_data(client, 0x02) << 8) +
+		i2c_smbus_read_byte_data(client, 0x01);
+	if (ver == 0xec) {
+		switch (pn) {
+		case 0x4680:
+			name = "vpx3220a";
+			break;
+		case 0x4260:
+			name = "vpx3216b";
+			break;
+		case 0x4280:
+			name = "vpx3214c";
+			break;
+		}
 	}
-
-	dprintk(1, KERN_INFO "%s: vpx32xx client found at address 0x%02x\n",
-		I2C_NAME(client), client->addr << 1);
+	if (name)
+		v4l_info(client, "%s found @ 0x%x (%s)\n", name,
+			client->addr << 1, client->adapter->name);
+	else
+		v4l_info(client, "chip (%02x:%04x) found @ 0x%x (%s)\n",
+			ver, pn, client->addr << 1, client->adapter->name);
 
 	vpx3220_init_client(client);
-
 	return 0;
 }
 
-static int
-vpx3220_attach_adapter (struct i2c_adapter *adapter)
+static int vpx3220_remove(struct i2c_client *client)
 {
-	int ret;
-
-	ret = i2c_probe(adapter, &addr_data, &vpx3220_detect_client);
-	dprintk(1, VPX3220_DEBUG "%s: i2c_probe returned %d\n",
-		__func__, ret);
-	return ret;
+	kfree(i2c_get_clientdata(client));
+	return 0;
 }
 
-/* -----------------------------------------------------------------------
- * Driver initialization and cleanup code
- */
-
-static struct i2c_driver vpx3220_i2c_driver = {
-	.driver = {
-		.name = "vpx3220",
-	},
-
-	.id = I2C_DRIVERID_VPX3220,
-
-	.attach_adapter = vpx3220_attach_adapter,
-	.detach_client = vpx3220_detach_client,
-	.command = vpx3220_command,
+static const struct i2c_device_id vpx3220_id[] = {
+	{ "vpx3220a", 0 },
+	{ "vpx3216b", 0 },
+	{ "vpx3214c", 0 },
+	{ }
 };
+MODULE_DEVICE_TABLE(i2c, vpx3220_id);
 
-static int __init
-vpx3220_init (void)
-{
-	return i2c_add_driver(&vpx3220_i2c_driver);
-}
-
-static void __exit
-vpx3220_cleanup (void)
-{
-	i2c_del_driver(&vpx3220_i2c_driver);
-}
-
-module_init(vpx3220_init);
-module_exit(vpx3220_cleanup);
-
-MODULE_DESCRIPTION("vpx3220a/vpx3216b/vpx3214c video decoder driver");
-MODULE_AUTHOR("Laurent Pinchart");
-MODULE_LICENSE("GPL");
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+	.name = "vpx3220",
+	.driverid = I2C_DRIVERID_VPX3220,
+	.command = vpx3220_command,
+	.probe = vpx3220_probe,
+	.remove = vpx3220_remove,
+	.id_table = vpx3220_id,
+};
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index dcd45db..4dfb43b 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -2398,7 +2398,7 @@
 	cam->sensor = CC_UNKNOWN;
 	DBG(1, "Image sensor initialization failed for %s (/dev/video%d). "
 	       "Try to detach and attach this device again",
-	    symbolic(camlist, cam->id), cam->v4ldev->minor)
+	    symbolic(camlist, cam->id), cam->v4ldev->num)
 	return err;
 }
 
@@ -2644,7 +2644,7 @@
 {
 	mutex_lock(&w9968cf_devlist_mutex);
 
-	DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->minor)
+	DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->num)
 
 	video_unregister_device(cam->v4ldev);
 	list_del(&cam->v4llist);
@@ -2679,7 +2679,7 @@
 		DBG(2, "No supported image sensor has been detected by the "
 		       "'ovcamchip' module for the %s (/dev/video%d). Make "
 		       "sure it is loaded *before* (re)connecting the camera.",
-		    symbolic(camlist, cam->id), cam->v4ldev->minor)
+		    symbolic(camlist, cam->id), cam->v4ldev->num)
 		mutex_unlock(&cam->dev_mutex);
 		up_read(&w9968cf_disconnect);
 		return -ENODEV;
@@ -2687,7 +2687,7 @@
 
 	if (cam->users) {
 		DBG(2, "%s (/dev/video%d) has been already occupied by '%s'",
-		    symbolic(camlist, cam->id),cam->v4ldev->minor,cam->command)
+		    symbolic(camlist, cam->id), cam->v4ldev->num, cam->command)
 		if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) {
 			mutex_unlock(&cam->dev_mutex);
 			up_read(&w9968cf_disconnect);
@@ -2709,7 +2709,7 @@
 	}
 
 	DBG(5, "Opening '%s', /dev/video%d ...",
-	    symbolic(camlist, cam->id), cam->v4ldev->minor)
+	    symbolic(camlist, cam->id), cam->v4ldev->num)
 
 	cam->streaming = 0;
 	cam->misconfigured = 0;
@@ -2947,7 +2947,7 @@
 			.minheight = cam->minheight,
 		};
 		sprintf(cap.name, "W996[87]CF USB Camera #%d",
-			cam->v4ldev->minor);
+			cam->v4ldev->num);
 		cap.maxwidth = (cam->upscaling && w9968cf_vpp)
 			       ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
 				 : cam->maxwidth;
@@ -3567,7 +3567,7 @@
 		goto fail;
 	}
 
-	DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->minor)
+	DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->num)
 
 	/* Set some basic constants */
 	w9968cf_configure_camera(cam, udev, mod_id, dev_nr);
@@ -3618,7 +3618,7 @@
 			DBG(2, "The device is open (/dev/video%d)! "
 			       "Process name: %s. Deregistration and memory "
 			       "deallocation are deferred on close.",
-			    cam->v4ldev->minor, cam->command)
+			    cam->v4ldev->num, cam->command)
 			cam->misconfigured = 1;
 			w9968cf_stop_transfer(cam);
 			wake_up_interruptible(&cam->wait_queue);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 6a0902b..9fc5817 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -539,7 +539,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "URB timeout reached. The camera is misconfigured. To "
 		       "use it, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -640,7 +640,7 @@
 {
 	struct zc0301_device *cam = container_of(kref, struct zc0301_device,
 						 kref);
-	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
+	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
 	video_set_drvdata(cam->v4ldev, NULL);
 	video_unregister_device(cam->v4ldev);
 	usb_put_dev(cam->usbdev);
@@ -679,7 +679,7 @@
 	}
 
 	if (cam->users) {
-		DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+		DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num);
 		DBG(3, "Simultaneous opens are not supported");
 		if ((filp->f_flags & O_NONBLOCK) ||
 		    (filp->f_flags & O_NDELAY)) {
@@ -722,7 +722,7 @@
 	cam->frame_count = 0;
 	zc0301_empty_framequeues(cam);
 
-	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
+	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
 
 out:
 	mutex_unlock(&cam->open_mutex);
@@ -746,7 +746,7 @@
 	cam->users--;
 	wake_up_interruptible_nr(&cam->wait_open, 1);
 
-	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
+	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
 
 	kref_put(&cam->kref, zc0301_release_resources);
 
@@ -1275,7 +1275,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -1288,7 +1288,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -ENOMEM;
 	}
 
@@ -1470,7 +1470,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -1482,7 +1482,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
 		       "use the camera, close and open /dev/video%d again.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		return -ENOMEM;
 	}
 
@@ -1529,7 +1529,7 @@
 		cam->state |= DEV_MISCONFIGURED;
 		DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
 		       "problems. To use the camera, close and open "
-		       "/dev/video%d again.", cam->v4ldev->minor);
+		       "/dev/video%d again.", cam->v4ldev->num);
 		return -EIO;
 	}
 
@@ -2005,7 +2005,7 @@
 		goto fail;
 	}
 
-	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
+	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
 
 	cam->module_param.force_munmap = force_munmap[dev_nr];
 	cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2044,7 +2044,7 @@
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is open! Deregistration and "
 		       "memory deallocation are deferred.",
-		    cam->v4ldev->minor);
+		    cam->v4ldev->num);
 		cam->state |= DEV_MISCONFIGURED;
 		zc0301_stop_transfer(cam);
 		cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 3282be7..fa5f2f8 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -817,6 +817,7 @@
 	memcpy(&zr->i2c_algo, &zoran_i2c_bit_data_template,
 	       sizeof(struct i2c_algo_bit_data));
 	zr->i2c_algo.data = zr;
+	zr->i2c_adapter.class = I2C_CLASS_TV_ANALOG;
 	zr->i2c_adapter.id = I2C_HW_B_ZR36067;
 	zr->i2c_adapter.client_register = zoran_i2c_client_register;
 	zr->i2c_adapter.client_unregister = zoran_i2c_client_unregister;
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 25de763..db11ab9 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2996,7 +2996,6 @@
 			break;
 
 		default:
-			dprintk(3, "unsupported\n");
 			dprintk(1,
 				KERN_ERR
 				"%s: VIDIOC_S_FMT - unsupported type %d\n",
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7cdac99..a1d81ed 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -885,7 +885,7 @@
 	usb_set_intfdata(intf, cam);
 
 	dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n",
-		 cam->vdev->minor);
+		 cam->vdev->num);
 	return 0;
 }
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 6e291bf..5263913 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1044,7 +1044,6 @@
 
 		s_attr->dev_attr.attr.name = s_attr->name;
 		s_attr->dev_attr.attr.mode = S_IRUGO;
-		s_attr->dev_attr.attr.owner = THIS_MODULE;
 		s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
 
 		if (!rc)
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 5eff8ad..5a79d2d 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -52,6 +52,8 @@
 
 config UCB1400_CORE
 	tristate "Philips UCB1400 Core driver"
+	depends on AC97_BUS
+	depends on GPIOLIB
 	help
 	  This enables support for the Philips UCB1400 core functions.
 	  The UCB1400 is an AC97 audio codec.
@@ -59,6 +61,20 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ucb1400_core.
 
+config TWL4030_CORE
+	bool "Texas Instruments TWL4030/TPS659x0 Support"
+	depends on I2C=y && GENERIC_HARDIRQS && (ARCH_OMAP2 || ARCH_OMAP3)
+	help
+	  Say yes here if you have a TWL4030 family chip on your board.
+	  This core driver provides register access and IRQ handling
+	  facilities, and registers devices for the various functions
+	  so that function-specific drivers can bind to them.
+
+	  These multi-function chips are found on many OMAP2 and OMAP3
+	  boards, providing power management, RTC, GPIO, keypad, a
+	  high speed USB OTG transceiver, an audio codec (on most
+	  versions) and many other features.
+
 config MFD_TMIO
 	bool
 	default n
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 759b1fe..0acefe8 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,6 +17,8 @@
 obj-$(CONFIG_MFD_WM8350)	+= wm8350.o
 obj-$(CONFIG_MFD_WM8350_I2C)	+= wm8350-i2c.o
 
+obj-$(CONFIG_TWL4030_CORE)	+= twl4030-core.o twl4030-irq.o
+
 obj-$(CONFIG_MFD_CORE)		+= mfd-core.o
 
 obj-$(CONFIG_MCP)		+= mcp-core.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index ba5aa20..e4c0db4 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -123,7 +123,7 @@
 					irqnr = asic->irq_base +
 						(ASIC3_GPIOS_PER_BANK * bank)
 						+ i;
-					desc = irq_desc + irqnr;
+					desc = irq_to_desc(irqnr);
 					desc->handle_irq(irqnr, desc);
 					if (asic->irq_bothedge[bank] & bit)
 						asic3_irq_flip_edge(asic, base,
@@ -136,7 +136,7 @@
 		for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
 			/* They start at bit 4 and go up */
 			if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
-				desc = irq_desc + asic->irq_base + i;
+				desc = irq_to_desc(asic->irq_base + i);
 				desc->handle_irq(asic->irq_base + i,
 						 desc);
 			}
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
new file mode 100644
index 0000000..b57326a
--- /dev/null
+++ b/drivers/mfd/da903x.c
@@ -0,0 +1,563 @@
+/*
+ * Base driver for Dialog Semiconductor DA9030/DA9034
+ *
+ * Copyright (C) 2008 Compulab, Ltd.
+ * 	Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * 	Eric Miao <eric.miao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/mfd/da903x.h>
+
+#define DA9030_CHIP_ID		0x00
+#define DA9030_EVENT_A		0x01
+#define DA9030_EVENT_B		0x02
+#define DA9030_EVENT_C		0x03
+#define DA9030_STATUS		0x04
+#define DA9030_IRQ_MASK_A	0x05
+#define DA9030_IRQ_MASK_B	0x06
+#define DA9030_IRQ_MASK_C	0x07
+#define DA9030_SYS_CTRL_A	0x08
+#define DA9030_SYS_CTRL_B	0x09
+#define DA9030_FAULT_LOG	0x0a
+
+#define DA9034_CHIP_ID		0x00
+#define DA9034_EVENT_A		0x01
+#define DA9034_EVENT_B		0x02
+#define DA9034_EVENT_C		0x03
+#define DA9034_EVENT_D		0x04
+#define DA9034_STATUS_A		0x05
+#define DA9034_STATUS_B		0x06
+#define DA9034_IRQ_MASK_A	0x07
+#define DA9034_IRQ_MASK_B	0x08
+#define DA9034_IRQ_MASK_C	0x09
+#define DA9034_IRQ_MASK_D	0x0a
+#define DA9034_SYS_CTRL_A	0x0b
+#define DA9034_SYS_CTRL_B	0x0c
+#define DA9034_FAULT_LOG	0x0d
+
+struct da903x_chip;
+
+struct da903x_chip_ops {
+	int	(*init_chip)(struct da903x_chip *);
+	int	(*unmask_events)(struct da903x_chip *, unsigned int events);
+	int	(*mask_events)(struct da903x_chip *, unsigned int events);
+	int	(*read_events)(struct da903x_chip *, unsigned int *events);
+	int	(*read_status)(struct da903x_chip *, unsigned int *status);
+};
+
+struct da903x_chip {
+	struct i2c_client	*client;
+	struct device		*dev;
+	struct da903x_chip_ops	*ops;
+
+	int			type;
+	uint32_t		events_mask;
+
+	struct mutex		lock;
+	struct work_struct	irq_work;
+
+	struct blocking_notifier_head notifier_list;
+};
+
+static inline int __da903x_read(struct i2c_client *client,
+				int reg, uint8_t *val)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+		return ret;
+	}
+
+	*val = (uint8_t)ret;
+	return 0;
+}
+
+static inline int __da903x_reads(struct i2c_client *client, int reg,
+				 int len, uint8_t *val)
+{
+	int ret;
+
+	ret = i2c_smbus_read_i2c_block_data(client, reg, len, val);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed reading from 0x%02x\n", reg);
+		return ret;
+	}
+	return 0;
+}
+
+static inline int __da903x_write(struct i2c_client *client,
+				 int reg, uint8_t val)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, val);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
+				val, reg);
+		return ret;
+	}
+	return 0;
+}
+
+static inline int __da903x_writes(struct i2c_client *client, int reg,
+				  int len, uint8_t *val)
+{
+	int ret;
+
+	ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
+		return ret;
+	}
+	return 0;
+}
+
+int da903x_register_notifier(struct device *dev, struct notifier_block *nb,
+				unsigned int events)
+{
+	struct da903x_chip *chip = dev_get_drvdata(dev);
+
+	chip->ops->unmask_events(chip, events);
+	return blocking_notifier_chain_register(&chip->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(da903x_register_notifier);
+
+int da903x_unregister_notifier(struct device *dev, struct notifier_block *nb,
+				unsigned int events)
+{
+	struct da903x_chip *chip = dev_get_drvdata(dev);
+
+	chip->ops->mask_events(chip, events);
+	return blocking_notifier_chain_unregister(&chip->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(da903x_unregister_notifier);
+
+int da903x_write(struct device *dev, int reg, uint8_t val)
+{
+	return __da903x_write(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(da903x_write);
+
+int da903x_read(struct device *dev, int reg, uint8_t *val)
+{
+	return __da903x_read(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(da903x_read);
+
+int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+	struct da903x_chip *chip = dev_get_drvdata(dev);
+	uint8_t reg_val;
+	int ret = 0;
+
+	mutex_lock(&chip->lock);
+
+	ret = __da903x_read(chip->client, reg, &reg_val);
+	if (ret)
+		goto out;
+
+	if ((reg_val & bit_mask) == 0) {
+		reg_val |= bit_mask;
+		ret = __da903x_write(chip->client, reg, reg_val);
+	}
+out:
+	mutex_unlock(&chip->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(da903x_set_bits);
+
+int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+	struct da903x_chip *chip = dev_get_drvdata(dev);
+	uint8_t reg_val;
+	int ret = 0;
+
+	mutex_lock(&chip->lock);
+
+	ret = __da903x_read(chip->client, reg, &reg_val);
+	if (ret)
+		goto out;
+
+	if (reg_val & bit_mask) {
+		reg_val &= ~bit_mask;
+		ret = __da903x_write(chip->client, reg, reg_val);
+	}
+out:
+	mutex_unlock(&chip->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(da903x_clr_bits);
+
+int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
+{
+	struct da903x_chip *chip = dev_get_drvdata(dev);
+	uint8_t reg_val;
+	int ret = 0;
+
+	mutex_lock(&chip->lock);
+
+	ret = __da903x_read(chip->client, reg, &reg_val);
+	if (ret)
+		goto out;
+
+	if ((reg_val & mask) != val) {
+		reg_val = (reg_val & ~mask) | val;
+		ret = __da903x_write(chip->client, reg, reg_val);
+	}
+out:
+	mutex_unlock(&chip->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(da903x_update);
+
+int da903x_query_status(struct device *dev, unsigned int sbits)
+{
+	struct da903x_chip *chip = dev_get_drvdata(dev);
+	unsigned int status = 0;
+
+	chip->ops->read_status(chip, &status);
+	return ((status & sbits) == sbits);
+}
+EXPORT_SYMBOL(da903x_query_status);
+
+static int __devinit da9030_init_chip(struct da903x_chip *chip)
+{
+	uint8_t chip_id;
+	int err;
+
+	err = __da903x_read(chip->client, DA9030_CHIP_ID, &chip_id);
+	if (err)
+		return err;
+
+	err = __da903x_write(chip->client, DA9030_SYS_CTRL_A, 0xE8);
+	if (err)
+		return err;
+
+	dev_info(chip->dev, "DA9030 (CHIP ID: 0x%02x) detected\n", chip_id);
+	return 0;
+}
+
+static int da9030_unmask_events(struct da903x_chip *chip, unsigned int events)
+{
+	uint8_t v[3];
+
+	chip->events_mask &= ~events;
+
+	v[0] = (chip->events_mask & 0xff);
+	v[1] = (chip->events_mask >> 8) & 0xff;
+	v[2] = (chip->events_mask >> 16) & 0xff;
+
+	return __da903x_writes(chip->client, DA9030_IRQ_MASK_A, 3, v);
+}
+
+static int da9030_mask_events(struct da903x_chip *chip, unsigned int events)
+{
+	uint8_t v[3];
+
+	chip->events_mask |= events;
+
+	v[0] = (chip->events_mask & 0xff);
+	v[1] = (chip->events_mask >> 8) & 0xff;
+	v[2] = (chip->events_mask >> 16) & 0xff;
+
+	return __da903x_writes(chip->client, DA9030_IRQ_MASK_A, 3, v);
+}
+
+static int da9030_read_events(struct da903x_chip *chip, unsigned int *events)
+{
+	uint8_t v[3] = {0, 0, 0};
+	int ret;
+
+	ret = __da903x_reads(chip->client, DA9030_EVENT_A, 3, v);
+	if (ret < 0)
+		return ret;
+
+	*events = (v[2] << 16) | (v[1] << 8) | v[0];
+	return 0;
+}
+
+static int da9030_read_status(struct da903x_chip *chip, unsigned int *status)
+{
+	return __da903x_read(chip->client, DA9030_STATUS, (uint8_t *)status);
+}
+
+static int da9034_init_chip(struct da903x_chip *chip)
+{
+	uint8_t chip_id;
+	int err;
+
+	err = __da903x_read(chip->client, DA9034_CHIP_ID, &chip_id);
+	if (err)
+		return err;
+
+	err = __da903x_write(chip->client, DA9034_SYS_CTRL_A, 0xE8);
+	if (err)
+		return err;
+
+	/* avoid SRAM power off during sleep */
+	__da903x_write(chip->client, 0x10, 0x07);
+	__da903x_write(chip->client, 0x11, 0xff);
+	__da903x_write(chip->client, 0x12, 0xff);
+
+	/* Enable the ONKEY power down functionality */
+	__da903x_write(chip->client, DA9034_SYS_CTRL_B, 0x20);
+	__da903x_write(chip->client, DA9034_SYS_CTRL_A, 0x60);
+
+	/* workaround to make LEDs work */
+	__da903x_write(chip->client, 0x90, 0x01);
+	__da903x_write(chip->client, 0xB0, 0x08);
+
+	/* make ADTV1 and SDTV1 effective */
+	__da903x_write(chip->client, 0x20, 0x00);
+
+	dev_info(chip->dev, "DA9034 (CHIP ID: 0x%02x) detected\n", chip_id);
+	return 0;
+}
+
+static int da9034_unmask_events(struct da903x_chip *chip, unsigned int events)
+{
+	uint8_t v[4];
+
+	chip->events_mask &= ~events;
+
+	v[0] = (chip->events_mask & 0xff);
+	v[1] = (chip->events_mask >> 8) & 0xff;
+	v[2] = (chip->events_mask >> 16) & 0xff;
+	v[3] = (chip->events_mask >> 24) & 0xff;
+
+	return __da903x_writes(chip->client, DA9034_IRQ_MASK_A, 4, v);
+}
+
+static int da9034_mask_events(struct da903x_chip *chip, unsigned int events)
+{
+	uint8_t v[4];
+
+	chip->events_mask |= events;
+
+	v[0] = (chip->events_mask & 0xff);
+	v[1] = (chip->events_mask >> 8) & 0xff;
+	v[2] = (chip->events_mask >> 16) & 0xff;
+	v[3] = (chip->events_mask >> 24) & 0xff;
+
+	return __da903x_writes(chip->client, DA9034_IRQ_MASK_A, 4, v);
+}
+
+static int da9034_read_events(struct da903x_chip *chip, unsigned int *events)
+{
+	uint8_t v[4] = {0, 0, 0, 0};
+	int ret;
+
+	ret = __da903x_reads(chip->client, DA9034_EVENT_A, 4, v);
+	if (ret < 0)
+		return ret;
+
+	*events = (v[3] << 24) | (v[2] << 16) | (v[1] << 8) | v[0];
+	return 0;
+}
+
+static int da9034_read_status(struct da903x_chip *chip, unsigned int *status)
+{
+	uint8_t v[2] = {0, 0};
+	int ret = 0;
+
+	ret = __da903x_reads(chip->client, DA9034_STATUS_A, 2, v);
+	if (ret)
+		return ret;
+
+	*status = (v[1] << 8) | v[0];
+	return 0;
+}
+
+static void da903x_irq_work(struct work_struct *work)
+{
+	struct da903x_chip *chip =
+		container_of(work, struct da903x_chip, irq_work);
+	unsigned int events = 0;
+
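+	/* Keep reading and dispatching events until no unmasked events remain */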
+	while (1) {
+		if (chip->ops->read_events(chip, &events))
+			break;
+
+		events &= ~chip->events_mask;
+		if (events == 0)
+			break;
+
+		blocking_notifier_call_chain(
+				&chip->notifier_list, events, NULL);
+	}
+	enable_irq(chip->client->irq);
+}
+
+static irqreturn_t da903x_irq_handler(int irq, void *data)
+{
+	struct da903x_chip *chip = data;
+
+	disable_irq_nosync(irq);
+	(void)schedule_work(&chip->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static struct da903x_chip_ops da903x_ops[] = {
+	[0] = {
+		.init_chip	= da9030_init_chip,
+		.unmask_events	= da9030_unmask_events,
+		.mask_events	= da9030_mask_events,
+		.read_events	= da9030_read_events,
+		.read_status	= da9030_read_status,
+	},
+	[1] = {
+		.init_chip	= da9034_init_chip,
+		.unmask_events	= da9034_unmask_events,
+		.mask_events	= da9034_mask_events,
+		.read_events	= da9034_read_events,
+		.read_status	= da9034_read_status,
+	}
+};
+
+static const struct i2c_device_id da903x_id_table[] = {
+	{ "da9030", 0 },
+	{ "da9034", 1 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, da903x_id_table);
+
+static int __devexit __remove_subdev(struct device *dev, void *unused)
+{
+	platform_device_unregister(to_platform_device(dev));
+	return 0;
+}
+
+static int __devexit da903x_remove_subdevs(struct da903x_chip *chip)
+{
+	return device_for_each_child(chip->dev, NULL, __remove_subdev);
+}
+
+static int __devinit da903x_add_subdevs(struct da903x_chip *chip,
+					struct da903x_platform_data *pdata)
+{
+	struct da903x_subdev_info *subdev;
+	struct platform_device *pdev;
+	int i, ret = 0;
+
+	for (i = 0; i < pdata->num_subdevs; i++) {
+		subdev = &pdata->subdevs[i];
+
+		pdev = platform_device_alloc(subdev->name, subdev->id);
+		if (!pdev) {
+			ret = -ENOMEM;
+			goto failed;
+		}
+
+		pdev->dev.parent = chip->dev;
+		pdev->dev.platform_data = subdev->platform_data;
+
+		ret = platform_device_add(pdev);
+		if (ret)
+			goto failed;
+	}
+	return 0;
+
+failed:
+	da903x_remove_subdevs(chip);
+	return ret;
+}
+
+static int __devinit da903x_probe(struct i2c_client *client,
+				  const struct i2c_device_id *id)
+{
+	struct da903x_platform_data *pdata = client->dev.platform_data;
+	struct da903x_chip *chip;
+	unsigned int tmp;
+	int ret;
+
+	chip = kzalloc(sizeof(struct da903x_chip), GFP_KERNEL);
+	if (chip == NULL)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->ops = &da903x_ops[id->driver_data];
+
+	mutex_init(&chip->lock);
+	INIT_WORK(&chip->irq_work, da903x_irq_work);
+	BLOCKING_INIT_NOTIFIER_HEAD(&chip->notifier_list);
+
+	i2c_set_clientdata(client, chip);
+
+	ret = chip->ops->init_chip(chip);
+	if (ret)
+		goto out_free_chip;
+
+	/* mask and clear all IRQs */
+	chip->events_mask = 0xffffffff;
+	chip->ops->mask_events(chip, chip->events_mask);
+	chip->ops->read_events(chip, &tmp);
+
+	ret = request_irq(client->irq, da903x_irq_handler,
+			IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+			"da903x", chip);
+	if (ret) {
+		dev_err(&client->dev, "failed to request irq %d\n",
+				client->irq);
+		goto out_free_chip;
+	}
+
+	ret = da903x_add_subdevs(chip, pdata);
+	if (ret)
+		goto out_free_irq;
+
+	return 0;
+
+out_free_irq:
+	free_irq(client->irq, chip);
+out_free_chip:
+	i2c_set_clientdata(client, NULL);
+	kfree(chip);
+	return ret;
+}
+
+static int __devexit da903x_remove(struct i2c_client *client)
+{
+	struct da903x_chip *chip = i2c_get_clientdata(client);
+
+	da903x_remove_subdevs(chip);
+	free_irq(client->irq, chip);
+	kfree(chip);
+	return 0;
+}
+
+static struct i2c_driver da903x_driver = {
+	.driver	= {
+		.name	= "da903x",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= da903x_probe,
+	.remove		= __devexit_p(da903x_remove),
+	.id_table	= da903x_id_table,
+};
+
+static int __init da903x_init(void)
+{
+	return i2c_add_driver(&da903x_driver);
+}
+module_init(da903x_init);
+
+static void __exit da903x_exit(void)
+{
+	i2c_del_driver(&da903x_driver);
+}
+module_exit(da903x_exit);
+
+MODULE_DESCRIPTION("PMIC Driver for Dialog Semiconductor DA9034");
+MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
+	      "Mike Rapoport <mike@compulab.co.il>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 6be4317..1a4d046 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -112,7 +112,7 @@
 		/* Run irq handler */
 		pr_debug("got IRQ %d\n", irqpin);
 		irq = ei->irq_start + irqpin;
-		desc = &irq_desc[irq];
+		desc = irq_to_desc(irq);
 		desc->handle_irq(irq, desc);
 	}
 }
@@ -289,7 +289,7 @@
 	ei->base_addr = ioremap_nocache(res->start, res->end - res->start);
 	if (!ei->base_addr)
 		goto fail;
-	pr_debug("EGPIO phys=%08x virt=%p\n", res->start, ei->base_addr);
+	pr_debug("EGPIO phys=%08x virt=%p\n", (u32)res->start, ei->base_addr);
 
 	if ((pdata->bus_width != 16) && (pdata->bus_width != 32))
 		goto fail;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 9c9c126..6c0d1be 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -20,7 +20,7 @@
 			  struct resource *mem_base,
 			  int irq_base)
 {
-	struct resource res[cell->num_resources];
+	struct resource *res;
 	struct platform_device *pdev;
 	int ret = -ENOMEM;
 	int r;
@@ -29,14 +29,17 @@
 	if (!pdev)
 		goto fail_alloc;
 
+	res = kzalloc(sizeof(*res) * cell->num_resources, GFP_KERNEL);
+	if (!res)
+		goto fail_device;
+
 	pdev->dev.parent = parent;
 
 	ret = platform_device_add_data(pdev,
 			cell->platform_data, cell->data_size);
 	if (ret)
-		goto fail_device;
+		goto fail_res;
 
-	memset(res, 0, sizeof(res));
 	for (r = 0; r < cell->num_resources; r++) {
 		res[r].name = cell->resources[r].name;
 		res[r].flags = cell->resources[r].flags;
@@ -64,11 +67,15 @@
 
 	ret = platform_device_add(pdev);
 	if (ret)
-		goto fail_device;
+		goto fail_res;
+
+	kfree(res);
 
 	return 0;
 
 /*	platform_device_del(pdev); */
+fail_res:
+	kfree(res);
 fail_device:
 	platform_device_put(pdev);
 fail_alloc:
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 7aebad4..170f9d4 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -623,8 +623,8 @@
 
 	sm501_sync_regs(sm);
 
-	dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
-		 gate, clock, mode);
+	dev_dbg(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
+		gate, clock, mode);
 
 	sm501_mdelay(sm, 16);
 	mutex_unlock(&sm->clock_lock);
@@ -742,7 +742,7 @@
 	int ret;
 
 	for (ptr = 0; ptr < pdev->num_resources; ptr++) {
-		printk("%s[%d] flags %08lx: %08llx..%08llx\n",
+		printk(KERN_DEBUG "%s[%d] flags %08lx: %08llx..%08llx\n",
 		       pdev->name, ptr,
 		       pdev->resource[ptr].flags,
 		       (unsigned long long)pdev->resource[ptr].start,
@@ -1374,31 +1374,31 @@
 static int sm501_plat_probe(struct platform_device *dev)
 {
 	struct sm501_devdata *sm;
-	int err;
+	int ret;
 
 	sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL);
 	if (sm == NULL) {
 		dev_err(&dev->dev, "no memory for device data\n");
-		err = -ENOMEM;
+		ret = -ENOMEM;
 		goto err1;
 	}
 
 	sm->dev = &dev->dev;
 	sm->pdev_id = dev->id;
-	sm->irq = platform_get_irq(dev, 0);
-	sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
-	sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	sm->platdata = dev->dev.platform_data;
 
-	if (sm->irq < 0) {
+	ret = platform_get_irq(dev, 0);
+	if (ret < 0) {
 		dev_err(&dev->dev, "failed to get irq resource\n");
-		err = sm->irq;
 		goto err_res;
 	}
+	sm->irq = ret;
 
+	sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (sm->io_res == NULL || sm->mem_res == NULL) {
 		dev_err(&dev->dev, "failed to get IO resource\n");
-		err = -ENOENT;
+		ret = -ENOENT;
 		goto err_res;
 	}
 
@@ -1407,7 +1407,7 @@
 
 	if (sm->regs_claim == NULL) {
 		dev_err(&dev->dev, "cannot claim registers\n");
-		err= -EBUSY;
+		ret = -EBUSY;
 		goto err_res;
 	}
 
@@ -1418,7 +1418,7 @@
 
 	if (sm->regs == NULL) {
 		dev_err(&dev->dev, "cannot remap registers\n");
-		err = -EIO;
+		ret = -EIO;
 		goto err_claim;
 	}
 
@@ -1430,7 +1430,7 @@
  err_res:
 	kfree(sm);
  err1:
-	return err;
+	return ret;
 
 }
 
@@ -1625,8 +1625,7 @@
 		goto err3;
 	}
 
-	sm->regs = ioremap(pci_resource_start(dev, 1),
-			   pci_resource_len(dev, 1));
+	sm->regs = pci_ioremap_bar(dev, 1);
 
 	if (sm->regs == NULL) {
 		dev_err(&dev->dev, "cannot remap registers\n");
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 49a0fff..9f7024c0 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -24,8 +24,10 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -56,6 +58,8 @@
 	spinlock_t		lock;
 
 	struct resource		rscr;
+	struct clk		*clk48m;
+	struct clk		*clk32k;
 	int			irq;
 	int			irq_base;
 };
@@ -65,13 +69,11 @@
 static int t7l66xb_mmc_enable(struct platform_device *mmc)
 {
 	struct platform_device *dev = to_platform_device(mmc->dev.parent);
-	struct t7l66xb_platform_data   *pdata = dev->dev.platform_data;
 	struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
 	unsigned long flags;
 	u8 dev_ctl;
 
-	if (pdata->enable_clk32k)
-		pdata->enable_clk32k(dev);
+	clk_enable(t7l66xb->clk32k);
 
 	spin_lock_irqsave(&t7l66xb->lock, flags);
 
@@ -87,7 +89,6 @@
 static int t7l66xb_mmc_disable(struct platform_device *mmc)
 {
 	struct platform_device *dev = to_platform_device(mmc->dev.parent);
-	struct t7l66xb_platform_data   *pdata = dev->dev.platform_data;
 	struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
 	unsigned long flags;
 	u8 dev_ctl;
@@ -100,8 +101,7 @@
 
 	spin_unlock_irqrestore(&t7l66xb->lock, flags);
 
-	if (pdata->disable_clk32k)
-		pdata->disable_clk32k(dev);
+	clk_disable(t7l66xb->clk32k);
 
 	return 0;
 }
@@ -258,18 +258,22 @@
 #ifdef CONFIG_PM
 static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
 {
+	struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
 	struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
 
 	if (pdata && pdata->suspend)
 		pdata->suspend(dev);
+	clk_disable(t7l66xb->clk48m);
 
 	return 0;
 }
 
 static int t7l66xb_resume(struct platform_device *dev)
 {
+	struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
 	struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
 
+	clk_enable(t7l66xb->clk48m);
 	if (pdata && pdata->resume)
 		pdata->resume(dev);
 
@@ -309,6 +313,19 @@
 
 	t7l66xb->irq_base = pdata->irq_base;
 
+	t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K");
+	if (IS_ERR(t7l66xb->clk32k)) {
+		ret = PTR_ERR(t7l66xb->clk32k);
+		goto err_clk32k_get;
+	}
+
+	t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M");
+	if (IS_ERR(t7l66xb->clk48m)) {
+		ret = PTR_ERR(t7l66xb->clk48m);
+		goto err_clk48m_get;
+	}
+
 	rscr = &t7l66xb->rscr;
 	rscr->name = "t7l66xb-core";
 	rscr->start = iomem->start;
@@ -325,6 +342,8 @@
 		goto err_ioremap;
 	}
 
+	clk_enable(t7l66xb->clk48m);
+
 	if (pdata && pdata->enable)
 		pdata->enable(dev);
 
@@ -359,9 +378,13 @@
 	iounmap(t7l66xb->scr);
 err_ioremap:
 	release_resource(&t7l66xb->rscr);
-err_noirq:
 err_request_scr:
+	clk_put(t7l66xb->clk48m);
+err_clk48m_get:
+	clk_put(t7l66xb->clk32k);
+err_clk32k_get:
+err_noirq:
 	kfree(t7l66xb);
 	return ret;
 }
 
@@ -372,7 +395,8 @@
 	int ret;
 
 	ret = pdata->disable(dev);
-
+	clk_disable(t7l66xb->clk48m);
+	clk_put(t7l66xb->clk48m);
+	clk_put(t7l66xb->clk32k);
 	t7l66xb_detach_irq(dev);
 	iounmap(t7l66xb->scr);
 	release_resource(&t7l66xb->rscr);
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index a22b21a..43222c1 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -12,6 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -24,18 +25,22 @@
 #ifdef CONFIG_PM
 static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
 {
-	struct tc6387xb_platform_data *pdata = platform_get_drvdata(dev);
+	struct clk *clk32k = platform_get_drvdata(dev);
+	struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
 
 	if (pdata && pdata->suspend)
 		pdata->suspend(dev);
+	clk_disable(clk32k);
 
 	return 0;
 }
 
 static int tc6387xb_resume(struct platform_device *dev)
 {
-	struct tc6387xb_platform_data *pdata = platform_get_drvdata(dev);
+	struct clk *clk32k = platform_get_drvdata(dev);
+	struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
 
+	clk_enable(clk32k);
 	if (pdata && pdata->resume)
 		pdata->resume(dev);
 
@@ -51,10 +56,9 @@
 static int tc6387xb_mmc_enable(struct platform_device *mmc)
 {
 	struct platform_device *dev      = to_platform_device(mmc->dev.parent);
-	struct tc6387xb_platform_data *tc6387xb = dev->dev.platform_data;
+	struct clk *clk32k = platform_get_drvdata(dev);
 
-	if (tc6387xb->enable_clk32k)
-		tc6387xb->enable_clk32k(dev);
+	clk_enable(clk32k);
 
 	return 0;
 }
@@ -62,10 +66,9 @@
 static int tc6387xb_mmc_disable(struct platform_device *mmc)
 {
 	struct platform_device *dev      = to_platform_device(mmc->dev.parent);
-	struct tc6387xb_platform_data *tc6387xb = dev->dev.platform_data;
+	struct clk *clk32k = platform_get_drvdata(dev);
 
-	if (tc6387xb->disable_clk32k)
-		tc6387xb->disable_clk32k(dev);
+	clk_disable(clk32k);
 
 	return 0;
 }
@@ -102,14 +105,14 @@
 
 static int tc6387xb_probe(struct platform_device *dev)
 {
-	struct tc6387xb_platform_data *data = platform_get_drvdata(dev);
+	struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
 	struct resource *iomem;
+	struct clk *clk32k;
 	int irq, ret;
 
 	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (!iomem) {
-		ret = -EINVAL;
-		goto err_resource;
+		return -EINVAL;
 	}
 
 	ret  = platform_get_irq(dev, 0);
@@ -118,8 +121,15 @@
 	else
 		goto err_resource;
 
-	if (data && data->enable)
-		data->enable(dev);
+	clk32k = clk_get(&dev->dev, "CLK_CK32K");
+	if (IS_ERR(clk32k)) {
+		ret = PTR_ERR(clk32k);
+		goto err_resource;
+	}
+	platform_set_drvdata(dev, clk32k);
+
+	if (pdata && pdata->enable)
+		pdata->enable(dev);
 
 	printk(KERN_INFO "Toshiba tc6387xb initialised\n");
 
@@ -134,18 +144,19 @@
 	if (!ret)
 		return 0;
 
+	clk_put(clk32k);
 err_resource:
 	return ret;
 }
 
 static int tc6387xb_remove(struct platform_device *dev)
 {
-	struct tc6387xb_platform_data *data = platform_get_drvdata(dev);
+	struct clk *clk32k = platform_get_drvdata(dev);
 
-	if (data && data->disable)
-		data->disable(dev);
-
-	/* FIXME - free the resources! */
+	mfd_remove_devices(&dev->dev);
+	clk_disable(clk32k);
+	clk_put(clk32k);
+	platform_set_drvdata(dev, NULL);
 
 	return 0;
 }
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index e4c1c78..f856e94 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -113,6 +113,8 @@
 enum {
 	TC6393XB_CELL_NAND,
 	TC6393XB_CELL_MMC,
+	TC6393XB_CELL_OHCI,
+	TC6393XB_CELL_FB,
 };
 
 /*--------------------------------------------------------------------------*/
@@ -170,6 +172,176 @@
 	},
 };
 
+static const struct resource tc6393xb_ohci_resources[] = {
+	{
+		.start	= 0x3000,
+		.end	= 0x31ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 0x0300,
+		.end	= 0x03ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 0x010000,
+		.end	= 0x017fff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 0x018000,
+		.end	= 0x01ffff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= IRQ_TC6393_OHCI,
+		.end	= IRQ_TC6393_OHCI,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource __devinitdata tc6393xb_fb_resources[] = {
+	{
+		.start	= 0x5000,
+		.end	= 0x51ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 0x0500,
+		.end	= 0x05ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 0x100000,
+		.end	= 0x1fffff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= IRQ_TC6393_FB,
+		.end	= IRQ_TC6393_FB,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static int tc6393xb_ohci_enable(struct platform_device *dev)
+{
+	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
+	unsigned long flags;
+	u16 ccr;
+	u8 fer;
+
+	spin_lock_irqsave(&tc6393xb->lock, flags);
+
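+	/* Turn on the USB clock (CCR), then enable the USB host function (FER) */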
+	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
+	ccr |= SCR_CCR_USBCK;
+	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
+
+	fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
+	fer |= SCR_FER_USBEN;
+	tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);
+
+	spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+	return 0;
+}
+
+static int tc6393xb_ohci_disable(struct platform_device *dev)
+{
+	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
+	unsigned long flags;
+	u16 ccr;
+	u8 fer;
+
+	spin_lock_irqsave(&tc6393xb->lock, flags);
+
+	fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
+	fer &= ~SCR_FER_USBEN;
+	tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);
+
+	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
+	ccr &= ~SCR_CCR_USBCK;
+	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
+
+	spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+	return 0;
+}
+
+static int tc6393xb_fb_enable(struct platform_device *dev)
+{
+	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
+	unsigned long flags;
+	u16 ccr;
+
+	spin_lock_irqsave(&tc6393xb->lock, flags);
+
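+	/* Switch the display MCLK to 48 MHz to power up the LCD controller */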
+	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
+	ccr &= ~SCR_CCR_MCLK_MASK;
+	ccr |= SCR_CCR_MCLK_48;
+	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
+
+	spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+	return 0;
+}
+
+static int tc6393xb_fb_disable(struct platform_device *dev)
+{
+	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
+	unsigned long flags;
+	u16 ccr;
+
+	spin_lock_irqsave(&tc6393xb->lock, flags);
+
+	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
+	ccr &= ~SCR_CCR_MCLK_MASK;
+	ccr |= SCR_CCR_MCLK_OFF;
+	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
+
+	spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+	return 0;
+}
+
+int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
+{
+	struct platform_device *dev = to_platform_device(fb->dev.parent);
+	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+	u8 fer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tc6393xb->lock, flags);
+
+	fer = ioread8(tc6393xb->scr + SCR_FER);
+	if (on)
+		fer |= SCR_FER_SLCDEN;
+	else
+		fer &= ~SCR_FER_SLCDEN;
+	iowrite8(fer, tc6393xb->scr + SCR_FER);
+
+	spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(tc6393xb_lcd_set_power);
+
+int tc6393xb_lcd_mode(struct platform_device *fb,
+					const struct fb_videomode *mode)
+{
+	struct platform_device *dev = to_platform_device(fb->dev.parent);
+	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tc6393xb->lock, flags);
+
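+	/* Program PLL1 with the pixel clock, low 16 bits then high 16 bits */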
+	iowrite16(mode->pixclock, tc6393xb->scr + SCR_PLL1CR + 0);
+	iowrite16(mode->pixclock >> 16, tc6393xb->scr + SCR_PLL1CR + 2);
+
+	spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(tc6393xb_lcd_mode);
+
 static struct mfd_cell __devinitdata tc6393xb_cells[] = {
 	[TC6393XB_CELL_NAND] = {
 		.name = "tmio-nand",
@@ -182,6 +354,24 @@
 		.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
 		.resources = tc6393xb_mmc_resources,
 	},
+	[TC6393XB_CELL_OHCI] = {
+		.name = "tmio-ohci",
+		.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
+		.resources = tc6393xb_ohci_resources,
+		.enable = tc6393xb_ohci_enable,
+		.suspend = tc6393xb_ohci_disable,
+		.resume = tc6393xb_ohci_enable,
+		.disable = tc6393xb_ohci_disable,
+	},
+	[TC6393XB_CELL_FB] = {
+		.name = "tmio-fb",
+		.num_resources = ARRAY_SIZE(tc6393xb_fb_resources),
+		.resources = tc6393xb_fb_resources,
+		.enable = tc6393xb_fb_enable,
+		.suspend = tc6393xb_fb_disable,
+		.resume = tc6393xb_fb_enable,
+		.disable = tc6393xb_fb_disable,
+	},
 };
 
 /*--------------------------------------------------------------------------*/
@@ -369,41 +559,12 @@
 
 /*--------------------------------------------------------------------------*/
 
-static int tc6393xb_hw_init(struct platform_device *dev)
-{
-	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
-	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
-	int i;
-
-	iowrite8(tc6393xb->suspend_state.fer,	tc6393xb->scr + SCR_FER);
-	iowrite16(tcpd->scr_pll2cr,		tc6393xb->scr + SCR_PLL2CR);
-	iowrite16(tc6393xb->suspend_state.ccr,	tc6393xb->scr + SCR_CCR);
-	iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
-		  SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
-		  BIT(15),			tc6393xb->scr + SCR_MCR);
-	iowrite16(tcpd->scr_gper,		tc6393xb->scr + SCR_GPER);
-	iowrite8(0,				tc6393xb->scr + SCR_IRR);
-	iowrite8(0xbf,				tc6393xb->scr + SCR_IMR);
-
-	for (i = 0; i < 3; i++) {
-		iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
-					tc6393xb->scr + SCR_GPO_DSR(i));
-		iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
-					tc6393xb->scr + SCR_GPO_DOECR(i));
-		iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
-					tc6393xb->scr + SCR_GPI_BCR(i));
-	}
-
-	return 0;
-}
-
 static int __devinit tc6393xb_probe(struct platform_device *dev)
 {
 	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
 	struct tc6393xb *tc6393xb;
 	struct resource *iomem, *rscr;
 	int ret, temp;
-	int i;
 
 	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (!iomem)
@@ -458,21 +619,16 @@
 	if (ret)
 		goto err_enable;
 
-	tc6393xb->suspend_state.fer = 0;
-
-	for (i = 0; i < 3; i++) {
-		tc6393xb->suspend_state.gpo_dsr[i] =
-			(tcpd->scr_gpo_dsr >> (8 * i)) & 0xff;
-		tc6393xb->suspend_state.gpo_doecr[i] =
-			(tcpd->scr_gpo_doecr >> (8 * i)) & 0xff;
-	}
-
-	tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 |
-					SCR_CCR_HCLK_48;
-
-	ret = tc6393xb_hw_init(dev);
-	if (ret)
-		goto err_hw_init;
+	iowrite8(0,				tc6393xb->scr + SCR_FER);
+	iowrite16(tcpd->scr_pll2cr,		tc6393xb->scr + SCR_PLL2CR);
+	iowrite16(SCR_CCR_UNK1 | SCR_CCR_HCLK_48,
+						tc6393xb->scr + SCR_CCR);
+	iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
+		  SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
+		  BIT(15),			tc6393xb->scr + SCR_MCR);
+	iowrite16(tcpd->scr_gper,		tc6393xb->scr + SCR_GPER);
+	iowrite8(0,				tc6393xb->scr + SCR_IRR);
+	iowrite8(0xbf,				tc6393xb->scr + SCR_IMR);
 
 	printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
 			tmio_ioread8(tc6393xb->scr + SCR_REVID),
@@ -488,16 +644,33 @@
 
 	tc6393xb_attach_irq(dev);
 
+	if (tcpd->setup) {
+		ret = tcpd->setup(dev);
+		if (ret)
+			goto err_setup;
+	}
+
 	tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
 	tc6393xb_cells[TC6393XB_CELL_NAND].platform_data =
 		&tc6393xb_cells[TC6393XB_CELL_NAND];
 	tc6393xb_cells[TC6393XB_CELL_NAND].data_size =
 		sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]);
+
 	tc6393xb_cells[TC6393XB_CELL_MMC].platform_data =
 		&tc6393xb_cells[TC6393XB_CELL_MMC];
 	tc6393xb_cells[TC6393XB_CELL_MMC].data_size =
 		sizeof(tc6393xb_cells[TC6393XB_CELL_MMC]);
 
+	tc6393xb_cells[TC6393XB_CELL_OHCI].platform_data =
+		&tc6393xb_cells[TC6393XB_CELL_OHCI];
+	tc6393xb_cells[TC6393XB_CELL_OHCI].data_size =
+		sizeof(tc6393xb_cells[TC6393XB_CELL_OHCI]);
+
+	tc6393xb_cells[TC6393XB_CELL_FB].driver_data = tcpd->fb_data;
+	tc6393xb_cells[TC6393XB_CELL_FB].platform_data =
+		&tc6393xb_cells[TC6393XB_CELL_FB];
+	tc6393xb_cells[TC6393XB_CELL_FB].data_size =
+		sizeof(tc6393xb_cells[TC6393XB_CELL_FB]);
 
 	ret = mfd_add_devices(&dev->dev, dev->id,
 			tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
@@ -506,12 +679,15 @@
 	if (!ret)
 		return 0;
 
+	if (tcpd->teardown)
+		tcpd->teardown(dev);
+
+err_setup:
 	tc6393xb_detach_irq(dev);
 
 err_gpio_add:
 	if (tc6393xb->gpio.base != -1)
 		temp = gpiochip_remove(&tc6393xb->gpio);
-err_hw_init:
 	tcpd->disable(dev);
 err_clk_enable:
 	clk_disable(tc6393xb->clk);
@@ -535,6 +711,10 @@
 	int ret;
 
 	mfd_remove_devices(&dev->dev);
+
+	if (tcpd->teardown)
+		tcpd->teardown(dev);
+
 	tc6393xb_detach_irq(dev);
 
 	if (tc6393xb->gpio.base != -1) {
@@ -585,15 +765,37 @@
 	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
 	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
 	int ret;
+	int i;
 
 	clk_enable(tc6393xb->clk);
 
 	ret = tcpd->resume(dev);
-
 	if (ret)
 		return ret;
 
-	return tc6393xb_hw_init(dev);
+	if (!tcpd->resume_restore)
+		return 0;
+
+	iowrite8(tc6393xb->suspend_state.fer,	tc6393xb->scr + SCR_FER);
+	iowrite16(tcpd->scr_pll2cr,		tc6393xb->scr + SCR_PLL2CR);
+	iowrite16(tc6393xb->suspend_state.ccr,	tc6393xb->scr + SCR_CCR);
+	iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
+		  SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
+		  BIT(15),			tc6393xb->scr + SCR_MCR);
+	iowrite16(tcpd->scr_gper,		tc6393xb->scr + SCR_GPER);
+	iowrite8(0,				tc6393xb->scr + SCR_IRR);
+	iowrite8(0xbf,				tc6393xb->scr + SCR_IMR);
+
+	for (i = 0; i < 3; i++) {
+		iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
+					tc6393xb->scr + SCR_GPO_DSR(i));
+		iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
+					tc6393xb->scr + SCR_GPO_DOECR(i));
+		iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
+					tc6393xb->scr + SCR_GPI_BCR(i));
+	}
+
+	return 0;
 }
 #else
 #define tc6393xb_suspend NULL
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
new file mode 100644
index 0000000..dd843c4
--- /dev/null
+++ b/drivers/mfd/twl4030-core.c
@@ -0,0 +1,806 @@
+/*
+ * twl4030_core.c - driver for TWL4030/TPS659x0 PM and audio CODEC devices
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Modifications to defer interrupt handling to a kernel thread:
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Code cleanup and modifications to IRQ handler.
+ * by syed khasim <x0khasim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * The TWL4030 "Triton 2" is one of a family of multi-function "Power
+ * Management and System Companion Device" chips originally designed for
+ * use in OMAP2 and OMAP 3 based systems.  Its control interfaces use I2C,
+ * often at around 3 Mbit/sec, including for interrupt handling.
+ *
+ * This driver core provides genirq support for the interrupts emitted
+ * by the various modules, and exports register access primitives.
+ *
+ * FIXME this driver currently requires use of the first interrupt line
+ * (and associated registers).
+ */
+
+#define DRIVER_NAME			"twl4030"
+
+#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
+	defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
+#define twl_has_bci()		true
+#else
+#define twl_has_bci()		false
+#endif
+
+#if defined(CONFIG_KEYBOARD_TWL4030) || defined(CONFIG_KEYBOARD_TWL4030_MODULE)
+#define twl_has_keypad()	true
+#else
+#define twl_has_keypad()	false
+#endif
+
+#if defined(CONFIG_GPIO_TWL4030) || defined(CONFIG_GPIO_TWL4030_MODULE)
+#define twl_has_gpio()	true
+#else
+#define twl_has_gpio()	false
+#endif
+
+#if defined(CONFIG_TWL4030_MADC) || defined(CONFIG_TWL4030_MADC_MODULE)
+#define twl_has_madc()	true
+#else
+#define twl_has_madc()	false
+#endif
+
+#if defined(CONFIG_RTC_DRV_TWL4030) || defined(CONFIG_RTC_DRV_TWL4030_MODULE)
+#define twl_has_rtc()	true
+#else
+#define twl_has_rtc()	false
+#endif
+
+#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE)
+#define twl_has_usb()	true
+#else
+#define twl_has_usb()	false
+#endif
+
+
+/* Triton Core internal information (BEGIN) */
+
+/* Last - for index max */
+#define TWL4030_MODULE_LAST		TWL4030_MODULE_SECURED_REG
+
+#define TWL4030_NUM_SLAVES		4
+
+
+/* Base Address defns for twl4030_map[] */
+
+/* subchip/slave 0 - USB ID */
+#define TWL4030_BASEADD_USB		0x0000
+
+/* subchip/slave 1 - AUD ID */
+#define TWL4030_BASEADD_AUDIO_VOICE	0x0000
+#define TWL4030_BASEADD_GPIO		0x0098
+#define TWL4030_BASEADD_INTBR		0x0085
+#define TWL4030_BASEADD_PIH		0x0080
+#define TWL4030_BASEADD_TEST		0x004C
+
+/* subchip/slave 2 - AUX ID */
+#define TWL4030_BASEADD_INTERRUPTS	0x00B9
+#define TWL4030_BASEADD_LED		0x00EE
+#define TWL4030_BASEADD_MADC		0x0000
+#define TWL4030_BASEADD_MAIN_CHARGE	0x0074
+#define TWL4030_BASEADD_PRECHARGE	0x00AA
+#define TWL4030_BASEADD_PWM0		0x00F8
+#define TWL4030_BASEADD_PWM1		0x00FB
+#define TWL4030_BASEADD_PWMA		0x00EF
+#define TWL4030_BASEADD_PWMB		0x00F1
+#define TWL4030_BASEADD_KEYPAD		0x00D2
+
+/* subchip/slave 3 - POWER ID */
+#define TWL4030_BASEADD_BACKUP		0x0014
+#define TWL4030_BASEADD_INT		0x002E
+#define TWL4030_BASEADD_PM_MASTER	0x0036
+#define TWL4030_BASEADD_PM_RECEIVER	0x005B
+#define TWL4030_BASEADD_RTC		0x001C
+#define TWL4030_BASEADD_SECURED_REG	0x0000
+
+/* Triton Core internal information (END) */
+
+
+/* Few power values */
+#define R_CFG_BOOT			0x05
+#define R_PROTECT_KEY			0x0E
+
+/* access control values for R_PROTECT_KEY */
+#define KEY_UNLOCK1			0xce
+#define KEY_UNLOCK2			0xec
+#define KEY_LOCK			0x00
+
+/* some fields in R_CFG_BOOT */
+#define HFCLK_FREQ_19p2_MHZ		(1 << 0)
+#define HFCLK_FREQ_26_MHZ		(2 << 0)
+#define HFCLK_FREQ_38p4_MHZ		(3 << 0)
+#define HIGH_PERF_SQ			(1 << 3)
+
+
+/*----------------------------------------------------------------------*/
+
+/* is driver active, bound to a chip? */
+static bool inuse;
+
+/* Structure for each TWL4030 Slave */
+struct twl4030_client {
+	struct i2c_client *client;
+	u8 address;
+
+	/* max number of i2c_msg required is 2, for a read */
+	struct i2c_msg xfer_msg[2];
+
+	/* To lock access to xfer_msg */
+	struct mutex xfer_lock;
+};
+
+static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES];
+
+
+/* mapping the module id to slave id and base address */
+struct twl4030mapping {
+	unsigned char sid;	/* Slave ID */
+	unsigned char base;	/* base address */
+};
+
+static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+	/*
+	 * NOTE:  don't change this table without updating the
+	 * <linux/i2c/twl4030.h> defines for TWL4030_MODULE_*
+	 * so they continue to match the order in this table.
+	 */
+
+	{ 0, TWL4030_BASEADD_USB },
+
+	{ 1, TWL4030_BASEADD_AUDIO_VOICE },
+	{ 1, TWL4030_BASEADD_GPIO },
+	{ 1, TWL4030_BASEADD_INTBR },
+	{ 1, TWL4030_BASEADD_PIH },
+	{ 1, TWL4030_BASEADD_TEST },
+
+	{ 2, TWL4030_BASEADD_KEYPAD },
+	{ 2, TWL4030_BASEADD_MADC },
+	{ 2, TWL4030_BASEADD_INTERRUPTS },
+	{ 2, TWL4030_BASEADD_LED },
+	{ 2, TWL4030_BASEADD_MAIN_CHARGE },
+	{ 2, TWL4030_BASEADD_PRECHARGE },
+	{ 2, TWL4030_BASEADD_PWM0 },
+	{ 2, TWL4030_BASEADD_PWM1 },
+	{ 2, TWL4030_BASEADD_PWMA },
+	{ 2, TWL4030_BASEADD_PWMB },
+
+	{ 3, TWL4030_BASEADD_BACKUP },
+	{ 3, TWL4030_BASEADD_INT },
+	{ 3, TWL4030_BASEADD_PM_MASTER },
+	{ 3, TWL4030_BASEADD_PM_RECEIVER },
+	{ 3, TWL4030_BASEADD_RTC },
+	{ 3, TWL4030_BASEADD_SECURED_REG },
+};
+
+/*----------------------------------------------------------------------*/
+
+/* Exported Functions */
+
+/**
+ * twl4030_i2c_write - Writes num_bytes of data to TWL4030 registers
+ * @mod_no: module number
+ * @value: an array of num_bytes+1 containing data to write
+ * @reg: register address (just offset will do)
+ * @num_bytes: number of bytes to transfer
+ *
+ * IMPORTANT: the 'value' buffer must be allocated with num_bytes + 1
+ * bytes; valid data starts at offset 1.
+ *
+ * Returns the result of operation - 0 is success
+ */
+int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, u8 num_bytes)
+{
+	int ret;
+	int sid;
+	struct twl4030_client *twl;
+	struct i2c_msg *msg;
+
+	if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+		return -EPERM;
+	}
+	sid = twl4030_map[mod_no].sid;
+	twl = &twl4030_modules[sid];
+
+	if (unlikely(!inuse)) {
+		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+		return -EPERM;
+	}
+	mutex_lock(&twl->xfer_lock);
+	/*
+	 * [MSG1]: fill the register address data
+	 * fill the data Tx buffer
+	 */
+	msg = &twl->xfer_msg[0];
+	msg->addr = twl->address;
+	msg->len = num_bytes + 1;
+	msg->flags = 0;
+	msg->buf = value;
+	/* overwrite the first byte of the buffer with the register address */
+	*value = twl4030_map[mod_no].base + reg;
+	ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
+	mutex_unlock(&twl->xfer_lock);
+
+	/* i2c_transfer() returns the number of messages; map success to 0 */
+	if (ret >= 0)
+		ret = 0;
+	return ret;
+}
+EXPORT_SYMBOL(twl4030_i2c_write);
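+
+/*
+ * Usage sketch (illustrative only): because the first byte of 'value' is
+ * overwritten with the register address, a caller writing two bytes
+ * reserves three, e.g.
+ *
+ *	u8 buf[3] = { 0, lsb, msb };
+ *
+ *	err = twl4030_i2c_write(TWL4030_MODULE_PM_MASTER, buf, reg, 2);
+ *
+ * where 'reg', 'lsb' and 'msb' are hypothetical caller-supplied values.
+ */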
+
+/**
+ * twl4030_i2c_read - Reads num_bytes of data from TWL4030 registers
+ * @mod_no: module number
+ * @value: an array of at least num_bytes into which the data is read
+ * @reg: register address (just offset will do)
+ * @num_bytes: number of bytes to transfer
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, u8 num_bytes)
+{
+	int ret;
+	u8 val;
+	int sid;
+	struct twl4030_client *twl;
+	struct i2c_msg *msg;
+
+	if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+		return -EPERM;
+	}
+	sid = twl4030_map[mod_no].sid;
+	twl = &twl4030_modules[sid];
+
+	if (unlikely(!inuse)) {
+		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+		return -EPERM;
+	}
+	mutex_lock(&twl->xfer_lock);
+	/* [MSG1] fill the register address data */
+	msg = &twl->xfer_msg[0];
+	msg->addr = twl->address;
+	msg->len = 1;
+	msg->flags = 0;	/* Read the register value */
+	val = twl4030_map[mod_no].base + reg;
+	msg->buf = &val;
+	/* [MSG2] fill the data rx buffer */
+	msg = &twl->xfer_msg[1];
+	msg->addr = twl->address;
+	msg->flags = I2C_M_RD;	/* Read the register value */
+	msg->len = num_bytes;	/* only n bytes */
+	msg->buf = value;
+	ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
+	mutex_unlock(&twl->xfer_lock);
+
+	/* i2c_transfer() returns the number of messages; map success to 0 */
+	if (ret >= 0)
+		ret = 0;
+	return ret;
+}
+EXPORT_SYMBOL(twl4030_i2c_read);
+
+/**
+ * twl4030_i2c_write_u8 - Writes an 8-bit register in TWL4030
+ * @mod_no: module number
+ * @value: the 8-bit value to be written
+ * @reg: register address (just offset will do)
+ *
+ * Returns result of operation - 0 is success
+ */
+int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
+{
+	/* 2-byte buffer: offset 0 is overwritten with the register address
+	 * by twl4030_i2c_write(); offset 1 carries the data. */
+	u8 temp_buffer[2] = { 0 };
+
+	temp_buffer[1] = value;
+	return twl4030_i2c_write(mod_no, temp_buffer, reg, 1);
+}
+EXPORT_SYMBOL(twl4030_i2c_write_u8);
+
+/**
+ * twl4030_i2c_read_u8 - Reads an 8-bit register from TWL4030
+ * @mod_no: module number
+ * @value: pointer to where the 8-bit value read is stored
+ * @reg: register address (just offset will do)
+ *
+ * Returns result of operation - 0 is success
+ */
+int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
+{
+	return twl4030_i2c_read(mod_no, value, reg, 1);
+}
+EXPORT_SYMBOL(twl4030_i2c_read_u8);
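+
+/*
+ * Read-modify-write sketch (illustrative only) using the u8 helpers;
+ * 'reg' and 'mask' are hypothetical caller-supplied values:
+ *
+ *	u8 val;
+ *
+ *	if (!twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val, reg)) {
+ *		val |= mask;
+ *		twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val, reg);
+ *	}
+ */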
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE:  We know the first 8 IRQs after pdata->base_irq are
+ * for the PIH, and the next are for the PWR_INT SIH, since
+ * that's how twl_init_irq() sets things up.
+ */
+
+static int add_children(struct twl4030_platform_data *pdata)
+{
+	struct platform_device	*pdev = NULL;
+	struct twl4030_client	*twl = NULL;
+	int			status = 0;
+
+	if (twl_has_bci() && pdata->bci) {
+		twl = &twl4030_modules[3];
+
+		pdev = platform_device_alloc("twl4030_bci", -1);
+		if (!pdev) {
+			pr_debug("%s: can't alloc bci dev\n", DRIVER_NAME);
+			status = -ENOMEM;
+			goto err;
+		}
+
+		if (status == 0) {
+			pdev->dev.parent = &twl->client->dev;
+			status = platform_device_add_data(pdev, pdata->bci,
+					sizeof(*pdata->bci));
+			if (status < 0) {
+				dev_dbg(&twl->client->dev,
+					"can't add bci data, %d\n",
+					status);
+				goto err;
+			}
+		}
+
+		if (status == 0) {
+			struct resource r = {
+				.start = pdata->irq_base + 8 + 1,
+				.flags = IORESOURCE_IRQ,
+			};
+
+			status = platform_device_add_resources(pdev, &r, 1);
+		}
+
+		if (status == 0)
+			status = platform_device_add(pdev);
+
+		if (status < 0) {
+			platform_device_put(pdev);
+			dev_dbg(&twl->client->dev,
+					"can't create bci dev, %d\n",
+					status);
+			goto err;
+		}
+	}
+
+	if (twl_has_gpio() && pdata->gpio) {
+		twl = &twl4030_modules[1];
+
+		pdev = platform_device_alloc("twl4030_gpio", -1);
+		if (!pdev) {
+			pr_debug("%s: can't alloc gpio dev\n", DRIVER_NAME);
+			status = -ENOMEM;
+			goto err;
+		}
+
+		/* more driver model init */
+		if (status == 0) {
+			pdev->dev.parent = &twl->client->dev;
+			/* device_init_wakeup(&pdev->dev, 1); */
+
+			status = platform_device_add_data(pdev, pdata->gpio,
+					sizeof(*pdata->gpio));
+			if (status < 0) {
+				dev_dbg(&twl->client->dev,
+					"can't add gpio data, %d\n",
+					status);
+				goto err;
+			}
+		}
+
+		/* GPIO module IRQ */
+		if (status == 0) {
+			struct resource	r = {
+				.start = pdata->irq_base + 0,
+				.flags = IORESOURCE_IRQ,
+			};
+
+			status = platform_device_add_resources(pdev, &r, 1);
+		}
+
+		if (status == 0)
+			status = platform_device_add(pdev);
+
+		if (status < 0) {
+			platform_device_put(pdev);
+			dev_dbg(&twl->client->dev,
+					"can't create gpio dev, %d\n",
+					status);
+			goto err;
+		}
+	}
+
+	if (twl_has_keypad() && pdata->keypad) {
+		pdev = platform_device_alloc("twl4030_keypad", -1);
+		if (pdev) {
+			twl = &twl4030_modules[2];
+			pdev->dev.parent = &twl->client->dev;
+			device_init_wakeup(&pdev->dev, 1);
+			status = platform_device_add_data(pdev, pdata->keypad,
+					sizeof(*pdata->keypad));
+			if (status < 0) {
+				dev_dbg(&twl->client->dev,
+					"can't add keypad data, %d\n",
+					status);
+				platform_device_put(pdev);
+				goto err;
+			}
+			status = platform_device_add(pdev);
+			if (status < 0) {
+				platform_device_put(pdev);
+				dev_dbg(&twl->client->dev,
+						"can't create keypad dev, %d\n",
+						status);
+				goto err;
+			}
+		} else {
+			pr_debug("%s: can't alloc keypad dev\n", DRIVER_NAME);
+			status = -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (twl_has_madc() && pdata->madc) {
+		pdev = platform_device_alloc("twl4030_madc", -1);
+		if (pdev) {
+			twl = &twl4030_modules[2];
+			pdev->dev.parent = &twl->client->dev;
+			device_init_wakeup(&pdev->dev, 1);
+			status = platform_device_add_data(pdev, pdata->madc,
+					sizeof(*pdata->madc));
+			if (status < 0) {
+				platform_device_put(pdev);
+				dev_dbg(&twl->client->dev,
+					"can't add madc data, %d\n",
+					status);
+				goto err;
+			}
+			status = platform_device_add(pdev);
+			if (status < 0) {
+				platform_device_put(pdev);
+				dev_dbg(&twl->client->dev,
+						"can't create madc dev, %d\n",
+						status);
+				goto err;
+			}
+		} else {
+			pr_debug("%s: can't alloc madc dev\n", DRIVER_NAME);
+			status = -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (twl_has_rtc()) {
+		twl = &twl4030_modules[3];
+
+		pdev = platform_device_alloc("twl4030_rtc", -1);
+		if (!pdev) {
+			pr_debug("%s: can't alloc rtc dev\n", DRIVER_NAME);
+			status = -ENOMEM;
+		} else {
+			pdev->dev.parent = &twl->client->dev;
+			device_init_wakeup(&pdev->dev, 1);
+		}
+
+		/*
+		 * REVISIT platform_data here could describe use of the
+		 * "msecure" line ... but for now we just expect board
+		 * setup to tell the chip "we are secure" at all times.
+		 * Eventually, Linux might become more aware of such
+		 * HW security concerns, and "least privilege".
+		 */
+
+		/* RTC module IRQ */
+		if (status == 0) {
+			struct resource	r = {
+				.start = pdata->irq_base + 8 + 3,
+				.flags = IORESOURCE_IRQ,
+			};
+
+			status = platform_device_add_resources(pdev, &r, 1);
+		}
+
+		if (status == 0)
+			status = platform_device_add(pdev);
+
+		if (status < 0) {
+			platform_device_put(pdev);
+			dev_dbg(&twl->client->dev,
+					"can't create rtc dev, %d\n",
+					status);
+			goto err;
+		}
+	}
+
+	if (twl_has_usb() && pdata->usb) {
+		twl = &twl4030_modules[0];
+
+		pdev = platform_device_alloc("twl4030_usb", -1);
+		if (!pdev) {
+			pr_debug("%s: can't alloc usb dev\n", DRIVER_NAME);
+			status = -ENOMEM;
+			goto err;
+		}
+
+		if (status == 0) {
+			pdev->dev.parent = &twl->client->dev;
+			device_init_wakeup(&pdev->dev, 1);
+			status = platform_device_add_data(pdev, pdata->usb,
+					sizeof(*pdata->usb));
+			if (status < 0) {
+				platform_device_put(pdev);
+				dev_dbg(&twl->client->dev,
+					"can't add usb data, %d\n",
+					status);
+				goto err;
+			}
+		}
+
+		if (status == 0) {
+			struct resource r = {
+				.start = pdata->irq_base + 8 + 2,
+				.flags = IORESOURCE_IRQ,
+			};
+
+			status = platform_device_add_resources(pdev, &r, 1);
+		}
+
+		if (status == 0)
+			status = platform_device_add(pdev);
+
+		if (status < 0) {
+			platform_device_put(pdev);
+			dev_dbg(&twl->client->dev,
+					"can't create usb dev, %d\n",
+					status);
+		}
+	}
+
+err:
+	if (status)
+		pr_err("failed to add twl4030's children (status %d)\n", status);
+	return status;
+}
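+
+/*
+ * Board-side sketch (illustrative only): a machine file would typically
+ * describe the chip with something like
+ *
+ *	static struct twl4030_platform_data board_twl_data = {
+ *		.irq_base	= BOARD_TWL_IRQ_BASE,
+ *		.irq_end	= BOARD_TWL_IRQ_END,
+ *		.gpio		= &board_twl_gpio_data,
+ *		.usb		= &board_twl_usb_data,
+ *	};
+ *
+ * and pass it as the platform_data of its "twl4030" I2C board info; all
+ * board_* and BOARD_* names above are hypothetical.
+ */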
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * These three functions initialize the on-chip clock framework,
+ * letting it generate the right frequencies for USB, MADC, and
+ * other purposes.
+ */
+static inline int __init protect_pm_master(void)
+{
+	int e = 0;
+
+	e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK,
+			R_PROTECT_KEY);
+	return e;
+}
+
+static inline int __init unprotect_pm_master(void)
+{
+	int e = 0;
+
+	e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1,
+			R_PROTECT_KEY);
+	e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2,
+			R_PROTECT_KEY);
+	return e;
+}
+
+static void __init clocks_init(void)
+{
+	int e = 0;
+	struct clk *osc;
+	u32 rate;
+	u8 ctrl = HFCLK_FREQ_26_MHZ;
+
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+	if (cpu_is_omap2430())
+		osc = clk_get(NULL, "osc_ck");
+	else
+		osc = clk_get(NULL, "osc_sys_ck");
+#else
+	/* REVISIT for non-OMAP systems, pass the clock rate from
+	 * board init code, using platform_data.
+	 */
+	osc = ERR_PTR(-EIO);
+#endif
+	if (IS_ERR(osc)) {
+		printk(KERN_WARNING "Skipping twl4030 internal clock init and "
+				"using bootloader value (unknown osc rate)\n");
+		return;
+	}
+
+	rate = clk_get_rate(osc);
+	clk_put(osc);
+
+	switch (rate) {
+	case 19200000:
+		ctrl = HFCLK_FREQ_19p2_MHZ;
+		break;
+	case 26000000:
+		ctrl = HFCLK_FREQ_26_MHZ;
+		break;
+	case 38400000:
+		ctrl = HFCLK_FREQ_38p4_MHZ;
+		break;
+	}
+
+	ctrl |= HIGH_PERF_SQ;
+	e |= unprotect_pm_master();
+	/* effect: enable the MADC and USB clocks */
+	e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
+	e |= protect_pm_master();
+
+	if (e < 0)
+		pr_err("%s: clock init err [%d]\n", DRIVER_NAME, e);
+}
+
+/*----------------------------------------------------------------------*/
+
+int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+int twl_exit_irq(void);
+
+static int twl4030_remove(struct i2c_client *client)
+{
+	unsigned i;
+	int status;
+
+	status = twl_exit_irq();
+	if (status < 0)
+		return status;
+
+	for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
+		struct twl4030_client	*twl = &twl4030_modules[i];
+
+		if (twl->client && twl->client != client)
+			i2c_unregister_device(twl->client);
+		twl4030_modules[i].client = NULL;
+	}
+	inuse = false;
+	return 0;
+}
+
+/* NOTE:  this driver only handles a single twl4030/tps659x0 chip */
+static int
+twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	int				status;
+	unsigned			i;
+	struct twl4030_platform_data	*pdata = client->dev.platform_data;
+
+	if (!pdata) {
+		dev_dbg(&client->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
+		dev_dbg(&client->dev, "can't talk I2C?\n");
+		return -EIO;
+	}
+
+	if (inuse) {
+		dev_dbg(&client->dev, "driver is already in use\n");
+		return -EBUSY;
+	}
+
+	for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
+		struct twl4030_client	*twl = &twl4030_modules[i];
+
+		twl->address = client->addr + i;
+		if (i == 0)
+			twl->client = client;
+		else {
+			twl->client = i2c_new_dummy(client->adapter,
+					twl->address);
+			if (!twl->client) {
+				dev_err(&client->dev,
+					"can't attach client %d\n", i);
+				status = -ENOMEM;
+				goto fail;
+			}
+			strlcpy(twl->client->name, id->name,
+					sizeof(twl->client->name));
+		}
+		mutex_init(&twl->xfer_lock);
+	}
+	inuse = true;
+
+	/* setup clock framework */
+	clocks_init();
+
+	/* Maybe init the T2 Interrupt subsystem */
+	if (client->irq
+			&& pdata->irq_base
+			&& pdata->irq_end > pdata->irq_base) {
+		status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
+		if (status < 0)
+			goto fail;
+	}
+
+	status = add_children(pdata);
+fail:
+	if (status < 0)
+		twl4030_remove(client);
+	return status;
+}
+
+static const struct i2c_device_id twl4030_ids[] = {
+	{ "twl4030", 0 },	/* "Triton 2" */
+	{ "tps65950", 0 },	/* catalog version of twl4030 */
+	{ "tps65930", 0 },	/* fewer LDOs and DACs; no charger */
+	{ "tps65920", 0 },	/* fewer LDOs; no codec or charger */
+	{ "twl5030", 0 },	/* T2 updated */
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(i2c, twl4030_ids);
+
+/* One Client Driver, 4 Clients */
+static struct i2c_driver twl4030_driver = {
+	.driver.name	= DRIVER_NAME,
+	.id_table	= twl4030_ids,
+	.probe		= twl4030_probe,
+	.remove		= twl4030_remove,
+};
+
+static int __init twl4030_init(void)
+{
+	return i2c_add_driver(&twl4030_driver);
+}
+subsys_initcall(twl4030_init);
+
+static void __exit twl4030_exit(void)
+{
+	i2c_del_driver(&twl4030_driver);
+}
+module_exit(twl4030_exit);
+
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("I2C Core interface for TWL4030");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
new file mode 100644
index 0000000..fae868a
--- /dev/null
+++ b/drivers/mfd/twl4030-irq.c
@@ -0,0 +1,743 @@
+/*
+ * twl4030-irq.c - TWL4030/TPS659x0 irq support
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Modifications to defer interrupt handling to a kernel thread:
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Code cleanup and modifications to IRQ handler.
+ * by syed khasim <x0khasim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * TWL4030 IRQ handling has two stages in hardware, and thus in software.
+ * The Primary Interrupt Handler (PIH) stage exposes status bits saying
+ * which Secondary Interrupt Handler (SIH) stage is raising an interrupt.
+ * SIH modules are more traditional IRQ components, which support per-IRQ
+ * enable/disable and trigger controls; they do most of the work.
+ *
+ * These chips are designed to support IRQ handling from two different
+ * I2C masters.  Each has a dedicated IRQ line, and dedicated IRQ status
+ * and mask registers in the PIH and SIH modules.
+ *
+ * We set up IRQs starting at a platform-specified base, always starting
+ * with PIH and the SIH for PWR_INT and then usually adding GPIO:
+ *	base + 0  .. base + 7	PIH
+ *	base + 8  .. base + 15	SIH for PWR_INT
+ *	base + 16 .. base + 33	SIH for GPIO
+ */
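+
+/*
+ * Worked example (illustrative numbers): with a platform base of 368,
+ *	PIH IRQs     are 368..375,
+ *	PWR_INT IRQs are 376..383,
+ *	GPIO IRQs    are 384..401.
+ */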
+
+/* PIH register offsets */
+#define REG_PIH_ISR_P1			0x01
+#define REG_PIH_ISR_P2			0x02
+#define REG_PIH_SIR			0x03	/* for testing */
+
+
+/* Linux could (eventually) use either IRQ line */
+static int irq_line;
+
+struct sih {
+	char	name[8];
+	u8	module;			/* module id */
+	u8	control_offset;		/* for SIH_CTRL */
+	bool	set_cor;
+
+	u8	bits;			/* valid in isr/imr */
+	u8	bytes_ixr;		/* bytelen of ISR/IMR/SIR */
+
+	u8	edr_offset;
+	u8	bytes_edr;		/* bytelen of EDR */
+
+	/* SIR ignored -- set interrupt, for testing only */
+	struct irq_data {
+		u8	isr_offset;
+		u8	imr_offset;
+	} mask[2];
+	/* + 2 bytes padding */
+};
+
+#define SIH_INITIALIZER(modname, nbits) \
+	.module		= TWL4030_MODULE_ ## modname, \
+	.control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
+	.bits		= nbits, \
+	.bytes_ixr	= DIV_ROUND_UP(nbits, 8), \
+	.edr_offset	= TWL4030_ ## modname ## _EDR, \
+	.bytes_edr	= DIV_ROUND_UP((2*(nbits)), 8), \
+	.mask = { { \
+		.isr_offset	= TWL4030_ ## modname ## _ISR1, \
+		.imr_offset	= TWL4030_ ## modname ## _IMR1, \
+	}, \
+	{ \
+		.isr_offset	= TWL4030_ ## modname ## _ISR2, \
+		.imr_offset	= TWL4030_ ## modname ## _IMR2, \
+	}, },
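+
+/*
+ * For reference (not generated code), SIH_INITIALIZER(MADC, 4) expands
+ * roughly to
+ *
+ *	.module		= TWL4030_MODULE_MADC,
+ *	.control_offset	= TWL4030_MADC_SIH_CTRL,
+ *	.bits		= 4,
+ *	.bytes_ixr	= 1,
+ *	.edr_offset	= TWL4030_MADC_EDR,
+ *	.bytes_edr	= 1,
+ *	.mask		= { { TWL4030_MADC_ISR1, TWL4030_MADC_IMR1 },
+ *			    { TWL4030_MADC_ISR2, TWL4030_MADC_IMR2 }, },
+ */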
+
+/* register naming policies are inconsistent ... */
+#define TWL4030_INT_PWR_EDR		TWL4030_INT_PWR_EDR1
+#define TWL4030_MODULE_KEYPAD_KEYP	TWL4030_MODULE_KEYPAD
+#define TWL4030_MODULE_INT_PWR		TWL4030_MODULE_INT
+
+
+/* Order in this table matches order in PIH_ISR.  That is,
+ * BIT(n) in PIH_ISR is sih_modules[n].
+ */
+static const struct sih sih_modules[6] = {
+	[0] = {
+		.name		= "gpio",
+		.module		= TWL4030_MODULE_GPIO,
+		.control_offset	= REG_GPIO_SIH_CTRL,
+		.set_cor	= true,
+		.bits		= TWL4030_GPIO_MAX,
+		.bytes_ixr	= 3,
+		/* Note: *all* of these IRQs default to no-trigger */
+		.edr_offset	= REG_GPIO_EDR1,
+		.bytes_edr	= 5,
+		.mask = { {
+			.isr_offset	= REG_GPIO_ISR1A,
+			.imr_offset	= REG_GPIO_IMR1A,
+		}, {
+			.isr_offset	= REG_GPIO_ISR1B,
+			.imr_offset	= REG_GPIO_IMR1B,
+		}, },
+	},
+	[1] = {
+		.name		= "keypad",
+		.set_cor	= true,
+		SIH_INITIALIZER(KEYPAD_KEYP, 4)
+	},
+	[2] = {
+		.name		= "bci",
+		.module		= TWL4030_MODULE_INTERRUPTS,
+		.control_offset	= TWL4030_INTERRUPTS_BCISIHCTRL,
+		.bits		= 12,
+		.bytes_ixr	= 2,
+		.edr_offset	= TWL4030_INTERRUPTS_BCIEDR1,
+		/* Note: most of these IRQs default to no-trigger */
+		.bytes_edr	= 3,
+		.mask = { {
+			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1A,
+			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1A,
+		}, {
+			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1B,
+			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1B,
+		}, },
+	},
+	[3] = {
+		.name		= "madc",
+		SIH_INITIALIZER(MADC, 4)
+	},
+	[4] = {
+		/* USB doesn't use the same SIH organization */
+		.name		= "usb",
+	},
+	[5] = {
+		.name		= "power",
+		.set_cor	= true,
+		SIH_INITIALIZER(INT_PWR, 8)
+	},
+		/* there are no SIH modules #6 or #7 ... */
+};
+
+#undef TWL4030_MODULE_KEYPAD_KEYP
+#undef TWL4030_MODULE_INT_PWR
+#undef TWL4030_INT_PWR_EDR
+
+/*----------------------------------------------------------------------*/
+
+static unsigned twl4030_irq_base;
+
+static struct completion irq_event;
+
+/*
+ * This thread processes interrupts reported by the Primary Interrupt Handler.
+ */
+static int twl4030_irq_thread(void *data)
+{
+	long irq = (long)data;
+	irq_desc_t *desc = irq_desc + irq;
+	static unsigned i2c_errors;
+	static const unsigned max_i2c_errors = 100;
+
+	current->flags |= PF_NOFREEZE;
+
+	while (!kthread_should_stop()) {
+		int ret;
+		int module_irq;
+		u8 pih_isr;
+
+		/* Wait for IRQ, then read PIH irq status (also blocking) */
+		wait_for_completion_interruptible(&irq_event);
+
+		ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
+					  REG_PIH_ISR_P1);
+		if (ret) {
+			pr_warning("twl4030: I2C error %d reading PIH ISR\n",
+					ret);
+			if (++i2c_errors >= max_i2c_errors) {
+				printk(KERN_ERR "Maximum I2C error count"
+						" exceeded.  Terminating %s.\n",
+						__func__);
+				break;
+			}
+			complete(&irq_event);
+			continue;
+		}
+
+		/* these handlers deal with the relevant SIH irq status */
+		local_irq_disable();
+		for (module_irq = twl4030_irq_base;
+				pih_isr;
+				pih_isr >>= 1, module_irq++) {
+			if (pih_isr & 0x1) {
+				irq_desc_t *d = irq_desc + module_irq;
+
+				/* These can't be masked ... always warn
+				 * if we get any surprises.
+				 */
+				if (d->status & IRQ_DISABLED)
+					note_interrupt(module_irq, d,
+							IRQ_NONE);
+				else
+					d->handle_irq(module_irq, d);
+			}
+		}
+		local_irq_enable();
+
+		desc->chip->unmask(irq);
+	}
+
+	return 0;
+}
+
+/*
+ * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
+ * This is a chained interrupt, so there is no desc->action method for it.
+ * Now we need to query the interrupt controller in the twl4030 to determine
+ * which module is generating the interrupt request.  However, we can't do i2c
+ * transactions in interrupt context, so we must defer that work to a kernel
+ * thread.  All we do here is acknowledge and mask the interrupt and wakeup
+ * the kernel thread.
+ */
+static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc)
+{
+	/* Acknowledge, clear *AND* mask the interrupt... */
+	desc->chip->ack(irq);
+	complete(&irq_event);
+}
+
+static struct task_struct *start_twl4030_irq_thread(long irq)
+{
+	struct task_struct *thread;
+
+	init_completion(&irq_event);
+	thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
+	if (IS_ERR(thread)) {
+		pr_err("twl4030: could not create irq %ld thread!\n", irq);
+		return NULL;
+	}
+
+	return thread;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * twl4030_init_sih_modules() ... start from a known state where no
+ * IRQs will be coming in, and where we can quickly enable them then
+ * handle them as they arrive.  Mask all IRQs: maybe init SIH_CTRL.
+ *
+ * NOTE:  we don't touch EDR registers here; they stay with hardware
+ * defaults or whatever the last value was.  Note that when both EDR
+ * bits for an IRQ are clear, that's as if its IMR bit is set...
+ */
+static int twl4030_init_sih_modules(unsigned line)
+{
+	const struct sih *sih;
+	u8 buf[4];
+	int i;
+	int status;
+
+	/* line 0 == int1_n signal; line 1 == int2_n signal */
+	if (line > 1)
+		return -EINVAL;
+
+	irq_line = line;
+
+	/* disable all interrupts on our line */
+	memset(buf, 0xff, sizeof buf);
+	sih = sih_modules;
+	for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+
+		/* skip USB -- it's funky */
+		if (!sih->bytes_ixr)
+			continue;
+
+		status = twl4030_i2c_write(sih->module, buf,
+				sih->mask[line].imr_offset, sih->bytes_ixr);
+		if (status < 0)
+			pr_err("twl4030: err %d initializing %s %s\n",
+					status, sih->name, "IMR");
+
+		/* Maybe disable "exclusive" mode; buffer second pending irq;
+		 * set Clear-On-Read (COR) bit.
+		 *
+		 * NOTE that sometimes COR polarity is documented as being
+		 * inverted:  for MADC and BCI, COR=1 means "clear on write".
+		 * And for PWR_INT it's not documented...
+		 */
+		if (sih->set_cor) {
+			status = twl4030_i2c_write_u8(sih->module,
+					TWL4030_SIH_CTRL_COR_MASK,
+					sih->control_offset);
+			if (status < 0)
+				pr_err("twl4030: err %d initializing %s %s\n",
+						status, sih->name, "SIH_CTRL");
+		}
+	}
+
+	sih = sih_modules;
+	for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+		u8 rxbuf[4];
+		int j;
+
+		/* skip USB */
+		if (!sih->bytes_ixr)
+			continue;
+
+		/* Clear pending interrupt status.  Either the read was
+		 * enough, or we need to write those bits.  Repeat, in
+		 * case an IRQ is pending (PENDDIS=0) ... that's not
+		 * uncommon with PWR_INT.PWRON.
+		 */
+		for (j = 0; j < 2; j++) {
+			status = twl4030_i2c_read(sih->module, rxbuf,
+				sih->mask[line].isr_offset, sih->bytes_ixr);
+			if (status < 0)
+				pr_err("twl4030: err %d initializing %s %s\n",
+					status, sih->name, "ISR");
+
+			if (!sih->set_cor)
+				status = twl4030_i2c_write(sih->module, buf,
+					sih->mask[line].isr_offset,
+					sih->bytes_ixr);
+			/* else COR=1 means read sufficed.
+			 * (for most SIH modules...)
+			 */
+		}
+	}
+
+	return 0;
+}
+
+static inline void activate_irq(int irq)
+{
+#ifdef CONFIG_ARM
+	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
+	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
+	 */
+	set_irq_flags(irq, IRQF_VALID);
+#else
+	/* same effect on other architectures */
+	set_irq_noprobe(irq);
+#endif
+}
+
+/*----------------------------------------------------------------------*/
+
+static DEFINE_SPINLOCK(sih_agent_lock);
+
+static struct workqueue_struct *wq;
+
+struct sih_agent {
+	int			irq_base;
+	const struct sih	*sih;
+
+	u32			imr;
+	bool			imr_change_pending;
+	struct work_struct	mask_work;
+
+	u32			edge_change;
+	struct work_struct	edge_work;
+};
+
+static void twl4030_sih_do_mask(struct work_struct *work)
+{
+	struct sih_agent	*agent;
+	const struct sih	*sih;
+	union {
+		u8	bytes[4];
+		u32	word;
+	}			imr;
+	int			status;
+
+	agent = container_of(work, struct sih_agent, mask_work);
+
+	/* see what work we have */
+	spin_lock_irq(&sih_agent_lock);
+	if (agent->imr_change_pending) {
+		sih = agent->sih;
+		/* byte[0] gets overwritten as we write ... */
+		imr.word = cpu_to_le32(agent->imr << 8);
+		agent->imr_change_pending = false;
+	} else
+		sih = NULL;
+	spin_unlock_irq(&sih_agent_lock);
+	if (!sih)
+		return;
+
+	/* write the whole mask ... simpler than subsetting it */
+	status = twl4030_i2c_write(sih->module, imr.bytes,
+			sih->mask[irq_line].imr_offset, sih->bytes_ixr);
+	if (status)
+		pr_err("twl4030: %s, %s --> %d\n", __func__,
+				"write", status);
+}
+
+static void twl4030_sih_do_edge(struct work_struct *work)
+{
+	struct sih_agent	*agent;
+	const struct sih	*sih;
+	u8			bytes[6];
+	u32			edge_change;
+	int			status;
+
+	agent = container_of(work, struct sih_agent, edge_work);
+
+	/* see what work we have */
+	spin_lock_irq(&sih_agent_lock);
+	edge_change = agent->edge_change;
+	agent->edge_change = 0;
+	sih = edge_change ? agent->sih : NULL;
+	spin_unlock_irq(&sih_agent_lock);
+	if (!sih)
+		return;
+
+	/* Read, reserving first byte for write scratch.  Yes, this
+	 * could be cached for some speedup ... but be careful about
+	 * any processor on the other IRQ line, EDR registers are
+	 * shared.
+	 */
+	status = twl4030_i2c_read(sih->module, bytes + 1,
+			sih->edr_offset, sih->bytes_edr);
+	if (status) {
+		pr_err("twl4030: %s, %s --> %d\n", __func__,
+				"read", status);
+		return;
+	}
+
+	/* Modify only the bits we know must change */
+	while (edge_change) {
+		int		i = fls(edge_change) - 1;
+		struct irq_desc	*d = irq_desc + i + agent->irq_base;
+		int		byte = 1 + (i >> 2);
+		int		off = (i & 0x3) * 2;
+
+		bytes[byte] &= ~(0x03 << off);
+
+		spin_lock_irq(&d->lock);
+		if (d->status & IRQ_TYPE_EDGE_RISING)
+			bytes[byte] |= BIT(off + 1);
+		if (d->status & IRQ_TYPE_EDGE_FALLING)
+			bytes[byte] |= BIT(off + 0);
+		spin_unlock_irq(&d->lock);
+
+		edge_change &= ~BIT(i);
+	}
+
+	/* Write */
+	status = twl4030_i2c_write(sih->module, bytes,
+			sih->edr_offset, sih->bytes_edr);
+	if (status)
+		pr_err("twl4030: %s, %s --> %d\n", __func__,
+				"write", status);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * All irq_chip methods get issued from code holding irq_desc[irq].lock,
+ * which can't perform the underlying I2C operations (because they sleep).
+ * So we must hand them off to a thread (workqueue) and cope with asynch
+ * completion, potentially including some re-ordering, of these requests.
+ */
+
+static void twl4030_sih_mask(unsigned irq)
+{
+	struct sih_agent *sih = get_irq_chip_data(irq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sih_agent_lock, flags);
+	sih->imr |= BIT(irq - sih->irq_base);
+	sih->imr_change_pending = true;
+	queue_work(wq, &sih->mask_work);
+	spin_unlock_irqrestore(&sih_agent_lock, flags);
+}
+
+static void twl4030_sih_unmask(unsigned irq)
+{
+	struct sih_agent *sih = get_irq_chip_data(irq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sih_agent_lock, flags);
+	sih->imr &= ~BIT(irq - sih->irq_base);
+	sih->imr_change_pending = true;
+	queue_work(wq, &sih->mask_work);
+	spin_unlock_irqrestore(&sih_agent_lock, flags);
+}
+
+static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+{
+	struct sih_agent *sih = get_irq_chip_data(irq);
+	struct irq_desc *desc = irq_desc + irq;
+	unsigned long flags;
+
+	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+		return -EINVAL;
+
+	spin_lock_irqsave(&sih_agent_lock, flags);
+	if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
+		desc->status &= ~IRQ_TYPE_SENSE_MASK;
+		desc->status |= trigger;
+		sih->edge_change |= BIT(irq - sih->irq_base);
+		queue_work(wq, &sih->edge_work);
+	}
+	spin_unlock_irqrestore(&sih_agent_lock, flags);
+	return 0;
+}
+
+static struct irq_chip twl4030_sih_irq_chip = {
+	.name		= "twl4030",
+	.mask		= twl4030_sih_mask,
+	.unmask		= twl4030_sih_unmask,
+	.set_type	= twl4030_sih_set_type,
+};
+
+/*----------------------------------------------------------------------*/
+
+static inline int sih_read_isr(const struct sih *sih)
+{
+	int status;
+	union {
+		u8 bytes[4];
+		u32 word;
+	} isr;
+
+	/* FIXME need retry-on-error ... */
+
+	isr.word = 0;
+	status = twl4030_i2c_read(sih->module, isr.bytes,
+			sih->mask[irq_line].isr_offset, sih->bytes_ixr);
+
+	return (status < 0) ? status : le32_to_cpu(isr.word);
+}
+
+/*
+ * Generic handler for SIH interrupts ... we "know" this is called
+ * in task context, with IRQs enabled.
+ */
+static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+{
+	struct sih_agent *agent = get_irq_data(irq);
+	const struct sih *sih = agent->sih;
+	int isr;
+
+	/* reading ISR acks the IRQs, using clear-on-read mode */
+	local_irq_enable();
+	isr = sih_read_isr(sih);
+	local_irq_disable();
+
+	if (isr < 0) {
+		pr_err("twl4030: %s SIH, read ISR error %d\n",
+			sih->name, isr);
+		/* REVISIT:  recover; eventually mask it all, etc */
+		return;
+	}
+
+	while (isr) {
+		irq = fls(isr);
+		irq--;
+		isr &= ~BIT(irq);
+
+		if (irq < sih->bits)
+			generic_handle_irq(agent->irq_base + irq);
+		else
+			pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
+				sih->name, irq);
+	}
+}
+
+static unsigned twl4030_irq_next;
+
+/* returns the first IRQ used by this SIH bank,
+ * or negative errno
+ */
+int twl4030_sih_setup(int module)
+{
+	int			sih_mod;
+	const struct sih	*sih = NULL;
+	struct sih_agent	*agent;
+	int			i, irq;
+	int			status = -EINVAL;
+	unsigned		irq_base = twl4030_irq_next;
+
+	/* only support modules with standard clear-on-read for now */
+	for (sih_mod = 0, sih = sih_modules;
+			sih_mod < ARRAY_SIZE(sih_modules);
+			sih_mod++, sih++) {
+		if (sih->module == module && sih->set_cor) {
+			if (!WARN((irq_base + sih->bits) > NR_IRQS,
+					"irq %d for %s too big\n",
+					irq_base + sih->bits,
+					sih->name))
+				status = 0;
+			break;
+		}
+	}
+	if (status < 0)
+		return status;
+
+	agent = kzalloc(sizeof *agent, GFP_KERNEL);
+	if (!agent)
+		return -ENOMEM;
+
+	status = 0;
+
+	agent->irq_base = irq_base;
+	agent->sih = sih;
+	agent->imr = ~0;
+	INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
+	INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);
+
+	for (i = 0; i < sih->bits; i++) {
+		irq = irq_base + i;
+
+		set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip,
+				handle_edge_irq);
+		set_irq_chip_data(irq, agent);
+		activate_irq(irq);
+	}
+
+	status = irq_base;
+	twl4030_irq_next += i;
+
+	/* replace generic PIH handler (handle_simple_irq) */
+	irq = sih_mod + twl4030_irq_base;
+	set_irq_data(irq, agent);
+	set_irq_chained_handler(irq, handle_twl4030_sih);
+
+	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
+			irq, irq_base, twl4030_irq_next - 1);
+
+	return status;
+}
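+
+/*
+ * Usage sketch (illustrative only): the twl4030 GPIO driver is expected
+ * to call
+ *
+ *	irq_base = twl4030_sih_setup(TWL4030_MODULE_GPIO);
+ *
+ * and, when the result is not negative, treat it as the first of its
+ * TWL4030_GPIO_MAX chained GPIO IRQs.
+ */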
+
+/* FIXME need a call to reverse twl4030_sih_setup() ... */
+
+
+/*----------------------------------------------------------------------*/
+
+/* FIXME pass in which interrupt line we'll use ... */
+#define twl_irq_line	0
+
+int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+{
+	static struct irq_chip	twl4030_irq_chip;
+
+	int			status;
+	int			i;
+	struct task_struct	*task;
+
+	/*
+	 * Mask and clear all TWL4030 interrupts since initially we do
+	 * not have any TWL4030 module interrupt handlers present
+	 */
+	status = twl4030_init_sih_modules(twl_irq_line);
+	if (status < 0)
+		return status;
+
+	wq = create_singlethread_workqueue("twl4030-irqchip");
+	if (!wq) {
+		pr_err("twl4030: workqueue FAIL\n");
+		return -ESRCH;
+	}
+
+	twl4030_irq_base = irq_base;
+
+	/* install an irq handler for each of the SIH modules;
+	 * clone dummy irq_chip since PIH can't *do* anything
+	 */
+	twl4030_irq_chip = dummy_irq_chip;
+	twl4030_irq_chip.name = "twl4030";
+
+	twl4030_sih_irq_chip.ack = dummy_irq_chip.ack;
+
+	for (i = irq_base; i < irq_end; i++) {
+		set_irq_chip_and_handler(i, &twl4030_irq_chip,
+				handle_simple_irq);
+		activate_irq(i);
+	}
+	twl4030_irq_next = i;
+	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
+			irq_num, irq_base, twl4030_irq_next - 1);
+
+	/* ... and the PWR_INT module ... */
+	status = twl4030_sih_setup(TWL4030_MODULE_INT);
+	if (status < 0) {
+		pr_err("twl4030: sih_setup PWR INT --> %d\n", status);
+		goto fail;
+	}
+
+	/* install an irq handler to demultiplex the TWL4030 interrupt */
+	task = start_twl4030_irq_thread(irq_num);
+	if (!task) {
+		pr_err("twl4030: irq thread FAIL\n");
+		status = -ESRCH;
+		goto fail;
+	}
+
+	set_irq_data(irq_num, task);
+	set_irq_chained_handler(irq_num, handle_twl4030_pih);
+
+	return status;
+
+fail:
+	for (i = irq_base; i < irq_end; i++)
+		set_irq_chip_and_handler(i, NULL, NULL);
+	destroy_workqueue(wq);
+	wq = NULL;
+	return status;
+}
+
+int twl_exit_irq(void)
+{
+	/* FIXME undo twl_init_irq() */
+	if (twl4030_irq_base) {
+		pr_err("twl4030: can't yet clean up IRQs?\n");
+		return -ENOSYS;
+	}
+	return 0;
+}
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 25a7a5d..0d47fb9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -183,6 +183,9 @@
 			(wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
 			| src[i - reg];
 
+		/* Don't store volatile bits */
+		wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
+
 		src[i - reg] = cpu_to_be16(src[i - reg]);
 	}
 
@@ -1120,6 +1123,7 @@
 			}
 			value = be16_to_cpu(value);
 			value &= wm8350_reg_io_map[i].readable;
+			value &= ~wm8350_reg_io_map[i].vol;
 			wm8350->reg_cache[i] = value;
 		} else
 			wm8350->reg_cache[i] = reg_map[i];
@@ -1128,7 +1132,6 @@
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(wm8350_create_cache);
 
 /*
  * Register a client device.  This is non-fatal since there is no need to
@@ -1217,7 +1220,7 @@
 
 	mutex_init(&wm8350->irq_mutex);
 	INIT_WORK(&wm8350->irq_work, wm8350_irq_worker);
-	if (irq != NO_IRQ) {
+	if (irq) {
 		ret = request_irq(irq, wm8350_irq, 0,
 				  "wm8350", wm8350);
 		if (ret != 0) {
diff --git a/drivers/misc/hp-wmi.c b/drivers/misc/hp-wmi.c
index 5dabfb6..4b7c24c 100644
--- a/drivers/misc/hp-wmi.c
+++ b/drivers/misc/hp-wmi.c
@@ -82,6 +82,7 @@
 	{KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
 	{KE_KEY, 0x20e6, KEY_PROG1},
 	{KE_KEY, 0x2142, KEY_MEDIA},
+	{KE_KEY, 0x213b, KEY_INFO},
 	{KE_KEY, 0x231b, KEY_HELP},
 	{KE_END, 0}
 };
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 406989e..7a72e75 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -132,6 +132,7 @@
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_hw_segs == 1) {
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index ae16d84..3b2085b 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -3,6 +3,9 @@
  *
  *  Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
  *
+ * Current driver maintained by Ben Dooks and Simtec Electronics
+ *  Copyright (C) 2008 Simtec Electronics <ben-linux@fluff.org>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -13,6 +16,7 @@
 #include <linux/clk.h>
 #include <linux/mmc/host.h>
 #include <linux/platform_device.h>
+#include <linux/cpufreq.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 
@@ -39,9 +43,9 @@
 	dbg_conf  = (1 << 8),
 };
 
-static const int dbgmap_err   = dbg_err | dbg_fail;
+static const int dbgmap_err   = dbg_fail;
 static const int dbgmap_info  = dbg_info | dbg_conf;
-static const int dbgmap_debug = dbg_debug;
+static const int dbgmap_debug = dbg_err | dbg_debug;
 
 #define dbg(host, channels, args...)		  \
 	do {					  \
@@ -189,7 +193,7 @@
 }
 
 static inline int get_data_buffer(struct s3cmci_host *host,
-				  u32 *words, u32 **pointer)
+				  u32 *bytes, u32 **pointer)
 {
 	struct scatterlist *sg;
 
@@ -206,7 +210,7 @@
 	}
 	sg = &host->mrq->data->sg[host->pio_sgptr];
 
-	*words = sg->length >> 2;
+	*bytes = sg->length;
 	*pointer = sg_virt(sg);
 
 	host->pio_sgptr++;
@@ -222,7 +226,7 @@
 	u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
 
 	fifostat &= S3C2410_SDIFSTA_COUNTMASK;
-	return fifostat >> 2;
+	return fifostat;
 }
 
 static inline u32 fifo_free(struct s3cmci_host *host)
@@ -230,13 +234,15 @@
 	u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
 
 	fifostat &= S3C2410_SDIFSTA_COUNTMASK;
-	return (63 - fifostat) >> 2;
+	return 63 - fifostat;
 }
 
 static void do_pio_read(struct s3cmci_host *host)
 {
 	int res;
 	u32 fifo;
+	u32 *ptr;
+	u32 fifo_words;
 	void __iomem *from_ptr;
 
 	/* write real prescaler to host, it might be set slow to fix */
@@ -245,8 +251,8 @@
 	from_ptr = host->base + host->sdidata;
 
 	while ((fifo = fifo_count(host))) {
-		if (!host->pio_words) {
-			res = get_data_buffer(host, &host->pio_words,
+		if (!host->pio_bytes) {
+			res = get_data_buffer(host, &host->pio_bytes,
 					      &host->pio_ptr);
 			if (res) {
 				host->pio_active = XFER_NONE;
@@ -259,26 +265,47 @@
 
 			dbg(host, dbg_pio,
 			    "pio_read(): new target: [%i]@[%p]\n",
-			    host->pio_words, host->pio_ptr);
+			    host->pio_bytes, host->pio_ptr);
 		}
 
 		dbg(host, dbg_pio,
 		    "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
-		    fifo, host->pio_words,
+		    fifo, host->pio_bytes,
 		    readl(host->base + S3C2410_SDIDCNT));
 
-		if (fifo > host->pio_words)
-			fifo = host->pio_words;
+		/* If we have reached the end of the block, we can
+		 * read a word and get 1 to 3 bytes.  If we are in
+		 * the middle of the block, we have to read full
+		 * words, otherwise we will write garbage, so round
+		 * down to an even multiple of 4. */
+		if (fifo >= host->pio_bytes)
+			fifo = host->pio_bytes;
+		else
+			fifo -= fifo & 3;
 
-		host->pio_words -= fifo;
+		host->pio_bytes -= fifo;
 		host->pio_count += fifo;
 
-		while (fifo--)
-			*(host->pio_ptr++) = readl(from_ptr);
+		fifo_words = fifo >> 2;
+		ptr = host->pio_ptr;
+		while (fifo_words--)
+			*ptr++ = readl(from_ptr);
+		host->pio_ptr = ptr;
+
+		if (fifo & 3) {
+			u32 n = fifo & 3;
+			u32 data = readl(from_ptr);
+			u8 *p = (u8 *)host->pio_ptr;
+
+			while (n--) {
+				*p++ = data;
+				data >>= 8;
+			}
+		}
 	}
 
-	if (!host->pio_words) {
-		res = get_data_buffer(host, &host->pio_words, &host->pio_ptr);
+	if (!host->pio_bytes) {
+		res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr);
 		if (res) {
 			dbg(host, dbg_pio,
 			    "pio_read(): complete (no more buffers).\n");
@@ -298,12 +325,13 @@
 	void __iomem *to_ptr;
 	int res;
 	u32 fifo;
+	u32 *ptr;
 
 	to_ptr = host->base + host->sdidata;
 
 	while ((fifo = fifo_free(host))) {
-		if (!host->pio_words) {
-			res = get_data_buffer(host, &host->pio_words,
+		if (!host->pio_bytes) {
+			res = get_data_buffer(host, &host->pio_bytes,
 							&host->pio_ptr);
 			if (res) {
 				dbg(host, dbg_pio,
@@ -315,18 +343,27 @@
 
 			dbg(host, dbg_pio,
 			    "pio_write(): new source: [%i]@[%p]\n",
-			    host->pio_words, host->pio_ptr);
+			    host->pio_bytes, host->pio_ptr);
 
 		}
 
-		if (fifo > host->pio_words)
-			fifo = host->pio_words;
+		/* If we have reached the end of the block, we have to
+		 * write exactly the remaining number of bytes.  If we
+		 * are in the middle of the block, we have to write
+		 * full words, so round down to an even multiple of 4. */
+		if (fifo >= host->pio_bytes)
+			fifo = host->pio_bytes;
+		else
+			fifo -= fifo & 3;
 
-		host->pio_words -= fifo;
+		host->pio_bytes -= fifo;
 		host->pio_count += fifo;
 
+		fifo = (fifo + 3) >> 2;
+		ptr = host->pio_ptr;
 		while (fifo--)
-			writel(*(host->pio_ptr++), to_ptr);
+			writel(*ptr++, to_ptr);
+		host->pio_ptr = ptr;
 	}
 
 	enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
@@ -349,9 +386,9 @@
 		clear_imask(host);
 		if (host->pio_active != XFER_NONE) {
 			dbg(host, dbg_err, "unfinished %s "
-			    "- pio_count:[%u] pio_words:[%u]\n",
+			    "- pio_count:[%u] pio_bytes:[%u]\n",
 			    (host->pio_active == XFER_READ) ? "read" : "write",
-			    host->pio_count, host->pio_words);
+			    host->pio_count, host->pio_bytes);
 
 			if (host->mrq->data)
 				host->mrq->data->error = -EINVAL;
@@ -812,11 +849,10 @@
 		/* We cannot deal with unaligned blocks with more than
 		 * one block being transferred. */
 
-		if (data->blocks > 1)
+		if (data->blocks > 1) {
+			pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz);
 			return -EINVAL;
-
-		/* No support yet for non-word block transfers. */
-		return -EINVAL;
+		}
 	}
 
 	while (readl(host->base + S3C2410_SDIDSTA) &
@@ -896,7 +932,7 @@
 	BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
 
 	host->pio_sgptr = 0;
-	host->pio_words = 0;
+	host->pio_bytes = 0;
 	host->pio_count = 0;
 	host->pio_active = rw ? XFER_WRITE : XFER_READ;
 
@@ -1033,10 +1069,33 @@
 		s3cmci_send_request(mmc);
 }
 
+static void s3cmci_set_clk(struct s3cmci_host *host, struct mmc_ios *ios)
+{
+	u32 mci_psc;
+
+	/* Set clock */
+	for (mci_psc = 0; mci_psc < 255; mci_psc++) {
+		host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
+
+		if (host->real_rate <= ios->clock)
+			break;
+	}
+
+	if (mci_psc > 255)
+		mci_psc = 255;
+
+	host->prescaler = mci_psc;
+	writel(host->prescaler, host->base + S3C2410_SDIPRE);
+
+	/* If requested clock is 0, real_rate will be 0, too */
+	if (ios->clock == 0)
+		host->real_rate = 0;
+}
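+
+/*
+ * Worked example (illustrative numbers): with clk_rate = 50 MHz,
+ * clk_div = 1 and ios->clock = 400 kHz, the loop above stops at
+ * mci_psc = 124, giving real_rate = 50000000 / 125 = 400 kHz.
+ */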
+
 static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct s3cmci_host *host = mmc_priv(mmc);
-	u32 mci_psc, mci_con;
+	u32 mci_con;
 
 	/* Set the power state */
 
@@ -1074,23 +1133,7 @@
 		break;
 	}
 
-	/* Set clock */
-	for (mci_psc = 0; mci_psc < 255; mci_psc++) {
-		host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
-
-		if (host->real_rate <= ios->clock)
-			break;
-	}
-
-	if (mci_psc > 255)
-		mci_psc = 255;
-
-	host->prescaler = mci_psc;
-	writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
-	/* If requested clock is 0, real_rate will be 0, too */
-	if (ios->clock == 0)
-		host->real_rate = 0;
+	s3cmci_set_clk(host, ios);
 
 	/* Set CLOCK_ENABLE */
 	if (ios->clock)
@@ -1148,6 +1191,61 @@
 	 * checks. Any zero fields ensure reasonable defaults are picked. */
 };
 
+#ifdef CONFIG_CPU_FREQ
+
+static int s3cmci_cpufreq_transition(struct notifier_block *nb,
+				     unsigned long val, void *data)
+{
+	struct s3cmci_host *host;
+	struct mmc_host *mmc;
+	unsigned long newclk;
+	unsigned long flags;
+
+	host = container_of(nb, struct s3cmci_host, freq_transition);
+	newclk = clk_get_rate(host->clk);
+	mmc = host->mmc;
+
+	if ((val == CPUFREQ_PRECHANGE && newclk > host->clk_rate) ||
+	    (val == CPUFREQ_POSTCHANGE && newclk < host->clk_rate)) {
+		spin_lock_irqsave(&mmc->lock, flags);
+
+		host->clk_rate = newclk;
+
+		if (mmc->ios.power_mode != MMC_POWER_OFF &&
+		    mmc->ios.clock != 0)
+			s3cmci_set_clk(host, &mmc->ios);
+
+		spin_unlock_irqrestore(&mmc->lock, flags);
+	}
+
+	return 0;
+}
+
+static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
+{
+	host->freq_transition.notifier_call = s3cmci_cpufreq_transition;
+
+	return cpufreq_register_notifier(&host->freq_transition,
+					 CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
+{
+	cpufreq_unregister_notifier(&host->freq_transition,
+				    CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
+{
+	return 0;
+}
+
+static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
+{
+}
+#endif
+
 static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
 {
 	struct s3cmci_host *host;
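One way to read the PRECHANGE/POSTCHANGE test in s3cmci_cpufreq_transition(), sketched with hypothetical PCLK figures (an interpretation of the code above, not text from the commit):

/*
 * The re-divide is timed so the card clock never overshoots the rate
 * negotiated with the card (hypothetical 66 MHz <-> 133 MHz PCLK):
 *
 *   66 -> 133 MHz: newclk > host->clk_rate, so act on CPUFREQ_PRECHANGE
 *	and widen the prescaler before the faster PCLK takes effect.
 *
 *   133 -> 66 MHz: newclk < host->clk_rate, so act on CPUFREQ_POSTCHANGE
 *	and narrow the prescaler only after the slower PCLK is in place.
 *
 * In both transition windows the SD clock can only be lower than the
 * requested rate, never higher.
 */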
@@ -1298,10 +1396,16 @@
 	    (host->is2440?"2440":""),
 	    host->base, host->irq, host->irq_cd, host->dma);
 
+	ret = s3cmci_cpufreq_register(host);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register cpufreq\n");
+		goto free_dmabuf;
+	}
+
 	ret = mmc_add_host(mmc);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to add mmc host.\n");
-		goto free_dmabuf;
+		goto free_cpufreq;
 	}
 
 	platform_set_drvdata(pdev, mmc);
@@ -1309,6 +1413,9 @@
 
 	return 0;
 
+ free_cpufreq:
+	s3cmci_cpufreq_deregister(host);
+
  free_dmabuf:
 	clk_disable(host->clk);
 
@@ -1342,6 +1449,7 @@
 	if (host->irq_cd >= 0)
 		free_irq(host->irq_cd, host);
 
+	s3cmci_cpufreq_deregister(host);
 	mmc_remove_host(mmc);
 	clk_disable(host->clk);
 }
@@ -1455,7 +1563,7 @@
 
 MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>");
+MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
 MODULE_ALIAS("platform:s3c2410-sdi");
 MODULE_ALIAS("platform:s3c2412-sdi");
 MODULE_ALIAS("platform:s3c2440-sdi");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index 37d9c60..ca1ba3d 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -51,7 +51,7 @@
 	int			dma_complete;
 
 	u32			pio_sgptr;
-	u32			pio_words;
+	u32			pio_bytes;
 	u32			pio_count;
 	u32			*pio_ptr;
 #define XFER_NONE 0
@@ -67,4 +67,8 @@
 
 	unsigned int		ccnt, dcnt;
 	struct tasklet_struct	pio_tasklet;
+
+#ifdef CONFIG_CPU_FREQ
+	struct notifier_block	freq_transition;
+#endif
 };
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 14f11f8..a90d50c 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -172,6 +172,11 @@
 	  memory chips, and also use ioctl() to obtain information about
 	  the device, or to erase parts of it.
 
+config HAVE_MTD_OTP
+	bool
+	help
+	  Enable access to OTP regions using MTD_CHAR.
+
 config MTD_BLKDEVS
 	tristate "Common interface to block layer for MTD 'translation layers'"
 	depends on BLOCK
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 479d32b..9408099 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -6,6 +6,7 @@
 config MTD_CFI
 	tristate "Detect flash chips by Common Flash Interface (CFI) probe"
 	select MTD_GEN_PROBE
+	select MTD_CFI_UTIL
 	help
 	  The Common Flash Interface specification was developed by Intel,
 	  AMD and other flash manufacturers that provides a universal method
@@ -154,6 +155,7 @@
 config MTD_OTP
 	bool "Protection Registers aka one-time programmable (OTP) bits"
 	depends on MTD_CFI_ADV_OPTIONS
+	select HAVE_MTD_OTP
 	default n
 	help
 	  This enables support for reading, writing and locking so called
@@ -187,7 +189,7 @@
 	  StrataFlash and other parts.
 
 config MTD_CFI_AMDSTD
-	tristate "Support for AMD/Fujitsu flash chips"
+	tristate "Support for AMD/Fujitsu/Spansion flash chips"
 	depends on MTD_GEN_PROBE
 	select MTD_CFI_UTIL
 	help
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5f1b472..c93a8be 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -478,6 +478,28 @@
 		else
 			cfi->chips[i].erase_time = 2000000;
 
+		if (cfi->cfiq->WordWriteTimeoutTyp &&
+		    cfi->cfiq->WordWriteTimeoutMax)
+			cfi->chips[i].word_write_time_max =
+				1<<(cfi->cfiq->WordWriteTimeoutTyp +
+				    cfi->cfiq->WordWriteTimeoutMax);
+		else
+			cfi->chips[i].word_write_time_max = 50000 * 8;
+
+		if (cfi->cfiq->BufWriteTimeoutTyp &&
+		    cfi->cfiq->BufWriteTimeoutMax)
+			cfi->chips[i].buffer_write_time_max =
+				1<<(cfi->cfiq->BufWriteTimeoutTyp +
+				    cfi->cfiq->BufWriteTimeoutMax);
+
+		if (cfi->cfiq->BlockEraseTimeoutTyp &&
+		    cfi->cfiq->BlockEraseTimeoutMax)
+			cfi->chips[i].erase_time_max =
+				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
+				       cfi->cfiq->BlockEraseTimeoutMax);
+		else
+			cfi->chips[i].erase_time_max = 2000000 * 8;
+
 		cfi->chips[i].ref_point_counter = 0;
 		init_waitqueue_head(&(cfi->chips[i].wq));
 	}
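The new *_time_max fields come from the CFI query timeouts, where the typical value is 2^Typ (microseconds for writes, milliseconds for erases) and the maximum is the typical value scaled by 2^Max; hence the 1<<(Typ+Max) and 1000<<(Typ+Max) expressions above. A worked example with hypothetical query values:

/* Hypothetical CFI query data:
 *   WordWriteTimeoutTyp = 4    -> typical word write = 2^4 us = 16 us
 *   WordWriteTimeoutMax = 3    -> maximum = typical * 2^3
 *	word_write_time_max  = 1 << (4 + 3)     = 128 us
 *
 *   BlockEraseTimeoutTyp = 10  -> typical block erase = 2^10 ms ~= 1 s
 *   BlockEraseTimeoutMax = 4   -> maximum = typical * 2^4
 *	erase_time_max       = 1000 << (10 + 4) = 16384000 us ~= 16.4 s
 *
 * When the query fields are zero, word write and erase fall back to
 * eight times the old default typical values, as in the else branches.
 */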
@@ -703,6 +725,10 @@
 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 	unsigned long timeo = jiffies + HZ;
 
+	/* Prevent setting state FL_SYNCING for chip in suspended state. */
+	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
+		goto sleep;
+
 	switch (chip->state) {
 
 	case FL_STATUS:
@@ -808,8 +834,9 @@
 	DECLARE_WAITQUEUE(wait, current);
 
  retry:
-	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
-			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
+	if (chip->priv &&
+	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
+	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
 		/*
 		 * OK. We have possibility for contention on the write/erase
 		 * operations which are global to the real chip and not per
@@ -859,6 +886,14 @@
 				return ret;
 			}
 			spin_lock(&shared->lock);
+
+			/* We should not own chip if it is already
+			 * in FL_SYNCING state. Put contender and retry. */
+			if (chip->state == FL_SYNCING) {
+				put_chip(map, contender, contender->start);
+				spin_unlock(contender->mutex);
+				goto retry;
+			}
 			spin_unlock(contender->mutex);
 		}
 
@@ -1012,7 +1047,7 @@
 
 static int __xipram xip_wait_for_operation(
 		struct map_info *map, struct flchip *chip,
-		unsigned long adr, unsigned int chip_op_time )
+		unsigned long adr, unsigned int chip_op_time_max)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -1021,7 +1056,7 @@
 	flstate_t oldstate, newstate;
 
        	start = xip_currtime();
-	usec = chip_op_time * 8;
+	usec = chip_op_time_max;
 	if (usec == 0)
 		usec = 500000;
 	done = 0;
@@ -1131,8 +1166,8 @@
 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
 	INVALIDATE_CACHED_RANGE(map, from, size)
 
-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
-	xip_wait_for_operation(map, chip, cmd_adr, usec)
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
+	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
 
 #else
 
@@ -1144,7 +1179,7 @@
 static int inval_cache_and_wait_for_operation(
 		struct map_info *map, struct flchip *chip,
 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
-		unsigned int chip_op_time)
+		unsigned int chip_op_time, unsigned int chip_op_time_max)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	map_word status, status_OK = CMD(0x80);
@@ -1156,8 +1191,7 @@
 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
 	spin_lock(chip->mutex);
 
-	/* set our timeout to 8 times the expected delay */
-	timeo = chip_op_time * 8;
+	timeo = chip_op_time_max;
 	if (!timeo)
 		timeo = 500000;
 	reset_timeo = timeo;
@@ -1217,8 +1251,8 @@
 
 #endif
 
-#define WAIT_TIMEOUT(map, chip, adr, udelay) \
-	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
+#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
+	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
 
 
 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1452,7 +1486,8 @@
 
 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
 				   adr, map_bankwidth(map),
-				   chip->word_write_time);
+				   chip->word_write_time,
+				   chip->word_write_time_max);
 	if (ret) {
 		xip_enable(map, chip, adr);
 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1623,7 +1658,7 @@
 
 	chip->state = FL_WRITING_TO_BUFFER;
 	map_write(map, write_cmd, cmd_adr);
-	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
+	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
 	if (ret) {
 		/* Argh. Not ready for write to buffer */
 		map_word Xstatus = map_read(map, cmd_adr);
@@ -1640,7 +1675,7 @@
 
 	/* Figure out the number of words to write */
 	word_gap = (-adr & (map_bankwidth(map)-1));
-	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
 	if (!word_gap) {
 		words--;
 	} else {
@@ -1692,7 +1727,8 @@
 
 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
 				   initial_adr, initial_len,
-				   chip->buffer_write_time);
+				   chip->buffer_write_time,
+				   chip->buffer_write_time_max);
 	if (ret) {
 		map_write(map, CMD(0x70), cmd_adr);
 		chip->state = FL_STATUS;
@@ -1827,7 +1863,8 @@
 
 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
 				   adr, len,
-				   chip->erase_time);
+				   chip->erase_time,
+				   chip->erase_time_max);
 	if (ret) {
 		map_write(map, CMD(0x70), adr);
 		chip->state = FL_STATUS;
@@ -2006,7 +2043,7 @@
 	 */
 	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
 
-	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
+	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
 	if (ret) {
 		map_write(map, CMD(0x70), adr);
 		chip->state = FL_STATUS;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a972cc6..3e6f5d8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -13,6 +13,8 @@
  * XIP support hooks by Vitaly Wool (based on code for Intel flash
  * by Nicolas Pitre)
  *
+ * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
+ *
  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
  *
  * This code is GPL
@@ -43,6 +45,7 @@
 
 #define MANUFACTURER_AMD	0x0001
 #define MANUFACTURER_ATMEL	0x001F
+#define MANUFACTURER_MACRONIX	0x00C2
 #define MANUFACTURER_SST	0x00BF
 #define SST49LF004B	        0x0060
 #define SST49LF040B	        0x0050
@@ -144,12 +147,44 @@
 
 	if (((major << 8) | minor) < 0x3131) {
 		/* CFI version 1.0 => don't trust bootloc */
+
+		DEBUG(MTD_DEBUG_LEVEL1,
+			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
+			map->name, cfi->mfr, cfi->id);
+
+		/* AFAICS all 29LV400 with a bottom boot block have a device ID
+		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
+		 * These were badly detected as they have the 0x80 bit set
+		 * so treat them as a special case.
+		 */
+		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
+
+			/* Macronix added CFI to their 2nd generation
+			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
+			 * Fujitsu, Spansion, EON, ESI and older Macronix)
+			 * has CFI.
+			 *
+			 * Therefore also check the manufacturer.
+			 * This reduces the risk of false detection due to
+			 * the 8-bit device ID.
+			 */
+			(cfi->mfr == MANUFACTURER_MACRONIX)) {
+			DEBUG(MTD_DEBUG_LEVEL1,
+				"%s: Macronix MX29LV400C with bottom boot block"
+				" detected\n", map->name);
+			extp->TopBottom = 2;	/* bottom boot */
+		} else
 		if (cfi->id & 0x80) {
 			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
 			extp->TopBottom = 3;	/* top boot */
 		} else {
 			extp->TopBottom = 2;	/* bottom boot */
 		}
+
+		DEBUG(MTD_DEBUG_LEVEL1,
+			"%s: AMD CFI PRI V%c.%c has no boot block field;"
+			" deduced %s from Device ID\n", map->name, major, minor,
+			extp->TopBottom == 2 ? "bottom" : "top");
 	}
 }
 #endif
@@ -178,10 +213,18 @@
 	if (atmel_pri.Features & 0x02)
 		extp->EraseSuspend = 2;
 
-	if (atmel_pri.BottomBoot)
-		extp->TopBottom = 2;
-	else
-		extp->TopBottom = 3;
+	/* Some chips got it backwards... */
+	if (cfi->id == AT49BV6416) {
+		if (atmel_pri.BottomBoot)
+			extp->TopBottom = 3;
+		else
+			extp->TopBottom = 2;
+	} else {
+		if (atmel_pri.BottomBoot)
+			extp->TopBottom = 2;
+		else
+			extp->TopBottom = 3;
+	}
 
 	/* burst write mode not supported */
 	cfi->cfiq->BufWriteTimeoutTyp = 0;
@@ -243,6 +286,7 @@
 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef AMD_BOOTLOC_BUG
 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
 #endif
 	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
 	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index c418e92..e63e674 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -44,17 +44,14 @@
 
 #define xip_enable(base, map, cfi) \
 do { \
-	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
-	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
+	cfi_qry_mode_off(base, map, cfi);		\
 	xip_allowed(base, map); \
 } while (0)
 
 #define xip_disable_qry(base, map, cfi) \
 do { \
 	xip_disable(); \
-	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
-	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
-	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \
+	cfi_qry_mode_on(base, map, cfi); \
 } while (0)
 
 #else
@@ -70,32 +67,6 @@
    in: interleave,type,mode
    ret: table index, <0 for error
  */
-static int __xipram qry_present(struct map_info *map, __u32 base,
-				struct cfi_private *cfi)
-{
-	int osf = cfi->interleave * cfi->device_type;	// scale factor
-	map_word val[3];
-	map_word qry[3];
-
-	qry[0] = cfi_build_cmd('Q', map, cfi);
-	qry[1] = cfi_build_cmd('R', map, cfi);
-	qry[2] = cfi_build_cmd('Y', map, cfi);
-
-	val[0] = map_read(map, base + osf*0x10);
-	val[1] = map_read(map, base + osf*0x11);
-	val[2] = map_read(map, base + osf*0x12);
-
-	if (!map_word_equal(map, qry[0], val[0]))
-		return 0;
-
-	if (!map_word_equal(map, qry[1], val[1]))
-		return 0;
-
-	if (!map_word_equal(map, qry[2], val[2]))
-		return 0;
-
-	return 1; 	// "QRY" found
-}
 
 static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
 				   unsigned long *chip_map, struct cfi_private *cfi)
@@ -116,11 +87,7 @@
 	}
 
 	xip_disable();
-	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
-	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
-	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
-
-	if (!qry_present(map,base,cfi)) {
+	if (!cfi_qry_mode_on(base, map, cfi)) {
 		xip_enable(base, map, cfi);
 		return 0;
 	}
@@ -141,14 +108,13 @@
  		start = i << cfi->chipshift;
 		/* This chip should be in read mode if it's one
 		   we've already touched. */
-		if (qry_present(map, start, cfi)) {
+		if (cfi_qry_present(map, start, cfi)) {
 			/* Eep. This chip also had the QRY marker.
 			 * Is it an alias for the new one? */
-			cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
-			cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
+			cfi_qry_mode_off(start, map, cfi);
 
 			/* If the QRY marker goes away, it's an alias */
-			if (!qry_present(map, start, cfi)) {
+			if (!cfi_qry_present(map, start, cfi)) {
 				xip_allowed(base, map);
 				printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
 				       map->name, base, start);
@@ -158,10 +124,9 @@
 			 * unfortunate. Stick the new chip in read mode
 			 * too and if it's the same, assume it's an alias. */
 			/* FIXME: Use other modes to do a proper check */
-			cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
-			cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
+			cfi_qry_mode_off(base, map, cfi);
 
-			if (qry_present(map, base, cfi)) {
+			if (cfi_qry_present(map, base, cfi)) {
 				xip_allowed(base, map);
 				printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
 				       map->name, base, start);
@@ -176,8 +141,7 @@
 	cfi->numchips++;
 
 	/* Put it back into Read Mode */
-	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
-	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_qry_mode_off(base, map, cfi);
 	xip_allowed(base, map);
 
 	printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
@@ -237,9 +201,7 @@
 			  cfi_read_query(map, base + 0xf * ofs_factor);
 
 	/* Put it back into Read Mode */
-	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
-	/* ... even if it's an Intel chip */
-	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_qry_mode_off(base, map, cfi);
 	xip_allowed(base, map);
 
 	/* Do any necessary byteswapping */
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 0ee4570..34d40e2 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -24,6 +24,66 @@
 #include <linux/mtd/cfi.h>
 #include <linux/mtd/compatmac.h>
 
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+			     struct cfi_private *cfi)
+{
+	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
+	map_word val[3];
+	map_word qry[3];
+
+	qry[0] = cfi_build_cmd('Q', map, cfi);
+	qry[1] = cfi_build_cmd('R', map, cfi);
+	qry[2] = cfi_build_cmd('Y', map, cfi);
+
+	val[0] = map_read(map, base + osf*0x10);
+	val[1] = map_read(map, base + osf*0x11);
+	val[2] = map_read(map, base + osf*0x12);
+
+	if (!map_word_equal(map, qry[0], val[0]))
+		return 0;
+
+	if (!map_word_equal(map, qry[1], val[1]))
+		return 0;
+
+	if (!map_word_equal(map, qry[2], val[2]))
+		return 0;
+
+	return 1; 	/* "QRY" found */
+}
+EXPORT_SYMBOL_GPL(cfi_qry_present);
+
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+			     struct cfi_private *cfi)
+{
+	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+	if (cfi_qry_present(map, base, cfi))
+		return 1;
+	/* QRY not found: probably we are dealing with some odd CFI chips */
+	/* Some revisions of some old Intel chips? */
+	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+	if (cfi_qry_present(map, base, cfi))
+		return 1;
+	/* ST M29DW chips */
+	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+	if (cfi_qry_present(map, base, cfi))
+		return 1;
+	/* QRY not found */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
+
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+			       struct cfi_private *cfi)
+{
+	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+}
+EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
+
 struct cfi_extquery *
 __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
 {
@@ -48,8 +108,7 @@
 #endif
 
 	/* Switch it into Query Mode */
-	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
-
+	cfi_qry_mode_on(base, map, cfi);
 	/* Read in the Extended Query Table */
 	for (i=0; i<size; i++) {
 		((unsigned char *)extp)[i] =
@@ -57,8 +116,7 @@
 	}
 
 	/* Make sure it returns to read mode */
-	cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
-	cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL);
+	cfi_qry_mode_off(base, map, cfi);
 
 #ifdef CONFIG_MTD_XIP
 	(void) map_read(map, base);
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index f061885..e2dc964 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -111,7 +111,7 @@
 		max_chips = 1;
 	}
 
-	mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG );
+	mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
 	chip_map = kzalloc(mapsize, GFP_KERNEL);
 	if (!chip_map) {
 		printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 71bc07f..50a3403 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -7,6 +7,7 @@
  *
 * mtdparts=<mtddef>[;<mtddef>]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
+ *              where <mtd-id> is the name from the "cat /proc/mtd" command
  * <partdef> := <size>[@offset][<name>][ro][lk]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
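Following this grammar, a hypothetical boot parameter that splits a chip whose mtd->name is "physmap-flash.0" into a read-only bootloader, an environment area and a rootfs taking the remaining space would be:

	mtdparts=physmap-flash.0:512k(bootloader)ro,128k(env),-(rootfs)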
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 9c613f0..6fde0a2 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -59,6 +59,27 @@
 	  Sometimes DataFlash chips are packaged inside MMC-format
 	  cards; at this writing, the MMC stack won't handle those.
 
+config MTD_DATAFLASH_WRITE_VERIFY
+	bool "Verify DataFlash page writes"
+	depends on MTD_DATAFLASH
+	help
+	  This adds an extra check when data is written to the flash.
+	  It may help if you are verifying chip setup (timings etc) on
+	  your board.  There is a rare possibility that even though the
+	  device thinks the write was successful, a bit could have been
+	  flipped accidentally due to device wear or something else.
+
+config MTD_DATAFLASH_OTP
+	bool "DataFlash OTP support (Security Register)"
+	depends on MTD_DATAFLASH
+	select HAVE_MTD_OTP
+	help
+	  Newer DataFlash chips (revisions C and D) support 128 bytes of
+	  one-time-programmable (OTP) data.  The first half may be written
+	  (once) with up to 64 bytes of data, such as a serial number or
+	  other key product data.  The second half is programmed with a
+	  unique-to-each-chip bit pattern at the factory.
+
 config MTD_M25P80
 	tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
 	depends on SPI_MASTER && EXPERIMENTAL
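With the HAVE_MTD_OTP symbol introduced in drivers/mtd/Kconfig earlier in this series, a hypothetical .config enabling the new DataFlash extras would contain:

CONFIG_MTD_DATAFLASH=y
CONFIG_MTD_DATAFLASH_WRITE_VERIFY=y
CONFIG_MTD_DATAFLASH_OTP=y
# selected automatically by MTD_DATAFLASH_OTP
CONFIG_HAVE_MTD_OTP=y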
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index b35c333..76a7675 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,6 +39,7 @@
 #define	OPCODE_PP		0x02	/* Page program (up to 256 bytes) */
 #define	OPCODE_BE_4K 		0x20	/* Erase 4KiB block */
 #define	OPCODE_BE_32K		0x52	/* Erase 32KiB block */
+#define	OPCODE_BE		0xc7	/* Erase whole flash block */
 #define	OPCODE_SE		0xd8	/* Sector erase (usually 64KiB) */
 #define	OPCODE_RDID		0x9f	/* Read JEDEC ID */
 
@@ -161,6 +162,31 @@
 	return 1;
 }
 
+/*
+ * Erase the whole flash memory
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_block(struct m25p *flash)
+{
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
+			flash->spi->dev.bus_id, __func__,
+			flash->mtd.size / 1024);
+
+	/* Wait until finished previous write command. */
+	if (wait_till_ready(flash))
+		return 1;
+
+	/* Send write enable, then erase commands. */
+	write_enable(flash);
+
+	/* Set up command buffer. */
+	flash->command[0] = OPCODE_BE;
+
+	spi_write(flash->spi, flash->command, 1);
+
+	return 0;
+}
 
 /*
  * Erase one sector of flash memory at offset ``offset'' which is any
@@ -229,15 +255,21 @@
 	 */
 
 	/* now erase those sectors */
-	while (len) {
-		if (erase_sector(flash, addr)) {
-			instr->state = MTD_ERASE_FAILED;
-			mutex_unlock(&flash->lock);
-			return -EIO;
-		}
+	if (len == flash->mtd.size && erase_block(flash)) {
+		instr->state = MTD_ERASE_FAILED;
+		mutex_unlock(&flash->lock);
+		return -EIO;
+	} else {
+		while (len) {
+			if (erase_sector(flash, addr)) {
+				instr->state = MTD_ERASE_FAILED;
+				mutex_unlock(&flash->lock);
+				return -EIO;
+			}
 
-		addr += mtd->erasesize;
-		len -= mtd->erasesize;
+			addr += mtd->erasesize;
+			len -= mtd->erasesize;
+		}
 	}
 
 	mutex_unlock(&flash->lock);
@@ -437,6 +469,7 @@
 	 * then a two byte device id.
 	 */
 	u32		jedec_id;
+	u16		ext_id;
 
 	/* The size listed here is what works with OPCODE_SE, which isn't
 	 * necessarily called a "sector" by the vendor.
@@ -456,72 +489,75 @@
 static struct flash_info __devinitdata m25p_data [] = {
 
 	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
-	{ "at25fs010",  0x1f6601, 32 * 1024, 4, SECT_4K, },
-	{ "at25fs040",  0x1f6604, 64 * 1024, 8, SECT_4K, },
+	{ "at25fs010",  0x1f6601, 0, 32 * 1024, 4, SECT_4K, },
+	{ "at25fs040",  0x1f6604, 0, 64 * 1024, 8, SECT_4K, },
 
-	{ "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, },
-	{ "at25df641",  0x1f4800, 64 * 1024, 128, SECT_4K, },
+	{ "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, },
+	{ "at25df641",  0x1f4800, 0, 64 * 1024, 128, SECT_4K, },
 
-	{ "at26f004",   0x1f0400, 64 * 1024, 8, SECT_4K, },
-	{ "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, },
-	{ "at26df161a", 0x1f4601, 64 * 1024, 32, SECT_4K, },
-	{ "at26df321",  0x1f4701, 64 * 1024, 64, SECT_4K, },
+	{ "at26f004",   0x1f0400, 0, 64 * 1024, 8, SECT_4K, },
+	{ "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, },
+	{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
+	{ "at26df321",  0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
 
 	/* Spansion -- single (large) sector size only, at least
 	 * for the chips listed here (without boot sectors).
 	 */
-	{ "s25sl004a", 0x010212, 64 * 1024, 8, },
-	{ "s25sl008a", 0x010213, 64 * 1024, 16, },
-	{ "s25sl016a", 0x010214, 64 * 1024, 32, },
-	{ "s25sl032a", 0x010215, 64 * 1024, 64, },
-	{ "s25sl064a", 0x010216, 64 * 1024, 128, },
+	{ "s25sl004a", 0x010212, 0, 64 * 1024, 8, },
+	{ "s25sl008a", 0x010213, 0, 64 * 1024, 16, },
+	{ "s25sl016a", 0x010214, 0, 64 * 1024, 32, },
+	{ "s25sl032a", 0x010215, 0, 64 * 1024, 64, },
+	{ "s25sl064a", 0x010216, 0, 64 * 1024, 128, },
+	{ "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, },
+	{ "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, },
 
 	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
-	{ "sst25vf040b", 0xbf258d, 64 * 1024, 8, SECT_4K, },
-	{ "sst25vf080b", 0xbf258e, 64 * 1024, 16, SECT_4K, },
-	{ "sst25vf016b", 0xbf2541, 64 * 1024, 32, SECT_4K, },
-	{ "sst25vf032b", 0xbf254a, 64 * 1024, 64, SECT_4K, },
+	{ "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, },
+	{ "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, },
+	{ "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, },
+	{ "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, },
 
 	/* ST Microelectronics -- newer production may have feature updates */
-	{ "m25p05",  0x202010,  32 * 1024, 2, },
-	{ "m25p10",  0x202011,  32 * 1024, 4, },
-	{ "m25p20",  0x202012,  64 * 1024, 4, },
-	{ "m25p40",  0x202013,  64 * 1024, 8, },
-	{ "m25p80",         0,  64 * 1024, 16, },
-	{ "m25p16",  0x202015,  64 * 1024, 32, },
-	{ "m25p32",  0x202016,  64 * 1024, 64, },
-	{ "m25p64",  0x202017,  64 * 1024, 128, },
-	{ "m25p128", 0x202018, 256 * 1024, 64, },
+	{ "m25p05",  0x202010,  0, 32 * 1024, 2, },
+	{ "m25p10",  0x202011,  0, 32 * 1024, 4, },
+	{ "m25p20",  0x202012,  0, 64 * 1024, 4, },
+	{ "m25p40",  0x202013,  0, 64 * 1024, 8, },
+	{ "m25p80",         0,  0, 64 * 1024, 16, },
+	{ "m25p16",  0x202015,  0, 64 * 1024, 32, },
+	{ "m25p32",  0x202016,  0, 64 * 1024, 64, },
+	{ "m25p64",  0x202017,  0, 64 * 1024, 128, },
+	{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
 
-	{ "m45pe80", 0x204014,  64 * 1024, 16, },
-	{ "m45pe16", 0x204015,  64 * 1024, 32, },
+	{ "m45pe80", 0x204014,  0, 64 * 1024, 16, },
+	{ "m45pe16", 0x204015,  0, 64 * 1024, 32, },
 
-	{ "m25pe80", 0x208014,  64 * 1024, 16, },
-	{ "m25pe16", 0x208015,  64 * 1024, 32, SECT_4K, },
+	{ "m25pe80", 0x208014,  0, 64 * 1024, 16, },
+	{ "m25pe16", 0x208015,  0, 64 * 1024, 32, SECT_4K, },
 
 	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
-	{ "w25x10", 0xef3011, 64 * 1024, 2, SECT_4K, },
-	{ "w25x20", 0xef3012, 64 * 1024, 4, SECT_4K, },
-	{ "w25x40", 0xef3013, 64 * 1024, 8, SECT_4K, },
-	{ "w25x80", 0xef3014, 64 * 1024, 16, SECT_4K, },
-	{ "w25x16", 0xef3015, 64 * 1024, 32, SECT_4K, },
-	{ "w25x32", 0xef3016, 64 * 1024, 64, SECT_4K, },
-	{ "w25x64", 0xef3017, 64 * 1024, 128, SECT_4K, },
+	{ "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, },
+	{ "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, },
+	{ "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, },
+	{ "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, },
+	{ "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, },
+	{ "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, },
+	{ "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, },
 };
 
 static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
 {
 	int			tmp;
 	u8			code = OPCODE_RDID;
-	u8			id[3];
+	u8			id[5];
 	u32			jedec;
+	u16			ext_jedec;
 	struct flash_info	*info;
 
 	/* JEDEC also defines an optional "extended device information"
 	 * string for after vendor-specific data, after the three bytes
 	 * we use here.  Supporting some chips might require using it.
 	 */
-	tmp = spi_write_then_read(spi, &code, 1, id, 3);
+	tmp = spi_write_then_read(spi, &code, 1, id, 5);
 	if (tmp < 0) {
 		DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
 			spi->dev.bus_id, tmp);
@@ -533,10 +569,15 @@
 	jedec = jedec << 8;
 	jedec |= id[2];
 
+	ext_jedec = id[3] << 8 | id[4];
+
 	for (tmp = 0, info = m25p_data;
 			tmp < ARRAY_SIZE(m25p_data);
 			tmp++, info++) {
-		if (info->jedec_id == jedec)
+		if (info->jedec_id == jedec) {
+			if (ext_jedec != 0 && info->ext_id != ext_jedec)
+				continue;
 			return info;
+		}
 	}
 	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
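The two extra RDID bytes let the probe tell apart chips that share a three-byte JEDEC id; a worked example for the two Spansion 128-Mbit entries added above (the response bytes are hypothetical):

/* RDID response 0x01 0x20 0x18 0x03 0x01:
 *   jedec     = (0x01 << 16) | (0x20 << 8) | 0x18 = 0x012018
 *   ext_jedec = (0x03 << 8)  | 0x01               = 0x0301
 *
 *   0x012018 / 0x0300 -> "s25sl12800" (256 KiB sectors)
 *   0x012018 / 0x0301 -> "s25sl12801" ( 64 KiB sectors)
 *
 * A chip that reports 0x0000 in the extended bytes is matched on the
 * three-byte id alone, as before.
 */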
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 8bd0dea..6dd9aff 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -30,12 +30,10 @@
  * doesn't (yet) use these for any kind of i/o overlap or prefetching.
  *
  * Sometimes DataFlash is packaged in MMC-format cards, although the
- * MMC stack can't use SPI (yet), or distinguish between MMC and DataFlash
+ * MMC stack can't (yet?) distinguish between MMC and DataFlash
  * protocols during enumeration.
  */
 
-#define CONFIG_DATAFLASH_WRITE_VERIFY
-
 /* reads can bypass the buffers */
 #define OP_READ_CONTINUOUS	0xE8
 #define OP_READ_PAGE		0xD2
@@ -80,7 +78,8 @@
  */
 #define OP_READ_ID		0x9F
 #define OP_READ_SECURITY	0x77
-#define OP_WRITE_SECURITY	0x9A	/* OTP bits */
+#define OP_WRITE_SECURITY_REVC	0x9A
+#define OP_WRITE_SECURITY	0x9B	/* revision D */
 
 
 struct dataflash {
@@ -402,7 +401,7 @@
 		(void) dataflash_waitready(priv->spi);
 
 
-#ifdef	CONFIG_DATAFLASH_WRITE_VERIFY
+#ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY
 
 		/* (3) Compare to Buffer1 */
 		addr = pageaddr << priv->page_offset;
@@ -431,7 +430,7 @@
 		} else
 			status = 0;
 
-#endif	/* CONFIG_DATAFLASH_WRITE_VERIFY */
+#endif	/* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */
 
 		remaining = remaining - writelen;
 		pageaddr++;
@@ -451,16 +450,192 @@
 
 /* ......................................................................... */
 
+#ifdef CONFIG_MTD_DATAFLASH_OTP
+
+static int dataflash_get_otp_info(struct mtd_info *mtd,
+		struct otp_info *info, size_t len)
+{
+	/* Report both blocks as identical:  bytes 0..64, locked.
+	 * Unless the user block changed from all-ones, we can't
+	 * tell whether it's still writable; so we assume it isn't.
+	 */
+	info->start = 0;
+	info->length = 64;
+	info->locked = 1;
+	return sizeof(*info);
+}
+
+static ssize_t otp_read(struct spi_device *spi, unsigned base,
+		uint8_t *buf, loff_t off, size_t len)
+{
+	struct spi_message	m;
+	size_t			l;
+	uint8_t			*scratch;
+	struct spi_transfer	t;
+	int			status;
+
+	if (off > 64)
+		return -EINVAL;
+
+	if ((off + len) > 64)
+		len = 64 - off;
+	if (len == 0)
+		return len;
+
+	spi_message_init(&m);
+
+	l = 4 + base + off + len;
+	scratch = kzalloc(l, GFP_KERNEL);
+	if (!scratch)
+		return -ENOMEM;
+
+	/* OUT: OP_READ_SECURITY, 3 don't-care bytes, zeroes
+	 * IN:  ignore 4 bytes, data bytes 0..N (max 127)
+	 */
+	scratch[0] = OP_READ_SECURITY;
+
+	memset(&t, 0, sizeof t);
+	t.tx_buf = scratch;
+	t.rx_buf = scratch;
+	t.len = l;
+	spi_message_add_tail(&t, &m);
+
+	dataflash_waitready(spi);
+
+	status = spi_sync(spi, &m);
+	if (status >= 0) {
+		memcpy(buf, scratch + 4 + base + off, len);
+		status = len;
+	}
+
+	kfree(scratch);
+	return status;
+}
+
+static int dataflash_read_fact_otp(struct mtd_info *mtd,
+		loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+	struct dataflash	*priv = (struct dataflash *)mtd->priv;
+	int			status;
+
+	/* 64 bytes, from 0..63 ... start at 64 on-chip */
+	mutex_lock(&priv->lock);
+	status = otp_read(priv->spi, 64, buf, from, len);
+	mutex_unlock(&priv->lock);
+
+	if (status < 0)
+		return status;
+	*retlen = status;
+	return 0;
+}
+
+static int dataflash_read_user_otp(struct mtd_info *mtd,
+		loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+	struct dataflash	*priv = (struct dataflash *)mtd->priv;
+	int			status;
+
+	/* 64 bytes, from 0..63 ... start at 0 on-chip */
+	mutex_lock(&priv->lock);
+	status = otp_read(priv->spi, 0, buf, from, len);
+	mutex_unlock(&priv->lock);
+
+	if (status < 0)
+		return status;
+	*retlen = status;
+	return 0;
+}
+
+static int dataflash_write_user_otp(struct mtd_info *mtd,
+		loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+	struct spi_message	m;
+	const size_t		l = 4 + 64;
+	uint8_t			*scratch;
+	struct spi_transfer	t;
+	struct dataflash	*priv = (struct dataflash *)mtd->priv;
+	int			status;
+
+	if (len > 64)
+		return -EINVAL;
+
+	/* Strictly speaking, we *could* truncate the write ... but
+	 * let's not do that for the only write that's ever possible.
+	 */
+	if ((from + len) > 64)
+		return -EINVAL;
+
+	/* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
+	 * IN:  ignore all
+	 */
+	scratch = kzalloc(l, GFP_KERNEL);
+	if (!scratch)
+		return -ENOMEM;
+	scratch[0] = OP_WRITE_SECURITY;
+	memcpy(scratch + 4 + from, buf, len);
+
+	spi_message_init(&m);
+
+	memset(&t, 0, sizeof t);
+	t.tx_buf = scratch;
+	t.len = l;
+	spi_message_add_tail(&t, &m);
+
+	/* Write the OTP bits, if they've not yet been written.
+	 * This modifies SRAM buffer1.
+	 */
+	mutex_lock(&priv->lock);
+	dataflash_waitready(priv->spi);
+	status = spi_sync(priv->spi, &m);
+	mutex_unlock(&priv->lock);
+
+	kfree(scratch);
+
+	if (status >= 0) {
+		status = 0;
+		*retlen = len;
+	}
+	return status;
+}
+
+static char *otp_setup(struct mtd_info *device, char revision)
+{
+	device->get_fact_prot_info = dataflash_get_otp_info;
+	device->read_fact_prot_reg = dataflash_read_fact_otp;
+	device->get_user_prot_info = dataflash_get_otp_info;
+	device->read_user_prot_reg = dataflash_read_user_otp;
+
+	/* rev c parts (at45db321c and at45db1281 only!) use a
+	 * different write procedure; not (yet?) implemented.
+	 */
+	if (revision > 'c')
+		device->write_user_prot_reg = dataflash_write_user_otp;
+
+	return ", OTP";
+}
+
+#else
+
+static char *otp_setup(struct mtd_info *device, char revision)
+{
+	return " (OTP)";
+}
+
+#endif
+
+/* ......................................................................... */
+
 /*
  * Register DataFlash device with MTD subsystem.
  */
 static int __devinit
-add_dataflash(struct spi_device *spi, char *name,
-		int nr_pages, int pagesize, int pageoffset)
+add_dataflash_otp(struct spi_device *spi, char *name,
+		int nr_pages, int pagesize, int pageoffset, char revision)
 {
 	struct dataflash		*priv;
 	struct mtd_info			*device;
 	struct flash_platform_data	*pdata = spi->dev.platform_data;
+	char				*otp_tag = "";
 
 	priv = kzalloc(sizeof *priv, GFP_KERNEL);
 	if (!priv)
@@ -489,8 +664,12 @@
 	device->write = dataflash_write;
 	device->priv = priv;
 
-	dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes\n",
-			name, DIV_ROUND_UP(device->size, 1024), pagesize);
+	if (revision >= 'c')
+		otp_tag = otp_setup(device, revision);
+
+	dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n",
+			name, DIV_ROUND_UP(device->size, 1024),
+			pagesize, otp_tag);
 	dev_set_drvdata(&spi->dev, priv);
 
 	if (mtd_has_partitions()) {
@@ -519,6 +698,14 @@
 	return add_mtd_device(device) == 1 ? -ENODEV : 0;
 }
 
+static inline int __devinit
+add_dataflash(struct spi_device *spi, char *name,
+		int nr_pages, int pagesize, int pageoffset)
+{
+	return add_dataflash_otp(spi, name, nr_pages, pagesize,
+			pageoffset, 0);
+}
+
 struct flash_info {
 	char		*name;
 
@@ -664,13 +851,16 @@
 	 * Try to detect dataflash by JEDEC ID.
 	 * If it succeeds we know we have either a C or D part.
 	 * D will support power of 2 pagesize option.
+	 * Both support the security register, though with different
+	 * write procedures.
 	 */
 	info = jedec_probe(spi);
 	if (IS_ERR(info))
 		return PTR_ERR(info);
 	if (info != NULL)
-		return add_dataflash(spi, info->name, info->nr_pages,
-				 info->pagesize, info->pageoffset);
+		return add_dataflash_otp(spi, info->name, info->nr_pages,
+				info->pagesize, info->pageoffset,
+				(info->flags & SUP_POW2PS) ? 'd' : 'c');
 
 	/*
 	 * Older chips support only legacy commands, identifying
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index c4f9d33..50ce138 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -388,6 +388,10 @@
 		if (thisEUN == targetEUN)
 			break;
 
+		/* Unlink the last block from the chain. */
+		inftl->PUtable[prevEUN] = BLOCK_NIL;
+
+		/* Now try to erase it. */
 		if (INFTL_formatblock(inftl, thisEUN) < 0) {
 			/*
 			 * Could not erase : mark block as reserved.
@@ -396,7 +400,6 @@
 		} else {
 			/* Correctly erased : mark it as free */
 			inftl->PUtable[thisEUN] = BLOCK_FREE;
-			inftl->PUtable[prevEUN] = BLOCK_NIL;
 			inftl->numfreeEUNs++;
 		}
 	}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index df8e00b..5ea1693 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -332,30 +332,6 @@
 	  Mapping for the Flaga digital module. If you don't have one, ignore
 	  this setting.
 
-config MTD_WALNUT
-	tristate "Flash device mapped on IBM 405GP Walnut"
-	depends on MTD_JEDECPROBE && WALNUT && !PPC_MERGE
-	help
-	  This enables access routines for the flash chips on the IBM 405GP
-	  Walnut board. If you have one of these boards and would like to
-	  use the flash chips on it, say 'Y'.
-
-config MTD_EBONY
-	tristate "Flash devices mapped on IBM 440GP Ebony"
-	depends on MTD_JEDECPROBE && EBONY && !PPC_MERGE
-	help
-	  This enables access routines for the flash chips on the IBM 440GP
-	  Ebony board. If you have one of these boards and would like to
-	  use the flash chips on it, say 'Y'.
-
-config MTD_OCOTEA
-	tristate "Flash devices mapped on IBM 440GX Ocotea"
-	depends on MTD_CFI && OCOTEA && !PPC_MERGE
-	help
-	  This enables access routines for the flash chips on the IBM 440GX
-	  Ocotea board. If you have one of these boards and would like to
-	  use the flash chips on it, say 'Y'.
-
 config MTD_REDWOOD
 	tristate "CFI Flash devices mapped on IBM Redwood"
 	depends on MTD_CFI && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 )
@@ -458,13 +434,6 @@
 	  PhotoMax Digital Picture Frame.
 	  If you have such a device, say 'Y'.
 
-config MTD_NOR_TOTO
-	tristate "NOR Flash device on TOTO board"
-	depends on ARCH_OMAP && OMAP_TOTO
-	help
-	  This enables access to the NOR flash on the Texas Instruments
-	  TOTO board.
-
 config MTD_H720X
 	tristate "Hynix evaluation board mappings"
 	depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
@@ -522,7 +491,7 @@
 
 config MTD_UCLINUX
 	tristate "Generic uClinux RAM/ROM filesystem support"
-	depends on MTD_PARTITIONS && !MMU
+	depends on MTD_PARTITIONS && MTD_RAM && !MMU
 	help
 	  Map driver to support image based filesystems for uClinux.
 
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 6cda6df..6d9ba35 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -50,12 +50,8 @@
 obj-$(CONFIG_MTD_UCLINUX)	+= uclinux.o
 obj-$(CONFIG_MTD_NETtel)	+= nettel.o
 obj-$(CONFIG_MTD_SCB2_FLASH)	+= scb2_flash.o
-obj-$(CONFIG_MTD_EBONY)		+= ebony.o
-obj-$(CONFIG_MTD_OCOTEA)	+= ocotea.o
-obj-$(CONFIG_MTD_WALNUT)        += walnut.o
 obj-$(CONFIG_MTD_H720X)		+= h720x-flash.o
 obj-$(CONFIG_MTD_SBC8240)	+= sbc8240.o
-obj-$(CONFIG_MTD_NOR_TOTO)	+= omap-toto-flash.o
 obj-$(CONFIG_MTD_IXP4XX)	+= ixp4xx.o
 obj-$(CONFIG_MTD_IXP2000)	+= ixp2000.o
 obj-$(CONFIG_MTD_WRSBC8260)	+= wr_sbc82xx_flash.o
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
deleted file mode 100644
index d92b7c7..0000000
--- a/drivers/mtd/maps/ebony.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Mapping for Ebony user flash
- *
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * Copyright 2002-2004 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/ibm44x.h>
-#include <platforms/4xx/ebony.h>
-
-static struct mtd_info *flash;
-
-static struct map_info ebony_small_map = {
-	.name =		"Ebony small flash",
-	.size =		EBONY_SMALL_FLASH_SIZE,
-	.bankwidth =	1,
-};
-
-static struct map_info ebony_large_map = {
-	.name =		"Ebony large flash",
-	.size =		EBONY_LARGE_FLASH_SIZE,
-	.bankwidth =	1,
-};
-
-static struct mtd_partition ebony_small_partitions[] = {
-	{
-		.name =   "OpenBIOS",
-		.offset = 0x0,
-		.size =   0x80000,
-	}
-};
-
-static struct mtd_partition ebony_large_partitions[] = {
-	{
-		.name =   "fs",
-		.offset = 0,
-		.size =   0x380000,
-	},
-	{
-		.name =   "firmware",
-		.offset = 0x380000,
-		.size =   0x80000,
-	}
-};
-
-int __init init_ebony(void)
-{
-	u8 fpga0_reg;
-	u8 __iomem *fpga0_adr;
-	unsigned long long small_flash_base, large_flash_base;
-
-	fpga0_adr = ioremap64(EBONY_FPGA_ADDR, 16);
-	if (!fpga0_adr)
-		return -ENOMEM;
-
-	fpga0_reg = readb(fpga0_adr);
-	iounmap(fpga0_adr);
-
-	if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
-			!EBONY_FLASH_SEL(fpga0_reg))
-		small_flash_base = EBONY_SMALL_FLASH_HIGH2;
-	else if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
-			EBONY_FLASH_SEL(fpga0_reg))
-		small_flash_base = EBONY_SMALL_FLASH_HIGH1;
-	else if (!EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
-			!EBONY_FLASH_SEL(fpga0_reg))
-		small_flash_base = EBONY_SMALL_FLASH_LOW2;
-	else
-		small_flash_base = EBONY_SMALL_FLASH_LOW1;
-
-	if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
-			!EBONY_ONBRD_FLASH_EN(fpga0_reg))
-		large_flash_base = EBONY_LARGE_FLASH_LOW;
-	else
-		large_flash_base = EBONY_LARGE_FLASH_HIGH;
-
-	ebony_small_map.phys = small_flash_base;
-	ebony_small_map.virt = ioremap64(small_flash_base,
-					 ebony_small_map.size);
-
-	if (!ebony_small_map.virt) {
-		printk("Failed to ioremap flash\n");
-		return -EIO;
-	}
-
-	simple_map_init(&ebony_small_map);
-
-	flash = do_map_probe("jedec_probe", &ebony_small_map);
-	if (flash) {
-		flash->owner = THIS_MODULE;
-		add_mtd_partitions(flash, ebony_small_partitions,
-					ARRAY_SIZE(ebony_small_partitions));
-	} else {
-		printk("map probe failed for flash\n");
-		iounmap(ebony_small_map.virt);
-		return -ENXIO;
-	}
-
-	ebony_large_map.phys = large_flash_base;
-	ebony_large_map.virt = ioremap64(large_flash_base,
-					 ebony_large_map.size);
-
-	if (!ebony_large_map.virt) {
-		printk("Failed to ioremap flash\n");
-		iounmap(ebony_small_map.virt);
-		return -EIO;
-	}
-
-	simple_map_init(&ebony_large_map);
-
-	flash = do_map_probe("jedec_probe", &ebony_large_map);
-	if (flash) {
-		flash->owner = THIS_MODULE;
-		add_mtd_partitions(flash, ebony_large_partitions,
-					ARRAY_SIZE(ebony_large_partitions));
-	} else {
-		printk("map probe failed for flash\n");
-		iounmap(ebony_small_map.virt);
-		iounmap(ebony_large_map.virt);
-		return -ENXIO;
-	}
-
-	return 0;
-}
-
-static void __exit cleanup_ebony(void)
-{
-	if (flash) {
-		del_mtd_partitions(flash);
-		map_destroy(flash);
-	}
-
-	if (ebony_small_map.virt) {
-		iounmap(ebony_small_map.virt);
-		ebony_small_map.virt = NULL;
-	}
-
-	if (ebony_large_map.virt) {
-		iounmap(ebony_large_map.virt);
-		ebony_large_map.virt = NULL;
-	}
-}
-
-module_init(init_ebony);
-module_exit(cleanup_ebony);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
-MODULE_DESCRIPTION("MTD map and partitions for IBM 440GP Ebony boards");
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
deleted file mode 100644
index 5522eac..0000000
--- a/drivers/mtd/maps/ocotea.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Mapping for Ocotea user flash
- *
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * Copyright 2002-2004 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/ibm44x.h>
-#include <platforms/4xx/ocotea.h>
-
-static struct mtd_info *flash;
-
-static struct map_info ocotea_small_map = {
-	.name =		"Ocotea small flash",
-	.size =		OCOTEA_SMALL_FLASH_SIZE,
-	.buswidth =	1,
-};
-
-static struct map_info ocotea_large_map = {
-	.name =		"Ocotea large flash",
-	.size =		OCOTEA_LARGE_FLASH_SIZE,
-	.buswidth =	1,
-};
-
-static struct mtd_partition ocotea_small_partitions[] = {
-	{
-		.name =   "pibs",
-		.offset = 0x0,
-		.size =   0x100000,
-	}
-};
-
-static struct mtd_partition ocotea_large_partitions[] = {
-	{
-		.name =   "fs",
-		.offset = 0,
-		.size =   0x300000,
-	},
-	{
-		.name =   "firmware",
-		.offset = 0x300000,
-		.size =   0x100000,
-	}
-};
-
-int __init init_ocotea(void)
-{
-	u8 fpga0_reg;
-	u8 *fpga0_adr;
-	unsigned long long small_flash_base, large_flash_base;
-
-	fpga0_adr = ioremap64(OCOTEA_FPGA_ADDR, 16);
-	if (!fpga0_adr)
-		return -ENOMEM;
-
-	fpga0_reg = readb((unsigned long)fpga0_adr);
-	iounmap(fpga0_adr);
-
-	if (OCOTEA_BOOT_LARGE_FLASH(fpga0_reg)) {
-		small_flash_base = OCOTEA_SMALL_FLASH_HIGH;
-		large_flash_base = OCOTEA_LARGE_FLASH_LOW;
-	}
-	else {
-		small_flash_base = OCOTEA_SMALL_FLASH_LOW;
-		large_flash_base = OCOTEA_LARGE_FLASH_HIGH;
-	}
-
-	ocotea_small_map.phys = small_flash_base;
-	ocotea_small_map.virt = ioremap64(small_flash_base,
-					 ocotea_small_map.size);
-
-	if (!ocotea_small_map.virt) {
-		printk("Failed to ioremap flash\n");
-		return -EIO;
-	}
-
-	simple_map_init(&ocotea_small_map);
-
-	flash = do_map_probe("map_rom", &ocotea_small_map);
-	if (flash) {
-		flash->owner = THIS_MODULE;
-		add_mtd_partitions(flash, ocotea_small_partitions,
-					ARRAY_SIZE(ocotea_small_partitions));
-	} else {
-		printk("map probe failed for flash\n");
-		iounmap(ocotea_small_map.virt);
-		return -ENXIO;
-	}
-
-	ocotea_large_map.phys = large_flash_base;
-	ocotea_large_map.virt = ioremap64(large_flash_base,
-					 ocotea_large_map.size);
-
-	if (!ocotea_large_map.virt) {
-		printk("Failed to ioremap flash\n");
-		iounmap(ocotea_small_map.virt);
-		return -EIO;
-	}
-
-	simple_map_init(&ocotea_large_map);
-
-	flash = do_map_probe("cfi_probe", &ocotea_large_map);
-	if (flash) {
-		flash->owner = THIS_MODULE;
-		add_mtd_partitions(flash, ocotea_large_partitions,
-					ARRAY_SIZE(ocotea_large_partitions));
-	} else {
-		printk("map probe failed for flash\n");
-		iounmap(ocotea_small_map.virt);
-		iounmap(ocotea_large_map.virt);
-		return -ENXIO;
-	}
-
-	return 0;
-}
-
-static void __exit cleanup_ocotea(void)
-{
-	if (flash) {
-		del_mtd_partitions(flash);
-		map_destroy(flash);
-	}
-
-	if (ocotea_small_map.virt) {
-		iounmap((void *)ocotea_small_map.virt);
-		ocotea_small_map.virt = 0;
-	}
-
-	if (ocotea_large_map.virt) {
-		iounmap((void *)ocotea_large_map.virt);
-		ocotea_large_map.virt = 0;
-	}
-}
-
-module_init(init_ocotea);
-module_exit(cleanup_ocotea);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
-MODULE_DESCRIPTION("MTD map and partitions for IBM 440GX Ocotea boards");
diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c
deleted file mode 100644
index 0a60ebb..0000000
--- a/drivers/mtd/maps/omap-toto-flash.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * NOR Flash memory access on TI Toto board
- *
- * jzhang@ti.com (C) 2003 Texas Instruments.
- *
- *  (C) 2002 MontVista Software, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-
-
-#ifndef CONFIG_ARCH_OMAP
-#error This is for OMAP architecture only
-#endif
-
-//these lines need be moved to a hardware header file
-#define OMAP_TOTO_FLASH_BASE 0xd8000000
-#define OMAP_TOTO_FLASH_SIZE 0x80000
-
-static struct map_info omap_toto_map_flash = {
-	.name =		"OMAP Toto flash",
-	.bankwidth =	2,
-	.virt =		(void __iomem *)OMAP_TOTO_FLASH_BASE,
-};
-
-
-static struct mtd_partition toto_flash_partitions[] = {
-	{
-		.name =		"BootLoader",
-		.size =		0x00040000,     /* hopefully u-boot will stay 128k + 128*/
-		.offset =	0,
-		.mask_flags =	MTD_WRITEABLE,  /* force read-only */
-	}, {
-		.name =		"ReservedSpace",
-		.size =		0x00030000,
-		.offset =	MTDPART_OFS_APPEND,
-		//mask_flags:	MTD_WRITEABLE,  /* force read-only */
-	}, {
-		.name =		"EnvArea",      /* bottom 64KiB for env vars */
-		.size =		MTDPART_SIZ_FULL,
-		.offset =	MTDPART_OFS_APPEND,
-	}
-};
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *flash_mtd;
-
-static int __init init_flash (void)
-{
-
-	struct mtd_partition *parts;
-	int nb_parts = 0;
-	int parsed_nr_parts = 0;
-	const char *part_type;
-
-	/*
-	 * Static partition definition selection
-	 */
-	part_type = "static";
-
- 	parts = toto_flash_partitions;
-	nb_parts = ARRAY_SIZE(toto_flash_partitions);
-	omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE;
-	omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE);
-
-	simple_map_init(&omap_toto_map_flash);
-	/*
-	 * Now let's probe for the actual flash.  Do it here since
-	 * specific machine settings might have been set above.
-	 */
-	printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n",
-		omap_toto_map_flash.bankwidth*8);
-	flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash);
-	if (!flash_mtd)
-		return -ENXIO;
-
- 	if (parsed_nr_parts > 0) {
-		parts = parsed_parts;
-		nb_parts = parsed_nr_parts;
-	}
-
-	if (nb_parts == 0) {
-		printk(KERN_NOTICE "OMAP toto flash: no partition info available,"
-			"registering whole flash at once\n");
-		if (add_mtd_device(flash_mtd)){
-            return -ENXIO;
-        }
-	} else {
-		printk(KERN_NOTICE "Using %s partition definition\n",
-			part_type);
-		return add_mtd_partitions(flash_mtd, parts, nb_parts);
-	}
-	return 0;
-}
-
-int __init omap_toto_mtd_init(void)
-{
-	int status;
-
- 	if (status = init_flash()) {
-		printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n");
-	}
-    return status;
-}
-
-static void  __exit omap_toto_mtd_cleanup(void)
-{
-	if (flash_mtd) {
-		del_mtd_partitions(flash_mtd);
-		map_destroy(flash_mtd);
-		kfree(parsed_parts);
-	}
-}
-
-module_init(omap_toto_mtd_init);
-module_exit(omap_toto_mtd_cleanup);
-
-MODULE_AUTHOR("Jian Zhang");
-MODULE_DESCRIPTION("OMAP Toto board map driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 5c6a25c..48f4cf5 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -203,15 +203,8 @@
 		 * not enabled, should we be allocating a new resource for it
 		 * or simply enabling it?
 		 */
-		if (!(pci_resource_flags(dev, PCI_ROM_RESOURCE) &
-				    IORESOURCE_ROM_ENABLE)) {
-		     	u32 val;
-			pci_resource_flags(dev, PCI_ROM_RESOURCE) |= IORESOURCE_ROM_ENABLE;
-			pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
-			val |= PCI_ROM_ADDRESS_ENABLE;
-			pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
-			printk("%s: enabling expansion ROM\n", pci_name(dev));
-		}
+		pci_enable_rom(dev);
+		printk("%s: enabling expansion ROM\n", pci_name(dev));
 	}
 
 	if (!len || !base)
@@ -232,18 +225,13 @@
 static void
 intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
 {
-	u32 val;
-
 	if (map->base)
 		iounmap(map->base);
 
 	/*
 	 * We need to undo the PCI BAR2/PCI ROM BAR address alteration.
 	 */
-	pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~IORESOURCE_ROM_ENABLE;
-	pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
-	val &= ~PCI_ROM_ADDRESS_ENABLE;
-	pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
+	pci_disable_rom(dev);
 }
 
 static unsigned long
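pci_enable_rom() and pci_disable_rom() are the PCI core helpers that set and clear IORESOURCE_ROM_ENABLE together with PCI_ROM_ADDRESS_ENABLE, which is what the removed open-coded sequences did by hand. For reference, a minimal sketch of a hypothetical driver using the related pci_map_rom()/pci_unmap_rom() helpers, which take care of enabling and disabling the ROM BAR around the access:

#include <linux/pci.h>

/* Sketch only: map the expansion ROM of an already-enabled PCI device,
 * check the 0x55AA ROM signature, then release the mapping again.
 */
static int demo_peek_rom(struct pci_dev *pdev)
{
	size_t size;
	void __iomem *rom = pci_map_rom(pdev, &size);

	if (!rom)
		return -ENODEV;

	if (ioread8(rom) == 0x55 && ioread8(rom + 1) == 0xAA)
		dev_info(&pdev->dev, "expansion ROM present, %zu bytes\n", size);

	pci_unmap_rom(pdev, rom);
	return 0;
}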
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 49acd41..5fcfec0 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -230,8 +230,7 @@
 
 #ifdef CONFIG_MTD_OF_PARTS
 	if (err == 0) {
-		err = of_mtd_parse_partitions(&dev->dev, info->mtd,
-		                              dp, &info->parts);
+		err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
 		if (err < 0)
 			return err;
 	}
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
deleted file mode 100644
index e243476..0000000
--- a/drivers/mtd/maps/walnut.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Mapping for Walnut flash
- * (used ebony.c as a "framework")
- *
- * Heikki Lindholm <holindho@infradead.org>
- *
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/ibm4xx.h>
-#include <platforms/4xx/walnut.h>
-
-/* these should be in platforms/4xx/walnut.h ? */
-#define WALNUT_FLASH_ONBD_N(x)		(x & 0x02)
-#define WALNUT_FLASH_SRAM_SEL(x)	(x & 0x01)
-#define WALNUT_FLASH_LOW		0xFFF00000
-#define WALNUT_FLASH_HIGH		0xFFF80000
-#define WALNUT_FLASH_SIZE		0x80000
-
-static struct mtd_info *flash;
-
-static struct map_info walnut_map = {
-	.name =		"Walnut flash",
-	.size =		WALNUT_FLASH_SIZE,
-	.bankwidth =	1,
-};
-
-/* Actually, OpenBIOS is the last 128 KiB of the flash - better
- * partitioning could be made */
-static struct mtd_partition walnut_partitions[] = {
-	{
-		.name =   "OpenBIOS",
-		.offset = 0x0,
-		.size =   WALNUT_FLASH_SIZE,
-		/*.mask_flags = MTD_WRITEABLE, */ /* force read-only */
-	}
-};
-
-int __init init_walnut(void)
-{
-	u8 fpga_brds1;
-	void *fpga_brds1_adr;
-	void *fpga_status_adr;
-	unsigned long flash_base;
-
-	/* this should already be mapped (platform/4xx/walnut.c) */
-	fpga_status_adr = ioremap(WALNUT_FPGA_BASE, 8);
-	if (!fpga_status_adr)
-		return -ENOMEM;
-
-	fpga_brds1_adr = fpga_status_adr+5;
-	fpga_brds1 = readb(fpga_brds1_adr);
-	/* iounmap(fpga_status_adr); */
-
-	if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
-		printk("The on-board flash is disabled (U79 sw 5)!");
-		iounmap(fpga_status_adr);
-		return -EIO;
-	}
-	if (WALNUT_FLASH_SRAM_SEL(fpga_brds1))
-		flash_base = WALNUT_FLASH_LOW;
-	else
-		flash_base = WALNUT_FLASH_HIGH;
-
-	walnut_map.phys = flash_base;
-	walnut_map.virt =
-		(void __iomem *)ioremap(flash_base, walnut_map.size);
-
-	if (!walnut_map.virt) {
-		printk("Failed to ioremap flash.\n");
-		iounmap(fpga_status_adr);
-		return -EIO;
-	}
-
-	simple_map_init(&walnut_map);
-
-	flash = do_map_probe("jedec_probe", &walnut_map);
-	if (flash) {
-		flash->owner = THIS_MODULE;
-		add_mtd_partitions(flash, walnut_partitions,
-					ARRAY_SIZE(walnut_partitions));
-	} else {
-		printk("map probe failed for flash\n");
-		iounmap(fpga_status_adr);
-		return -ENXIO;
-	}
-
-	iounmap(fpga_status_adr);
-	return 0;
-}
-
-static void __exit cleanup_walnut(void)
-{
-	if (flash) {
-		del_mtd_partitions(flash);
-		map_destroy(flash);
-	}
-
-	if (walnut_map.virt) {
-		iounmap((void *)walnut_map.virt);
-		walnut_map.virt = 0;
-	}
-}
-
-module_init(init_walnut);
-module_exit(cleanup_walnut);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Heikki Lindholm <holindho@infradead.org>");
-MODULE_DESCRIPTION("MTD map and partitions for IBM 405GP Walnut boards");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 1c74762..963840e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -348,7 +348,7 @@
 	wake_up((wait_queue_head_t *)instr->priv);
 }
 
-#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
+#ifdef CONFIG_HAVE_MTD_OTP
 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 {
 	struct mtd_info *mtd = mfi->mtd;
@@ -665,7 +665,7 @@
 		break;
 	}
 
-#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
+#ifdef CONFIG_HAVE_MTD_OTP
 	case OTPSELECT:
 	{
 		int mode;
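With OTPSELECT gated on CONFIG_HAVE_MTD_OTP, any backend that selects HAVE_MTD_OTP (CFI MTD_OTP or the new DataFlash OTP) is reached through the same character-device interface. A hedged user-space sketch, assuming the chip is registered as /dev/mtd0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	unsigned char buf[64];
	int mode = MTD_OTP_FACTORY;	/* the factory-programmed 64 bytes */
	int fd = open("/dev/mtd0", O_RDONLY);	/* assumed device node */

	if (fd < 0 || ioctl(fd, OTPSELECT, &mode) < 0) {
		perror("mtd otp");
		return 1;
	}

	/* reads now go through the driver's read_fact_prot_reg() hook */
	if (read(fd, buf, sizeof(buf)) == sizeof(buf))
		printf("factory OTP byte 0: 0x%02x\n", buf[0]);

	close(fd);
	return 0;
}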
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 2972a5e..789842d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -444,7 +444,7 @@
 			return -EINVAL;
 	}
 
-	instr->fail_addr = 0xffffffff;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	/* make a local copy of instr to avoid modifying the caller's struct */
 	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
@@ -493,7 +493,7 @@
 			/* sanity check: should never happen since
 			 * block alignment has been checked above */
 			BUG_ON(err == -EINVAL);
-			if (erase->fail_addr != 0xffffffff)
+			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 				instr->fail_addr = erase->fail_addr + offset;
 			break;
 		}
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 5a680e1..aebb3b2 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -33,6 +33,7 @@
 #include <linux/interrupt.h>
 #include <linux/mtd/mtd.h>
 
+#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
 #define OOPS_PAGE_SIZE 4096
 
 static struct mtdoops_context {
@@ -99,7 +100,7 @@
 	int ret;
 
 	cxt->nextpage++;
-	if (cxt->nextpage > cxt->oops_pages)
+	if (cxt->nextpage >= cxt->oops_pages)
 		cxt->nextpage = 0;
 	cxt->nextcount++;
 	if (cxt->nextcount == 0xffffffff)
@@ -141,7 +142,7 @@
 	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
 	if (mod != 0) {
 		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
-		if (cxt->nextpage > cxt->oops_pages)
+		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
 	}
 
@@ -158,7 +159,7 @@
 				cxt->nextpage * OOPS_PAGE_SIZE);
 		i++;
 		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
-		if (cxt->nextpage > cxt->oops_pages)
+		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
 		if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
 			printk(KERN_ERR "mtdoops: All blocks bad!\n");
@@ -224,40 +225,40 @@
 {
 	struct mtd_info *mtd = cxt->mtd;
 	int ret, page, maxpos = 0;
-	u32 count, maxcount = 0xffffffff;
+	u32 count[2], maxcount = 0xffffffff;
 	size_t retlen;
 
 	for (page = 0; page < cxt->oops_pages; page++) {
-		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
-		if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
-			printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
+		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
+		if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
+			printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
 				", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
 			continue;
 		}
 
-		if (count == 0xffffffff)
+		if (count[1] != MTDOOPS_KERNMSG_MAGIC)
+			continue;
+		if (count[0] == 0xffffffff)
 			continue;
 		if (maxcount == 0xffffffff) {
-			maxcount = count;
+			maxcount = count[0];
 			maxpos = page;
-		} else if ((count < 0x40000000) && (maxcount > 0xc0000000)) {
-			maxcount = count;
+		} else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
+			maxcount = count[0];
 			maxpos = page;
-		} else if ((count > maxcount) && (count < 0xc0000000)) {
-			maxcount = count;
+		} else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
+			maxcount = count[0];
 			maxpos = page;
-		} else if ((count > maxcount) && (count > 0xc0000000)
+		} else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
 					&& (maxcount > 0x80000000)) {
-			maxcount = count;
+			maxcount = count[0];
 			maxpos = page;
 		}
 	}
 	if (maxcount == 0xffffffff) {
 		cxt->nextpage = 0;
 		cxt->nextcount = 1;
-		cxt->ready = 1;
-		printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
-				cxt->nextpage, cxt->nextcount);
+		schedule_work(&cxt->work_erase);
 		return;
 	}
 
@@ -358,8 +359,9 @@
 
 	if (cxt->writecount == 0) {
 		u32 *stamp = cxt->oops_buf;
-		*stamp = cxt->nextcount;
-		cxt->writecount = 4;
+		*stamp++ = cxt->nextcount;
+		*stamp = MTDOOPS_KERNMSG_MAGIC;
+		cxt->writecount = 8;
 	}
 
 	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
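
The mtdoops hunks above replace the bare 32-bit counter at the start of each
OOPS_PAGE_SIZE page with an 8-byte header: the counter followed by
MTDOOPS_KERNMSG_MAGIC, so the position scan can skip pages that merely happen
to contain a plausible counter value. A minimal sketch of how such a header
could be validated when post-processing a dump of the mtdoops partition (the
struct and helper names are hypothetical; the layout, magic value and the
0xffffffff "erased" convention are taken from the hunks above, and the reader
is assumed to share the writer's endianness):

#include <stdint.h>
#include <string.h>

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00u

struct mtdoops_hdr {
	uint32_t count;	/* monotonically increasing record counter */
	uint32_t magic;	/* MTDOOPS_KERNMSG_MAGIC for records written by mtdoops */
};

/* Returns 1 if the page starts with a valid, used mtdoops record. */
static int mtdoops_page_valid(const void *page)
{
	struct mtdoops_hdr hdr;

	memcpy(&hdr, page, sizeof(hdr));
	if (hdr.magic != MTDOOPS_KERNMSG_MAGIC)
		return 0;		/* not written by mtdoops */
	if (hdr.count == 0xffffffffu)
		return 0;		/* erased flash, never written */
	return 1;
}
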
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9a06dc9..3728913 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -214,7 +214,7 @@
 	instr->addr += part->offset;
 	ret = part->master->erase(part->master, instr);
 	if (ret) {
-		if (instr->fail_addr != 0xffffffff)
+		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
 		instr->addr -= part->offset;
 	}
@@ -226,7 +226,7 @@
 	if (instr->mtd->erase == part_erase) {
 		struct mtd_part *part = PART(instr->mtd);
 
-		if (instr->fail_addr != 0xffffffff)
+		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
 		instr->addr -= part->offset;
 	}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 41f361c..1c2e945 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -56,6 +56,12 @@
 	help
 	  This enables the driver for the iPAQ h1900 flash.
 
+config MTD_NAND_GPIO
+	tristate "GPIO NAND Flash driver"
+	depends on GENERIC_GPIO && ARM
+	help
+	  This enables a GPIO based NAND flash driver.
+
 config MTD_NAND_SPIA
 	tristate "NAND Flash device on SPIA board"
 	depends on ARCH_P720T
@@ -68,12 +74,6 @@
 	help
 	  Support for NAND flash on Amstrad E3 (Delta).
 
-config MTD_NAND_TOTO
-	tristate "NAND Flash device on TOTO board"
-	depends on ARCH_OMAP && BROKEN
-	help
-	  Support for NAND flash on Texas Instruments Toto platform.
-
 config MTD_NAND_TS7250
 	tristate "NAND Flash device on TS-7250 board"
 	depends on MACH_TS72XX
@@ -163,13 +163,6 @@
 	  incorrect ECC generation, and if using these, the default of
 	  software ECC is preferable.
 
-config MTD_NAND_NDFC
-	tristate "NDFC NanD Flash Controller"
-	depends on 4xx && !PPC_MERGE
-	select MTD_NAND_ECC_SMC
-	help
-	 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
-
 config MTD_NAND_S3C2410_CLKSTOP
 	bool "S3C2410 NAND IDLE clock stop"
 	depends on MTD_NAND_S3C2410
@@ -340,6 +333,13 @@
 	  This enables the driver for the NAND flash device found on
 	  PXA3xx processors
 
+config MTD_NAND_PXA3xx_BUILTIN
+	bool "Use builtin definitions for some NAND chips (deprecated)"
+	depends on MTD_NAND_PXA3xx
+	help
+	  This enables builtin definitions for some NAND chips. This
+	  is deprecated in favor of platform specific data.
+
 config MTD_NAND_CM_X270
 	tristate "Support for NAND Flash on CM-X270 modules"
 	depends on MTD_NAND && MACH_ARMCORE
@@ -400,10 +400,24 @@
 
 config MTD_NAND_FSL_UPM
 	tristate "Support for NAND on Freescale UPM"
-	depends on MTD_NAND && OF_GPIO && (PPC_83xx || PPC_85xx)
+	depends on MTD_NAND && (PPC_83xx || PPC_85xx)
 	select FSL_LBC
 	help
 	  Enables support for NAND Flash chips wired onto Freescale PowerPC
 	  processor localbus with User-Programmable Machine support.
 
+config MTD_NAND_MXC
+	tristate "MXC NAND support"
+	depends on ARCH_MX2
+	help
+	  This enables the driver for the NAND flash controller on the
+	  MXC processors.
+
+config MTD_NAND_SH_FLCTL
+	tristate "Support for NAND on Renesas SuperH FLCTL"
+	depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723
+	help
+	  Several Renesas SuperH CPUs have an FLCTL. This option enables
+	  support for NAND Flash using the FLCTL. This driver supports the
+	  SH7723.
+
 endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index b786c5d..b661586 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -8,7 +8,6 @@
 obj-$(CONFIG_MTD_NAND_CAFE)		+= cafe_nand.o
 obj-$(CONFIG_MTD_NAND_SPIA)		+= spia.o
 obj-$(CONFIG_MTD_NAND_AMS_DELTA)	+= ams-delta.o
-obj-$(CONFIG_MTD_NAND_TOTO)		+= toto.o
 obj-$(CONFIG_MTD_NAND_AUTCPU12)		+= autcpu12.o
 obj-$(CONFIG_MTD_NAND_EDB7312)		+= edb7312.o
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
@@ -24,6 +23,7 @@
 obj-$(CONFIG_MTD_NAND_CS553X)		+= cs553x_nand.o
 obj-$(CONFIG_MTD_NAND_NDFC)		+= ndfc.o
 obj-$(CONFIG_MTD_NAND_ATMEL)		+= atmel_nand.o
+obj-$(CONFIG_MTD_NAND_GPIO)		+= gpio.o
 obj-$(CONFIG_MTD_NAND_CM_X270)		+= cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE)	+= excite_nandflash.o
 obj-$(CONFIG_MTD_NAND_PXA3xx)		+= pxa3xx_nand.o
@@ -34,5 +34,7 @@
 obj-$(CONFIG_MTD_NAND_ORION)		+= orion_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_ELBC)		+= fsl_elbc_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_UPM)		+= fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SH_FLCTL)		+= sh_flctl.o
+obj-$(CONFIG_MTD_NAND_MXC)		+= mxc_nand.o
 
 nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 257937c..9623803 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -691,7 +691,7 @@
 	al[0].port = ALAUDA_PORT_XD;
 	al[1].port = ALAUDA_PORT_SM;
 
-	info("alauda probed");
+	dev_info(&interface->dev, "alauda probed\n");
 	alauda_check_media(al);
 	alauda_check_media(al+1);
 
@@ -716,7 +716,7 @@
 	if (al)
 		kref_put(&al->kref, alauda_delete);
 
-	info("alauda gone");
+	dev_info(&interface->dev, "alauda gone");
 }
 
 static struct usb_driver alauda_driver = {
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 3387e0d..c98c1570 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -174,48 +174,6 @@
 }
 
 /*
- * write oob for small pages
- */
-static int atmel_nand_write_oob_512(struct mtd_info *mtd,
-		struct nand_chip *chip, int page)
-{
-	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
-	int eccsize = chip->ecc.size, length = mtd->oobsize;
-	int len, pos, status = 0;
-	const uint8_t *bufpoi = chip->oob_poi;
-
-	pos = eccsize + chunk;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
-	len = min_t(int, length, chunk);
-	chip->write_buf(mtd, bufpoi, len);
-	bufpoi += len;
-	length -= len;
-	if (length > 0)
-		chip->write_buf(mtd, bufpoi, length);
-
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-
-}
-
-/*
- * read oob for small pages
- */
-static int atmel_nand_read_oob_512(struct mtd_info *mtd,
-		struct nand_chip *chip,	int page, int sndcmd)
-{
-	if (sndcmd) {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-		sndcmd = 0;
-	}
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return sndcmd;
-}
-
-/*
  * Calculate HW ECC
  *
  * function called after a write
@@ -235,14 +193,14 @@
 	/* get the first 2 ECC bytes */
 	ecc_value = ecc_readl(host->ecc, PR);
 
-	ecc_code[eccpos[0]] = ecc_value & 0xFF;
-	ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF;
+	ecc_code[0] = ecc_value & 0xFF;
+	ecc_code[1] = (ecc_value >> 8) & 0xFF;
 
 	/* get the last 2 ECC bytes */
 	ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
 
-	ecc_code[eccpos[2]] = ecc_value & 0xFF;
-	ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF;
+	ecc_code[2] = ecc_value & 0xFF;
+	ecc_code[3] = (ecc_value >> 8) & 0xFF;
 
 	return 0;
 }
@@ -476,14 +434,12 @@
 			res = -EIO;
 			goto err_ecc_ioremap;
 		}
-		nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+		nand_chip->ecc.mode = NAND_ECC_HW;
 		nand_chip->ecc.calculate = atmel_nand_calculate;
 		nand_chip->ecc.correct = atmel_nand_correct;
 		nand_chip->ecc.hwctl = atmel_nand_hwctl;
 		nand_chip->ecc.read_page = atmel_nand_read_page;
 		nand_chip->ecc.bytes = 4;
-		nand_chip->ecc.prepad = 0;
-		nand_chip->ecc.postpad = 0;
 	}
 
 	nand_chip->chip_delay = 20;		/* 20us command delay time */
@@ -514,7 +470,7 @@
 		goto err_scan_ident;
 	}
 
-	if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) {
+	if (nand_chip->ecc.mode == NAND_ECC_HW) {
 		/* ECC is calculated for the whole page (1 step) */
 		nand_chip->ecc.size = mtd->writesize;
 
@@ -522,8 +478,6 @@
 		switch (mtd->writesize) {
 		case 512:
 			nand_chip->ecc.layout = &atmel_oobinfo_small;
-			nand_chip->ecc.read_oob = atmel_nand_read_oob_512;
-			nand_chip->ecc.write_oob = atmel_nand_write_oob_512;
 			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
 			break;
 		case 1024:
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 3370a80..9f1b451 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -289,8 +289,10 @@
 	int i;
 	uint64_t val;
 
+#ifdef CONFIG_MTD_PARTITIONS
 	int mtd_parts_nb = 0;
 	struct mtd_partition *mtd_parts = NULL;
+#endif
 
 	/* If the CPU isn't a Geode GX or LX, abort */
 	if (!is_geode())
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 98ad3ce..4aa5bd6 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -918,8 +918,7 @@
 
 #ifdef CONFIG_MTD_OF_PARTS
 	if (ret == 0) {
-		ret = of_mtd_parse_partitions(priv->dev, &priv->mtd,
-		                              node, &parts);
+		ret = of_mtd_parse_partitions(priv->dev, node, &parts);
 		if (ret < 0)
 			goto err;
 	}
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 1ebfd87..024e3ff 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
@@ -36,8 +37,6 @@
 	uint8_t upm_cmd_offset;
 	void __iomem *io_base;
 	int rnb_gpio;
-	const uint32_t *wait_pattern;
-	const uint32_t *wait_write;
 	int chip_delay;
 };
 
@@ -61,10 +60,11 @@
 	if (fun->rnb_gpio >= 0) {
 		while (--cnt && !fun_chip_ready(&fun->mtd))
 			cpu_relax();
+		if (!cnt)
+			dev_err(fun->dev, "tired waiting for RNB\n");
+	} else {
+		ndelay(100);
 	}
-
-	if (!cnt)
-		dev_err(fun->dev, "tired waiting for RNB\n");
 }
 
 static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -89,8 +89,7 @@
 
 	fsl_upm_run_pattern(&fun->upm, fun->io_base, cmd);
 
-	if (fun->wait_pattern)
-		fun_wait_rnb(fun);
+	fun_wait_rnb(fun);
 }
 
 static uint8_t fun_read_byte(struct mtd_info *mtd)
@@ -116,14 +115,16 @@
 
 	for (i = 0; i < len; i++) {
 		out_8(fun->chip.IO_ADDR_W, buf[i]);
-		if (fun->wait_write)
-			fun_wait_rnb(fun);
+		fun_wait_rnb(fun);
 	}
 }
 
-static int __devinit fun_chip_init(struct fsl_upm_nand *fun)
+static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
+				   const struct device_node *upm_np,
+				   const struct resource *io_res)
 {
 	int ret;
+	struct device_node *flash_np;
 #ifdef CONFIG_MTD_PARTITIONS
 	static const char *part_types[] = { "cmdlinepart", NULL, };
 #endif
@@ -143,18 +144,37 @@
 	fun->mtd.priv = &fun->chip;
 	fun->mtd.owner = THIS_MODULE;
 
+	flash_np = of_get_next_child(upm_np, NULL);
+	if (!flash_np)
+		return -ENODEV;
+
+	fun->mtd.name = kasprintf(GFP_KERNEL, "%x.%s", io_res->start,
+				  flash_np->name);
+	if (!fun->mtd.name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	ret = nand_scan(&fun->mtd, 1);
 	if (ret)
-		return ret;
-
-	fun->mtd.name = fun->dev->bus_id;
+		goto err;
 
 #ifdef CONFIG_MTD_PARTITIONS
 	ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
-	if (ret > 0)
-		return add_mtd_partitions(&fun->mtd, fun->parts, ret);
+
+#ifdef CONFIG_MTD_OF_PARTS
+	if (ret == 0)
+		ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
+					      flash_np, &fun->parts);
 #endif
-	return add_mtd_device(&fun->mtd);
+	if (ret > 0)
+		ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
+	else
+#endif
+		ret = add_mtd_device(&fun->mtd);
+err:
+	of_node_put(flash_np);
+	return ret;
 }
 
 static int __devinit fun_probe(struct of_device *ofdev,
@@ -211,6 +231,12 @@
 		goto err2;
 	}
 
+	prop = of_get_property(ofdev->node, "chip-delay", NULL);
+	if (prop)
+		fun->chip_delay = *prop;
+	else
+		fun->chip_delay = 50;
+
 	fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
 					  io_res.end - io_res.start + 1);
 	if (!fun->io_base) {
@@ -220,17 +246,8 @@
 
 	fun->dev = &ofdev->dev;
 	fun->last_ctrl = NAND_CLE;
-	fun->wait_pattern = of_get_property(ofdev->node, "fsl,wait-pattern",
-					    NULL);
-	fun->wait_write = of_get_property(ofdev->node, "fsl,wait-write", NULL);
 
-	prop = of_get_property(ofdev->node, "chip-delay", NULL);
-	if (prop)
-		fun->chip_delay = *prop;
-	else
-		fun->chip_delay = 50;
-
-	ret = fun_chip_init(fun);
+	ret = fun_chip_init(fun, ofdev->node, &io_res);
 	if (ret)
 		goto err2;
 
@@ -251,6 +268,7 @@
 	struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
 
 	nand_release(&fun->mtd);
+	kfree(fun->mtd.name);
 
 	if (fun->rnb_gpio >= 0)
 		gpio_free(fun->rnb_gpio);
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
new file mode 100644
index 0000000..8f902e7
--- /dev/null
+++ b/drivers/mtd/nand/gpio.c
@@ -0,0 +1,375 @@
+/*
+ * drivers/mtd/nand/gpio.c
+ *
+ * Updated, and converted to generic GPIO based driver by Russell King.
+ *
+ * Written by Ben Dooks <ben@simtec.co.uk>
+ *   Based on 2.4 version by Mark Whittaker
+ *
+ * © 2004 Simtec Electronics
+ *
+ * Device driver for NAND connected via GPIO
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand-gpio.h>
+
+struct gpiomtd {
+	void __iomem		*io_sync;
+	struct mtd_info		mtd_info;
+	struct nand_chip	nand_chip;
+	struct gpio_nand_platdata plat;
+};
+
+#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)
+
+
+#ifdef CONFIG_ARM
+/* gpio_nand_dosync()
+ *
+ * Make sure the GPIO state changes occur in-order with writes to NAND
+ * memory region.
+ * Needed on PXA due to bus-reordering within the SoC itself (see the
+ * section on I/O ordering in the PXA manual, section 2.3, p. 35).
+ */
+static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
+{
+	unsigned long tmp;
+
+	if (gpiomtd->io_sync) {
+		/*
+		 * Linux memory barriers don't cater for what's required here.
+		 * What's required is what's here - a read from a separate
+		 * region with a dependency on that read.
+		 */
+		tmp = readl(gpiomtd->io_sync);
+		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
+	}
+}
+#else
+static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
+#endif
+
+static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+
+	gpio_nand_dosync(gpiomtd);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
+		gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
+		gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+		gpio_nand_dosync(gpiomtd);
+	}
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
+	gpio_nand_dosync(gpiomtd);
+}
+
+static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *this = mtd->priv;
+
+	writesb(this->IO_ADDR_W, buf, len);
+}
+
+static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd->priv;
+
+	readsb(this->IO_ADDR_R, buf, len);
+}
+
+static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *this = mtd->priv;
+	unsigned char read, *p = (unsigned char *) buf;
+	int i, err = 0;
+
+	for (i = 0; i < len; i++) {
+		read = readb(this->IO_ADDR_R);
+		if (read != p[i]) {
+			pr_debug("%s: err at %d (read %04x vs %04x)\n",
+			       __func__, i, read, p[i]);
+			err = -EFAULT;
+		}
+	}
+	return err;
+}
+
+static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
+				 int len)
+{
+	struct nand_chip *this = mtd->priv;
+
+	if (IS_ALIGNED((unsigned long)buf, 2)) {
+		writesw(this->IO_ADDR_W, buf, len>>1);
+	} else {
+		int i;
+		unsigned short *ptr = (unsigned short *)buf;
+
+		for (i = 0; i < len; i += 2, ptr++)
+			writew(*ptr, this->IO_ADDR_W);
+	}
+}
+
+static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd->priv;
+
+	if (IS_ALIGNED((unsigned long)buf, 2)) {
+		readsw(this->IO_ADDR_R, buf, len>>1);
+	} else {
+		int i;
+		unsigned short *ptr = (unsigned short *)buf;
+
+		for (i = 0; i < len; i += 2, ptr++)
+			*ptr = readw(this->IO_ADDR_R);
+	}
+}
+
+static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
+				 int len)
+{
+	struct nand_chip *this = mtd->priv;
+	unsigned short read, *p = (unsigned short *) buf;
+	int i, err = 0;
+	len >>= 1;
+
+	for (i = 0; i < len; i++) {
+		read = readw(this->IO_ADDR_R);
+		if (read != p[i]) {
+			pr_debug("%s: err at %d (read %04x vs %04x)\n",
+			       __func__, i, read, p[i]);
+			err = -EFAULT;
+		}
+	}
+	return err;
+}
+
+
+static int gpio_nand_devready(struct mtd_info *mtd)
+{
+	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+	return gpio_get_value(gpiomtd->plat.gpio_rdy);
+}
+
+static int __devexit gpio_nand_remove(struct platform_device *dev)
+{
+	struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
+	struct resource *res;
+
+	nand_release(&gpiomtd->mtd_info);
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	iounmap(gpiomtd->io_sync);
+	if (res)
+		release_mem_region(res->start, res->end - res->start + 1);
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	iounmap(gpiomtd->nand_chip.IO_ADDR_R);
+	release_mem_region(res->start, res->end - res->start + 1);
+
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+	gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+
+	gpio_free(gpiomtd->plat.gpio_cle);
+	gpio_free(gpiomtd->plat.gpio_ale);
+	gpio_free(gpiomtd->plat.gpio_nce);
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_free(gpiomtd->plat.gpio_nwp);
+	gpio_free(gpiomtd->plat.gpio_rdy);
+
+	kfree(gpiomtd);
+
+	return 0;
+}
+
+static void __iomem *request_and_remap(struct resource *res, size_t size,
+					const char *name, int *err)
+{
+	void __iomem *ptr;
+
+	if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
+		*err = -EBUSY;
+		return NULL;
+	}
+
+	ptr = ioremap(res->start, size);
+	if (!ptr) {
+		release_mem_region(res->start, res->end - res->start + 1);
+		*err = -ENOMEM;
+	}
+	return ptr;
+}
+
+static int __devinit gpio_nand_probe(struct platform_device *dev)
+{
+	struct gpiomtd *gpiomtd;
+	struct nand_chip *this;
+	struct resource *res0, *res1;
+	int ret;
+
+	if (!dev->dev.platform_data)
+		return -EINVAL;
+
+	res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res0)
+		return -EINVAL;
+
+	gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
+	if (gpiomtd == NULL) {
+		dev_err(&dev->dev, "failed to create NAND MTD\n");
+		return -ENOMEM;
+	}
+
+	this = &gpiomtd->nand_chip;
+	this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
+	if (!this->IO_ADDR_R) {
+		dev_err(&dev->dev, "unable to map NAND\n");
+		goto err_map;
+	}
+
+	res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	if (res1) {
+		gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
+		if (!gpiomtd->io_sync) {
+			dev_err(&dev->dev, "unable to map sync NAND\n");
+			goto err_sync;
+		}
+	}
+
+	memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+
+	ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
+	if (ret)
+		goto err_nce;
+	gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
+		ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
+		if (ret)
+			goto err_nwp;
+		gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+	}
+	ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
+	if (ret)
+		goto err_ale;
+	gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+	ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
+	if (ret)
+		goto err_cle;
+	gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
+	ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+	if (ret)
+		goto err_rdy;
+	gpio_direction_input(gpiomtd->plat.gpio_rdy);
+
+
+	this->IO_ADDR_W  = this->IO_ADDR_R;
+	this->ecc.mode   = NAND_ECC_SOFT;
+	this->options    = gpiomtd->plat.options;
+	this->chip_delay = gpiomtd->plat.chip_delay;
+
+	/* install our routines */
+	this->cmd_ctrl   = gpio_nand_cmd_ctrl;
+	this->dev_ready  = gpio_nand_devready;
+
+	if (this->options & NAND_BUSWIDTH_16) {
+		this->read_buf   = gpio_nand_readbuf16;
+		this->write_buf  = gpio_nand_writebuf16;
+		this->verify_buf = gpio_nand_verifybuf16;
+	} else {
+		this->read_buf   = gpio_nand_readbuf;
+		this->write_buf  = gpio_nand_writebuf;
+		this->verify_buf = gpio_nand_verifybuf;
+	}
+
+	/* set the mtd private data for the nand driver */
+	gpiomtd->mtd_info.priv = this;
+	gpiomtd->mtd_info.owner = THIS_MODULE;
+
+	if (nand_scan(&gpiomtd->mtd_info, 1)) {
+		dev_err(&dev->dev, "no nand chips found?\n");
+		ret = -ENXIO;
+		goto err_wp;
+	}
+
+	if (gpiomtd->plat.adjust_parts)
+		gpiomtd->plat.adjust_parts(&gpiomtd->plat,
+					   gpiomtd->mtd_info.size);
+
+	add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts,
+			   gpiomtd->plat.num_parts);
+	platform_set_drvdata(dev, gpiomtd);
+
+	return 0;
+
+err_wp:
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+	gpio_free(gpiomtd->plat.gpio_rdy);
+err_rdy:
+	gpio_free(gpiomtd->plat.gpio_cle);
+err_cle:
+	gpio_free(gpiomtd->plat.gpio_ale);
+err_ale:
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_free(gpiomtd->plat.gpio_nwp);
+err_nwp:
+	gpio_free(gpiomtd->plat.gpio_nce);
+err_nce:
+	iounmap(gpiomtd->io_sync);
+	if (res1)
+		release_mem_region(res1->start, res1->end - res1->start + 1);
+err_sync:
+	iounmap(gpiomtd->nand_chip.IO_ADDR_R);
+	release_mem_region(res0->start, res0->end - res0->start + 1);
+err_map:
+	kfree(gpiomtd);
+	return ret;
+}
+
+static struct platform_driver gpio_nand_driver = {
+	.probe		= gpio_nand_probe,
+	.remove		= gpio_nand_remove,
+	.driver		= {
+		.name	= "gpio-nand",
+	},
+};
+
+static int __init gpio_nand_init(void)
+{
+	printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
+
+	return platform_driver_register(&gpio_nand_driver);
+}
+
+static void __exit gpio_nand_exit(void)
+{
+	platform_driver_unregister(&gpio_nand_driver);
+}
+
+module_init(gpio_nand_init);
+module_exit(gpio_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
new file mode 100644
index 0000000..21fd4f1
--- /dev/null
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -0,0 +1,1077 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/flash.h>
+#include <mach/mxc_nand.h>
+
+#define DRIVER_NAME "mxc_nand"
+
+/* Addresses for NFC registers */
+#define NFC_BUF_SIZE		0xE00
+#define NFC_BUF_ADDR		0xE04
+#define NFC_FLASH_ADDR		0xE06
+#define NFC_FLASH_CMD		0xE08
+#define NFC_CONFIG		0xE0A
+#define NFC_ECC_STATUS_RESULT	0xE0C
+#define NFC_RSLTMAIN_AREA	0xE0E
+#define NFC_RSLTSPARE_AREA	0xE10
+#define NFC_WRPROT		0xE12
+#define NFC_UNLOCKSTART_BLKADDR	0xE14
+#define NFC_UNLOCKEND_BLKADDR	0xE16
+#define NFC_NF_WRPRST		0xE18
+#define NFC_CONFIG1		0xE1A
+#define NFC_CONFIG2		0xE1C
+
+/* Addresses for NFC RAM BUFFER Main area 0 */
+#define MAIN_AREA0		0x000
+#define MAIN_AREA1		0x200
+#define MAIN_AREA2		0x400
+#define MAIN_AREA3		0x600
+
+/* Addresses for NFC SPARE BUFFER Spare area 0 */
+#define SPARE_AREA0		0x800
+#define SPARE_AREA1		0x810
+#define SPARE_AREA2		0x820
+#define SPARE_AREA3		0x830
+
+/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
+ * for Command operation */
+#define NFC_CMD            0x1
+
+/* Set INT to 0, FADD to 1, rest to 0 in NFC_CONFIG2 Register
+ * for Address operation */
+#define NFC_ADDR           0x2
+
+/* Set INT to 0, FDI to 1, rest to 0 in NFC_CONFIG2 Register
+ * for Input operation */
+#define NFC_INPUT          0x4
+
+/* Set INT to 0, FDO to 001, rest to 0 in NFC_CONFIG2 Register
+ * for Data Output operation */
+#define NFC_OUTPUT         0x8
+
+/* Set INT to 0, FDO to 010, rest to 0 in NFC_CONFIG2 Register
+ * for Read ID operation */
+#define NFC_ID             0x10
+
+/* Set INT to 0, FDO to 100, rest to 0 in NFC_CONFIG2 Register
+ * for Read Status operation */
+#define NFC_STATUS         0x20
+
+/* Set INT to 1, rest to 0 in NFC_CONFIG2 Register for Read
+ * Status operation */
+#define NFC_INT            0x8000
+
+#define NFC_SP_EN           (1 << 2)
+#define NFC_ECC_EN          (1 << 3)
+#define NFC_INT_MSK         (1 << 4)
+#define NFC_BIG             (1 << 5)
+#define NFC_RST             (1 << 6)
+#define NFC_CE              (1 << 7)
+#define NFC_ONE_CYCLE       (1 << 8)
+
+struct mxc_nand_host {
+	struct mtd_info		mtd;
+	struct nand_chip	nand;
+	struct mtd_partition	*parts;
+	struct device		*dev;
+
+	void __iomem		*regs;
+	int			spare_only;
+	int			status_request;
+	int			pagesize_2k;
+	uint16_t		col_addr;
+	struct clk		*clk;
+	int			clk_act;
+	int			irq;
+
+	wait_queue_head_t	irq_waitq;
+};
+
+/* Define delays in microsec for NAND device operations */
+#define TROP_US_DELAY   2000
+/* Macros to get byte and bit positions of ECC */
+#define COLPOS(x)  ((x) >> 3)
+#define BITPOS(x) ((x) & 0xf)
+
+/* Define single bit Error positions in Main & Spare area */
+#define MAIN_SINGLEBIT_ERROR 0x4
+#define SPARE_SINGLEBIT_ERROR 0x1
+
+/* OOB placement block for use with hardware ecc generation */
+static struct nand_ecclayout nand_hw_eccoob_8 = {
+	.eccbytes = 5,
+	.eccpos = {6, 7, 8, 9, 10},
+	.oobfree = {{0, 5}, {11, 5}, }
+};
+
+static struct nand_ecclayout nand_hw_eccoob_16 = {
+	.eccbytes = 5,
+	.eccpos = {6, 7, 8, 9, 10},
+	.oobfree = {{0, 6}, {12, 4}, }
+};
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
+#endif
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+	struct mxc_nand_host *host = dev_id;
+
+	uint16_t tmp;
+
+	tmp = readw(host->regs + NFC_CONFIG1);
+	tmp |= NFC_INT_MSK; /* Disable interrupt */
+	writew(tmp, host->regs + NFC_CONFIG1);
+
+	wake_up(&host->irq_waitq);
+
+	return IRQ_HANDLED;
+}
+
+/* This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of config2 register.
+ */
+static void wait_op_done(struct mxc_nand_host *host, int max_retries,
+				uint16_t param, int useirq)
+{
+	uint32_t tmp;
+
+	if (useirq) {
+		if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
+
+			tmp = readw(host->regs + NFC_CONFIG1);
+			tmp  &= ~NFC_INT_MSK;	/* Enable interrupt */
+			writew(tmp, host->regs + NFC_CONFIG1);
+
+			wait_event(host->irq_waitq,
+				readw(host->regs + NFC_CONFIG2) & NFC_INT);
+
+			tmp = readw(host->regs + NFC_CONFIG2);
+			tmp  &= ~NFC_INT;
+			writew(tmp, host->regs + NFC_CONFIG2);
+		}
+	} else {
+		while (max_retries-- > 0) {
+			if (readw(host->regs + NFC_CONFIG2) & NFC_INT) {
+				tmp = readw(host->regs + NFC_CONFIG2);
+				tmp  &= ~NFC_INT;
+				writew(tmp, host->regs + NFC_CONFIG2);
+				break;
+			}
+			udelay(1);
+		}
+		if (max_retries <= 0)
+			DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
+			      __func__, param);
+	}
+}
+
+/* This function issues the specified command to the NAND device and
+ * waits for completion. */
+static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+	DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+
+	writew(cmd, host->regs + NFC_FLASH_CMD);
+	writew(NFC_CMD, host->regs + NFC_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, TROP_US_DELAY, cmd, useirq);
+}
+
+/* This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command. */
+static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+	DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
+
+	writew(addr, host->regs + NFC_FLASH_ADDR);
+	writew(NFC_ADDR, host->regs + NFC_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, TROP_US_DELAY, addr, islast);
+}
+
+/* This function requests the NANDFC to initiate the transfer
+ * of data currently in the NANDFC RAM buffer to the NAND device. */
+static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
+			int spare_only)
+{
+	DEBUG(MTD_DEBUG_LEVEL3, "send_prog_page (%d)\n", spare_only);
+
+	/* NANDFC buffer 0 is used for page read/write */
+	writew(buf_id, host->regs + NFC_BUF_ADDR);
+
+	/* Configure spare or page+spare access */
+	if (!host->pagesize_2k) {
+		uint16_t config1 = readw(host->regs + NFC_CONFIG1);
+		if (spare_only)
+			config1 |= NFC_SP_EN;
+		else
+			config1 &= ~(NFC_SP_EN);
+		writew(config1, host->regs + NFC_CONFIG1);
+	}
+
+	writew(NFC_INPUT, host->regs + NFC_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, TROP_US_DELAY, spare_only, true);
+}
+
+/* Requests the NANDFC to initiate the transfer of data from the
+ * NAND device into the NANDFC RAM buffer. */
+static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
+		int spare_only)
+{
+	DEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only);
+
+	/* NANDFC buffer 0 is used for page read/write */
+	writew(buf_id, host->regs + NFC_BUF_ADDR);
+
+	/* Configure spare or page+spare access */
+	if (!host->pagesize_2k) {
+		uint32_t config1 = readw(host->regs + NFC_CONFIG1);
+		if (spare_only)
+			config1 |= NFC_SP_EN;
+		else
+			config1 &= ~NFC_SP_EN;
+		writew(config1, host->regs + NFC_CONFIG1);
+	}
+
+	writew(NFC_OUTPUT, host->regs + NFC_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, TROP_US_DELAY, spare_only, true);
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id(struct mxc_nand_host *host)
+{
+	struct nand_chip *this = &host->nand;
+	uint16_t tmp;
+
+	/* NANDFC buffer 0 is used for device ID output */
+	writew(0x0, host->regs + NFC_BUF_ADDR);
+
+	/* Read ID into main buffer */
+	tmp = readw(host->regs + NFC_CONFIG1);
+	tmp &= ~NFC_SP_EN;
+	writew(tmp, host->regs + NFC_CONFIG1);
+
+	writew(NFC_ID, host->regs + NFC_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, TROP_US_DELAY, 0, true);
+
+	if (this->options & NAND_BUSWIDTH_16) {
+		void __iomem *main_buf = host->regs + MAIN_AREA0;
+		/* compress the ID info */
+		writeb(readb(main_buf + 2), main_buf + 1);
+		writeb(readb(main_buf + 4), main_buf + 2);
+		writeb(readb(main_buf + 6), main_buf + 3);
+		writeb(readb(main_buf + 8), main_buf + 4);
+		writeb(readb(main_buf + 10), main_buf + 5);
+	}
+}
+
+/* This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status. */
+static uint16_t get_dev_status(struct mxc_nand_host *host)
+{
+	void __iomem *main_buf = host->regs + MAIN_AREA1;
+	uint32_t store;
+	uint16_t ret, tmp;
+	/* Issue status request to NAND device */
+
+	/* save the first word of main area 1; it is restored below */
+	store = readl(main_buf);
+	/* NANDFC buffer 1 is used for device status to prevent
+	 * corruption of read/write buffer on status requests. */
+	writew(1, host->regs + NFC_BUF_ADDR);
+
+	/* Read status into main buffer */
+	tmp = readw(host->regs + NFC_CONFIG1);
+	tmp &= ~NFC_SP_EN;
+	writew(tmp, host->regs + NFC_CONFIG1);
+
+	writew(NFC_STATUS, host->regs + NFC_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, TROP_US_DELAY, 0, true);
+
+	/* Status is placed in first word of main buffer */
+	/* get the status, then restore the area 1 data */
+	ret = readw(main_buf);
+	writel(store, main_buf);
+
+	return ret;
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct mtd_info *mtd)
+{
+	/*
+	 * NFC handles R/B internally. Therefore, this function
+	 * always returns status as ready.
+	 */
+	return 1;
+}
+
+static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	/*
+	 * If HW ECC is enabled, we turn it on during init. There is
+	 * no need to enable again here.
+	 */
+}
+
+static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+				 u_char *read_ecc, u_char *calc_ecc)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+
+	/*
+	 * 1-Bit errors are automatically corrected in HW.  No need for
+	 * additional correction.  2-Bit errors cannot be corrected by
+	 * HW ECC, so we need to return failure
+	 */
+	uint16_t ecc_status = readw(host->regs + NFC_ECC_STATUS_RESULT);
+
+	if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
+		DEBUG(MTD_DEBUG_LEVEL0,
+		      "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+				  u_char *ecc_code)
+{
+	return 0;
+}
+
+static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+	uint8_t ret = 0;
+	uint16_t col, rd_word;
+	uint16_t __iomem *main_buf = host->regs + MAIN_AREA0;
+	uint16_t __iomem *spare_buf = host->regs + SPARE_AREA0;
+
+	/* Check for status request */
+	if (host->status_request)
+		return get_dev_status(host) & 0xFF;
+
+	/* Get column for 16-bit access */
+	col = host->col_addr >> 1;
+
+	/* If we are accessing the spare region */
+	if (host->spare_only)
+		rd_word = readw(&spare_buf[col]);
+	else
+		rd_word = readw(&main_buf[col]);
+
+	/* Pick upper/lower byte of word from RAM buffer */
+	if (host->col_addr & 0x1)
+		ret = (rd_word >> 8) & 0xFF;
+	else
+		ret = rd_word & 0xFF;
+
+	/* Update saved column address */
+	host->col_addr++;
+
+	return ret;
+}
+
+static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+	uint16_t col, rd_word, ret;
+	uint16_t __iomem *p;
+
+	DEBUG(MTD_DEBUG_LEVEL3,
+	      "mxc_nand_read_word(col = %d)\n", host->col_addr);
+
+	col = host->col_addr;
+	/* Adjust saved column address */
+	if (col < mtd->writesize && host->spare_only)
+		col += mtd->writesize;
+
+	if (col < mtd->writesize)
+		p = (host->regs + MAIN_AREA0) + (col >> 1);
+	else
+		p = (host->regs + SPARE_AREA0) + ((col - mtd->writesize) >> 1);
+
+	if (col & 1) {
+		rd_word = readw(p);
+		ret = (rd_word >> 8) & 0xff;
+		rd_word = readw(&p[1]);
+		ret |= (rd_word << 8) & 0xff00;
+
+	} else
+		ret = readw(p);
+
+	/* Update saved column address */
+	host->col_addr = col + 2;
+
+	return ret;
+}
+
+/* Write data of length len from buffer buf to the NAND Flash. The data is
+ * first copied to the NFC RAM buffer; after the Data Input operation by the
+ * NFC, it is written to the NAND Flash. */
+static void mxc_nand_write_buf(struct mtd_info *mtd,
+				const u_char *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+	int n, col, i = 0;
+
+	DEBUG(MTD_DEBUG_LEVEL3,
+	      "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
+	      len);
+
+	col = host->col_addr;
+
+	/* Adjust saved column address */
+	if (col < mtd->writesize && host->spare_only)
+		col += mtd->writesize;
+
+	n = mtd->writesize + mtd->oobsize - col;
+	n = min(len, n);
+
+	DEBUG(MTD_DEBUG_LEVEL3,
+	      "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
+
+	while (n) {
+		void __iomem *p;
+
+		if (col < mtd->writesize)
+			p = host->regs + MAIN_AREA0 + (col & ~3);
+		else
+			p = host->regs + SPARE_AREA0 -
+						mtd->writesize + (col & ~3);
+
+		DEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__,
+		      __LINE__, p);
+
+		if (((col | (int)&buf[i]) & 3) || n < 16) {
+			uint32_t data = 0;
+
+			if (col & 3 || n < 4)
+				data = readl(p);
+
+			switch (col & 3) {
+			case 0:
+				if (n) {
+					data = (data & 0xffffff00) |
+					    (buf[i++] << 0);
+					n--;
+					col++;
+				}
+			case 1:
+				if (n) {
+					data = (data & 0xffff00ff) |
+					    (buf[i++] << 8);
+					n--;
+					col++;
+				}
+			case 2:
+				if (n) {
+					data = (data & 0xff00ffff) |
+					    (buf[i++] << 16);
+					n--;
+					col++;
+				}
+			case 3:
+				if (n) {
+					data = (data & 0x00ffffff) |
+					    (buf[i++] << 24);
+					n--;
+					col++;
+				}
+			}
+
+			writel(data, p);
+		} else {
+			int m = mtd->writesize - col;
+
+			if (col >= mtd->writesize)
+				m += mtd->oobsize;
+
+			m = min(n, m) & ~3;
+
+			DEBUG(MTD_DEBUG_LEVEL3,
+			      "%s:%d: n = %d, m = %d, i = %d, col = %d\n",
+			      __func__,  __LINE__, n, m, i, col);
+
+			memcpy(p, &buf[i], m);
+			col += m;
+			i += m;
+			n -= m;
+		}
+	}
+	/* Update saved column address */
+	host->col_addr = col;
+}
+
+/* Read data from the NAND Flash. The NFC first initiates a data output
+ * cycle, which copies the data into the NFC RAM buffer; len bytes are then
+ * copied from there into buffer buf.
+ */
+static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+	int n, col, i = 0;
+
+	DEBUG(MTD_DEBUG_LEVEL3,
+	      "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len);
+
+	col = host->col_addr;
+
+	/* Adjust saved column address */
+	if (col < mtd->writesize && host->spare_only)
+		col += mtd->writesize;
+
+	n = mtd->writesize + mtd->oobsize - col;
+	n = min(len, n);
+
+	while (n) {
+		void __iomem *p;
+
+		if (col < mtd->writesize)
+			p = host->regs + MAIN_AREA0 + (col & ~3);
+		else
+			p = host->regs + SPARE_AREA0 -
+					mtd->writesize + (col & ~3);
+
+		if (((col | (int)&buf[i]) & 3) || n < 16) {
+			uint32_t data;
+
+			data = readl(p);
+			switch (col & 3) {
+			case 0:
+				if (n) {
+					buf[i++] = (uint8_t) (data);
+					n--;
+					col++;
+				}
+			case 1:
+				if (n) {
+					buf[i++] = (uint8_t) (data >> 8);
+					n--;
+					col++;
+				}
+			case 2:
+				if (n) {
+					buf[i++] = (uint8_t) (data >> 16);
+					n--;
+					col++;
+				}
+			case 3:
+				if (n) {
+					buf[i++] = (uint8_t) (data >> 24);
+					n--;
+					col++;
+				}
+			}
+		} else {
+			int m = mtd->writesize - col;
+
+			if (col >= mtd->writesize)
+				m += mtd->oobsize;
+
+			m = min(n, m) & ~3;
+			memcpy(&buf[i], p, m);
+			col += m;
+			i += m;
+			n -= m;
+		}
+	}
+	/* Update saved column address */
+	host->col_addr = col;
+
+}
+
+/* Used by the upper layer to verify the data in the NAND Flash
+ * against the data in buf. */
+static int mxc_nand_verify_buf(struct mtd_info *mtd,
+				const u_char *buf, int len)
+{
+	return -EFAULT;
+}
+
+/* This function is used by upper layer for select and
+ * deselect of the NAND chip */
+static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+
+#ifdef CONFIG_MTD_NAND_MXC_FORCE_CE
+	if (chip > 0) {
+		DEBUG(MTD_DEBUG_LEVEL0,
+		      "ERROR:  Illegal chip select (chip = %d)\n", chip);
+		return;
+	}
+
+	if (chip == -1) {
+		writew(readw(host->regs + NFC_CONFIG1) & ~NFC_CE,
+				host->regs + NFC_CONFIG1);
+		return;
+	}
+
+	writew(readw(host->regs + NFC_CONFIG1) | NFC_CE,
+			host->regs + NFC_CONFIG1);
+#endif
+
+	switch (chip) {
+	case -1:
+		/* Disable the NFC clock */
+		if (host->clk_act) {
+			clk_disable(host->clk);
+			host->clk_act = 0;
+		}
+		break;
+	case 0:
+		/* Enable the NFC clock */
+		if (!host->clk_act) {
+			clk_enable(host->clk);
+			host->clk_act = 1;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/* Used by the upper layer to issue a command to the NAND Flash for
+ * the different operations to be carried out on it */
+static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+				int column, int page_addr)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+	int useirq = true;
+
+	DEBUG(MTD_DEBUG_LEVEL3,
+	      "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+	      command, column, page_addr);
+
+	/* Reset command state information */
+	host->status_request = false;
+
+	/* Command pre-processing step */
+	switch (command) {
+
+	case NAND_CMD_STATUS:
+		host->col_addr = 0;
+		host->status_request = true;
+		break;
+
+	case NAND_CMD_READ0:
+		host->col_addr = column;
+		host->spare_only = false;
+		useirq = false;
+		break;
+
+	case NAND_CMD_READOOB:
+		host->col_addr = column;
+		host->spare_only = true;
+		useirq = false;
+		if (host->pagesize_2k)
+			command = NAND_CMD_READ0; /* only READ0 is valid */
+		break;
+
+	case NAND_CMD_SEQIN:
+		if (column >= mtd->writesize) {
+			/*
+			 * FIXME: before sending the SEQIN command for an OOB
+			 * write, we must read one page out.
+			 * Since the K9F1GXX has no READ1 command to set the
+			 * current HW pointer to the spare area, we must write
+			 * the whole page including the OOB in one go.
+			 */
+			if (host->pagesize_2k)
+				/* call ourself to read a page */
+				mxc_nand_command(mtd, NAND_CMD_READ0, 0,
+						page_addr);
+
+			host->col_addr = column - mtd->writesize;
+			host->spare_only = true;
+
+			/* Set program pointer to spare region */
+			if (!host->pagesize_2k)
+				send_cmd(host, NAND_CMD_READOOB, false);
+		} else {
+			host->spare_only = false;
+			host->col_addr = column;
+
+			/* Set program pointer to page start */
+			if (!host->pagesize_2k)
+				send_cmd(host, NAND_CMD_READ0, false);
+		}
+		useirq = false;
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		send_prog_page(host, 0, host->spare_only);
+
+		if (host->pagesize_2k) {
+			/* for a 2k page the data is spread over 4 buffer areas */
+			send_prog_page(host, 1, host->spare_only);
+			send_prog_page(host, 2, host->spare_only);
+			send_prog_page(host, 3, host->spare_only);
+		}
+
+		break;
+
+	case NAND_CMD_ERASE1:
+		useirq = false;
+		break;
+	}
+
+	/* Write out the command to the device. */
+	send_cmd(host, command, useirq);
+
+	/* Write out column address, if necessary */
+	if (column != -1) {
+		/*
+		 * MXC NANDFC can only perform full page+spare or
+		 * spare-only read/write.  When the upper layers
+		 * perform a read/write buf operation, we will use the
+		 * saved column address to index into the full page.
+		 */
+		send_addr(host, 0, page_addr == -1);
+		if (host->pagesize_2k)
+			/* another col addr cycle for 2k page */
+			send_addr(host, 0, false);
+	}
+
+	/* Write out page address, if necessary */
+	if (page_addr != -1) {
+		/* paddr_0 - p_addr_7 */
+		send_addr(host, (page_addr & 0xff), false);
+
+		if (host->pagesize_2k) {
+			send_addr(host, (page_addr >> 8) & 0xFF, false);
+			if (mtd->size >= 0x40000000)
+				send_addr(host, (page_addr >> 16) & 0xff, true);
+		} else {
+			/* One more address cycle for higher density devices */
+			if (mtd->size >= 0x4000000) {
+				/* paddr_8 - paddr_15 */
+				send_addr(host, (page_addr >> 8) & 0xff, false);
+				send_addr(host, (page_addr >> 16) & 0xff, true);
+			} else
+				/* paddr_8 - paddr_15 */
+				send_addr(host, (page_addr >> 8) & 0xff, true);
+		}
+	}
+
+	/* Command post-processing step */
+	switch (command) {
+
+	case NAND_CMD_RESET:
+		break;
+
+	case NAND_CMD_READOOB:
+	case NAND_CMD_READ0:
+		if (host->pagesize_2k) {
+			/* send read confirm command */
+			send_cmd(host, NAND_CMD_READSTART, true);
+			/* read for each AREA */
+			send_read_page(host, 0, host->spare_only);
+			send_read_page(host, 1, host->spare_only);
+			send_read_page(host, 2, host->spare_only);
+			send_read_page(host, 3, host->spare_only);
+		} else
+			send_read_page(host, 0, host->spare_only);
+		break;
+
+	case NAND_CMD_READID:
+		send_read_id(host);
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		break;
+
+	case NAND_CMD_STATUS:
+		break;
+
+	case NAND_CMD_ERASE2:
+		break;
+	}
+}
+
+static int __init mxcnd_probe(struct platform_device *pdev)
+{
+	struct nand_chip *this;
+	struct mtd_info *mtd;
+	struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
+	struct mxc_nand_host *host;
+	struct resource *res;
+	uint16_t tmp;
+	int err = 0, nr_parts = 0;
+
+	/* Allocate memory for MTD device structure and private data */
+	host = kzalloc(sizeof(struct mxc_nand_host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->dev = &pdev->dev;
+	/* structures must be linked */
+	this = &host->nand;
+	mtd = &host->mtd;
+	mtd->priv = this;
+	mtd->owner = THIS_MODULE;
+
+	/* 50 us command delay time */
+	this->chip_delay = 5;
+
+	this->priv = host;
+	this->dev_ready = mxc_nand_dev_ready;
+	this->cmdfunc = mxc_nand_command;
+	this->select_chip = mxc_nand_select_chip;
+	this->read_byte = mxc_nand_read_byte;
+	this->read_word = mxc_nand_read_word;
+	this->write_buf = mxc_nand_write_buf;
+	this->read_buf = mxc_nand_read_buf;
+	this->verify_buf = mxc_nand_verify_buf;
+
+	host->clk = clk_get(&pdev->dev, "nfc_clk");
+	if (IS_ERR(host->clk))
+		goto eclk;
+
+	clk_enable(host->clk);
+	host->clk_act = 1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -ENODEV;
+		goto eres;
+	}
+
+	host->regs = ioremap(res->start, res->end - res->start + 1);
+	if (!host->regs) {
+		err = -EIO;
+		goto eres;
+	}
+
+	tmp = readw(host->regs + NFC_CONFIG1);
+	tmp |= NFC_INT_MSK;
+	writew(tmp, host->regs + NFC_CONFIG1);
+
+	init_waitqueue_head(&host->irq_waitq);
+
+	host->irq = platform_get_irq(pdev, 0);
+
+	err = request_irq(host->irq, mxc_nfc_irq, 0, "mxc_nd", host);
+	if (err)
+		goto eirq;
+
+	if (pdata->hw_ecc) {
+		this->ecc.calculate = mxc_nand_calculate_ecc;
+		this->ecc.hwctl = mxc_nand_enable_hwecc;
+		this->ecc.correct = mxc_nand_correct_data;
+		this->ecc.mode = NAND_ECC_HW;
+		this->ecc.size = 512;
+		this->ecc.bytes = 3;
+		this->ecc.layout = &nand_hw_eccoob_8;
+		tmp = readw(host->regs + NFC_CONFIG1);
+		tmp |= NFC_ECC_EN;
+		writew(tmp, host->regs + NFC_CONFIG1);
+	} else {
+		this->ecc.size = 512;
+		this->ecc.bytes = 3;
+		this->ecc.layout = &nand_hw_eccoob_8;
+		this->ecc.mode = NAND_ECC_SOFT;
+		tmp = readw(host->regs + NFC_CONFIG1);
+		tmp &= ~NFC_ECC_EN;
+		writew(tmp, host->regs + NFC_CONFIG1);
+	}
+
+	/* Reset NAND */
+	this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+	/* preset operation */
+	/* Unlock the internal RAM Buffer */
+	writew(0x2, host->regs + NFC_CONFIG);
+
+	/* Blocks to be unlocked */
+	writew(0x0, host->regs + NFC_UNLOCKSTART_BLKADDR);
+	writew(0x4000, host->regs + NFC_UNLOCKEND_BLKADDR);
+
+	/* Unlock Block Command for given address range */
+	writew(0x4, host->regs + NFC_WRPROT);
+
+	/* NAND bus width determines the access functions used by the upper layer */
+	if (pdata->width == 2) {
+		this->options |= NAND_BUSWIDTH_16;
+		this->ecc.layout = &nand_hw_eccoob_16;
+	}
+
+	host->pagesize_2k = 0;
+
+	/* Scan to find existence of the device */
+	if (nand_scan(mtd, 1)) {
+		DEBUG(MTD_DEBUG_LEVEL0,
+		      "MXC_ND: Unable to find any NAND device.\n");
+		err = -ENXIO;
+		goto escan;
+	}
+
+	/* Register the partitions */
+#ifdef CONFIG_MTD_PARTITIONS
+	nr_parts =
+	    parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
+	if (nr_parts > 0)
+		add_mtd_partitions(mtd, host->parts, nr_parts);
+	else
+#endif
+	{
+		pr_info("Registering %s as whole device\n", mtd->name);
+		add_mtd_device(mtd);
+	}
+
+	platform_set_drvdata(pdev, host);
+
+	return 0;
+
+escan:
+	free_irq(host->irq, host);
+eirq:
+	iounmap(host->regs);
+eres:
+	clk_put(host->clk);
+eclk:
+	kfree(host);
+
+	return err;
+}
+
+static int __devexit mxcnd_remove(struct platform_device *pdev)
+{
+	struct mxc_nand_host *host = platform_get_drvdata(pdev);
+
+	clk_put(host->clk);
+
+	platform_set_drvdata(pdev, NULL);
+
+	nand_release(&host->mtd);
+	free_irq(host->irq, host);
+	iounmap(host->regs);
+	kfree(host);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mtd_info *info = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
+	if (info)
+		ret = info->suspend(info);
+
+	/* Disable the NFC clock */
+	clk_disable(nfc_clk);	/* FIXME */
+
+	return ret;
+}
+
+static int mxcnd_resume(struct platform_device *pdev)
+{
+	struct mtd_info *info = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
+	/* Enable the NFC clock */
+	clk_enable(nfc_clk);	/* FIXME */
+
+	if (info)
+		info->resume(info);
+
+	return ret;
+}
+
+#else
+# define mxcnd_suspend   NULL
+# define mxcnd_resume    NULL
+#endif				/* CONFIG_PM */
+
+static struct platform_driver mxcnd_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   },
+	.remove = __exit_p(mxcnd_remove),
+	.suspend = mxcnd_suspend,
+	.resume = mxcnd_resume,
+};
+
+static int __init mxc_nd_init(void)
+{
+	/* Register the device driver structure. */
+	pr_info("MXC MTD nand Driver\n");
+	if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) {
+		printk(KERN_ERR "Driver register failed for mxcnd_driver\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void __exit mxc_nd_cleanup(void)
+{
+	/* Unregister the device structure */
+	platform_driver_unregister(&mxcnd_driver);
+}
+
+module_init(mxc_nd_init);
+module_exit(mxc_nd_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d1129ba..0a9c9cd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -801,9 +801,9 @@
  * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
  * @mtd:	mtd info structure
  * @chip:	nand chip info structure
- * @dataofs	offset of requested data within the page
- * @readlen	data length
- * @buf:	buffer to store read data
+ * @data_offs:	offset of requested data within the page
+ * @readlen:	data length
+ * @bufpoi:	buffer to store read data
  */
 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
 {
@@ -2042,7 +2042,7 @@
 		return -EINVAL;
 	}
 
-	instr->fail_addr = 0xffffffff;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	/* Grab the lock and see if the device is available */
 	nand_get_device(chip, mtd, FL_ERASING);
@@ -2318,6 +2318,12 @@
 	/* Select the device */
 	chip->select_chip(mtd, 0);
 
+	/*
+	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+	 * after power-up
+	 */
+	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
 	/* Send the command for reading device ID */
 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
 
@@ -2488,6 +2494,8 @@
 	/* Check for a chip array */
 	for (i = 1; i < maxchips; i++) {
 		chip->select_chip(mtd, i);
+		/* See comment in nand_get_flash_type for reset */
+		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
 		/* Send the command for reading device ID */
 		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
 		/* Read manufacturer and device IDs */
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 918a806..868147a 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -1,13 +1,18 @@
 /*
- * This file contains an ECC algorithm from Toshiba that detects and
- * corrects 1 bit errors in a 256 byte block of data.
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
  *
  * drivers/mtd/nand/nand_ecc.c
  *
- * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
- *                         Toshiba America Electronics Components, Inc.
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ *                  Author: Frans Meulenbroeks
  *
- * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
+ * Completely replaces the previous ECC implementation which was written by:
+ *   Steven J. Hill (sjhill@realitydiluted.com)
+ *   Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/mtd/nand_ecc.txt
  *
  * This file is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -23,174 +28,475 @@
  * with this file; if not, write to the Free Software Foundation, Inc.,
  * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  *
- * As a special exception, if other files instantiate templates or use
- * macros or inline functions from these files, or you compile these
- * files and link them with other works to produce a work based on these
- * files, these files do not by themselves cause the resulting work to be
- * covered by the GNU General Public License. However the source code for
- * these files must still be made available in accordance with section (3)
- * of the GNU General Public License.
- *
- * This exception does not invalidate any other reasons why a work based on
- * this file might be covered by the GNU General Public License.
  */
 
+/*
+ * The STANDALONE macro is useful when running the code outside the kernel
+ * e.g. when running the code in a testbed or a benchmark program.
+ * When STANDALONE is used, the module related macros are commented out
+ * as well as the linux include files.
+ * Instead a private definition of mtd_info is given to satisfy the compiler
+ * (the code does not use mtd_info, so the code does not care)
+ */
+#ifndef STANDALONE
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
+#include <asm/byteorder.h>
+#else
+#include <stdint.h>
+struct mtd_info;
+#define EXPORT_SYMBOL(x)  /* x */
+
+#define MODULE_LICENSE(x)	/* x */
+#define MODULE_AUTHOR(x)	/* x */
+#define MODULE_DESCRIPTION(x)	/* x */
+
+#define printk printf
+#define KERN_ERR		""
+#endif
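
The STANDALONE path above is meant for exercising the algorithm outside the kernel, e.g. in a benchmark or testbed. A minimal user-space sketch of such a harness follows; it is illustrative only and not part of the patch. The stand-in type definitions are an assumption needed because the new code reads ecc.size through mtd->priv, and the uninitialized_var() stub is an assumption because that kernel macro is not provided on the STANDALONE path.

/* build: gcc -DSTANDALONE -o ecc_test ecc_test.c   (with nand_ecc.c alongside) */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Stand-ins for kernel types/macros the STANDALONE path does not provide */
struct nand_chip { struct { uint32_t size; } ecc; };
struct mtd_info  { void *priv; };
#define uninitialized_var(x) x

#include "nand_ecc.c"

int main(void)
{
	struct nand_chip chip = { .ecc = { .size = 256 } };
	struct mtd_info mtd = { .priv = &chip };
	unsigned char data[256], ecc[3], bad_ecc[3];

	memset(data, 0xff, sizeof(data));
	nand_calculate_ecc(&mtd, data, ecc);	/* ECC of the "written" data */

	data[10] ^= 0x04;			/* flip one bit on "read back" */
	nand_calculate_ecc(&mtd, data, bad_ecc);
	/* should print 1 (single-bit error corrected) and repair data[10] */
	printf("correct: %d\n", nand_correct_data(&mtd, data, ecc, bad_ecc));
	return 0;
}
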
 
 /*
- * Pre-calculated 256-way 1 byte column parity
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
  */
-static const u_char nand_ecc_precalc_table[] = {
-	0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
-	0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
-	0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
-	0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
-	0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
-	0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
-	0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
-	0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
-	0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
-	0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
-	0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
-	0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
-	0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
-	0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
-	0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
-	0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
+static const char invparity[256] = {
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
+
+/*
+ * bitsperbyte contains the number of bits per byte
+ * this is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance)
+ */
+static const char bitsperbyte[256] = {
+	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
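
Both lookup tables above are mechanical: invparity[i] is 1 exactly when i has an even number of set bits, and bitsperbyte[i] is the population count of i. A small illustrative generator that reproduces them (the GCC/Clang builtins used here are an assumption, not something the driver relies on):

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 256; i++)	/* invparity: 1 for even bit count */
		printf("%d%s", !__builtin_parity(i), (i % 16 == 15) ? ",\n" : ", ");
	for (i = 0; i < 256; i++)	/* bitsperbyte: number of set bits */
		printf("%d%s", __builtin_popcount(i), (i % 16 == 15) ? ",\n" : ", ");
	return 0;
}
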
+
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ecc data that identify the faulty location.
+ * this is only used for repairing parity
+ * see the comments in nand_correct_data for more details
+ */
+static const char addressbits[256] = {
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
 };
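
addressbits[] simply packs the odd-numbered bits of its index (bits 1, 3, 5 and 7) into a 4-bit value; those are the bit positions that survive the xor of stored and recomputed ECC for a correctable error, as used in nand_correct_data() below. An equivalent computation, for illustration only:

#include <stdio.h>

static unsigned char addrbits(unsigned char x)
{
	return ((x >> 1) & 1) |
	       (((x >> 3) & 1) << 1) |
	       (((x >> 5) & 1) << 2) |
	       (((x >> 7) & 1) << 3);
}

int main(void)
{
	int i;

	for (i = 0; i < 256; i++)	/* regenerates the addressbits[] table */
		printf("0x%02x%s", addrbits(i), (i % 8 == 7) ? ",\n" : ", ");
	return 0;
}
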
 
 /**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ *			 block
  * @mtd:	MTD block structure
- * @dat:	raw data
- * @ecc_code:	buffer for ECC
+ * @buf:	input buffer with raw data
+ * @code:	output buffer with ECC
  */
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-		       u_char *ecc_code)
+int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+		       unsigned char *code)
 {
-	uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
 	int i;
+	const uint32_t *bp = (uint32_t *)buf;
+	/* 256 or 512 bytes/ecc  */
+	const uint32_t eccsize_mult =
+			(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
+	uint32_t cur;		/* current value in buffer */
+	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
+	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
+	uint32_t par;		/* the cumulative parity for all data */
+	uint32_t tmppar;	/* the cumulative parity for this iteration;
+				   for rp12, rp14 and rp16 at the end of the
+				   loop */
 
-	/* Initialize variables */
-	reg1 = reg2 = reg3 = 0;
+	par = 0;
+	rp4 = 0;
+	rp6 = 0;
+	rp8 = 0;
+	rp10 = 0;
+	rp12 = 0;
+	rp14 = 0;
+	rp16 = 0;
 
-	/* Build up column parity */
-	for(i = 0; i < 256; i++) {
-		/* Get CP0 - CP5 from table */
-		idx = nand_ecc_precalc_table[*dat++];
-		reg1 ^= (idx & 0x3f);
+	/*
+	 * The loop is unrolled a number of times;
+	 * This avoids if statements to decide on which rp value to update
+	 * Also we process the data by longwords.
+	 * Note: passing unaligned data might give a performance penalty.
+	 * It is assumed that the buffers are aligned.
+	 * tmppar is the cumulative sum of this iteration.
+	 * needed for calculating rp12, rp14, rp16 and par
+	 * also used as a performance improvement for rp6, rp8 and rp10
+	 */
+	for (i = 0; i < eccsize_mult << 2; i++) {
+		cur = *bp++;
+		tmppar = cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= tmppar;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp8 ^= tmppar;
 
-		/* All bit XOR = 1 ? */
-		if (idx & 0x40) {
-			reg3 ^= (uint8_t) i;
-			reg2 ^= ~((uint8_t) i);
-		}
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp10 ^= tmppar;
+
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp6 ^= cur;
+		rp8 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= cur;
+		rp8 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp8 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp8 ^= cur;
+
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+
+		par ^= tmppar;
+		if ((i & 0x1) == 0)
+			rp12 ^= tmppar;
+		if ((i & 0x2) == 0)
+			rp14 ^= tmppar;
+		if (eccsize_mult == 2 && (i & 0x4) == 0)
+			rp16 ^= tmppar;
 	}
 
-	/* Create non-inverted ECC code from line parity */
-	tmp1  = (reg3 & 0x80) >> 0; /* B7 -> B7 */
-	tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */
-	tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */
-	tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */
-	tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */
-	tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */
-	tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */
-	tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */
+	/*
+	 * handle the fact that we use longword operations
+	 * we'll bring rp4..rp14..rp16 back to single byte entities by
+	 * shifting and xoring first fold the upper and lower 16 bits,
+	 * then the upper and lower 8 bits.
+	 */
+	rp4 ^= (rp4 >> 16);
+	rp4 ^= (rp4 >> 8);
+	rp4 &= 0xff;
+	rp6 ^= (rp6 >> 16);
+	rp6 ^= (rp6 >> 8);
+	rp6 &= 0xff;
+	rp8 ^= (rp8 >> 16);
+	rp8 ^= (rp8 >> 8);
+	rp8 &= 0xff;
+	rp10 ^= (rp10 >> 16);
+	rp10 ^= (rp10 >> 8);
+	rp10 &= 0xff;
+	rp12 ^= (rp12 >> 16);
+	rp12 ^= (rp12 >> 8);
+	rp12 &= 0xff;
+	rp14 ^= (rp14 >> 16);
+	rp14 ^= (rp14 >> 8);
+	rp14 &= 0xff;
+	if (eccsize_mult == 2) {
+		rp16 ^= (rp16 >> 16);
+		rp16 ^= (rp16 >> 8);
+		rp16 &= 0xff;
+	}
 
-	tmp2  = (reg3 & 0x08) << 4; /* B3 -> B7 */
-	tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */
-	tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */
-	tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */
-	tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */
-	tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */
-	tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */
-	tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */
-
-	/* Calculate final ECC code */
-#ifdef CONFIG_MTD_NAND_ECC_SMC
-	ecc_code[0] = ~tmp2;
-	ecc_code[1] = ~tmp1;
+	/*
+	 * we also need to calculate the row parity for rp0..rp3
+	 * This is present in par, because par is now
+	 * rp3 rp3 rp2 rp2 in little endian and
+	 * rp2 rp2 rp3 rp3 in big endian
+	 * as well as
+	 * rp1 rp0 rp1 rp0 in little endian and
+	 * rp0 rp1 rp0 rp1 in big endian
+	 * First calculate rp2 and rp3
+	 */
+#ifdef __BIG_ENDIAN
+	rp2 = (par >> 16);
+	rp2 ^= (rp2 >> 8);
+	rp2 &= 0xff;
+	rp3 = par & 0xffff;
+	rp3 ^= (rp3 >> 8);
+	rp3 &= 0xff;
 #else
-	ecc_code[0] = ~tmp1;
-	ecc_code[1] = ~tmp2;
+	rp3 = (par >> 16);
+	rp3 ^= (rp3 >> 8);
+	rp3 &= 0xff;
+	rp2 = par & 0xffff;
+	rp2 ^= (rp2 >> 8);
+	rp2 &= 0xff;
 #endif
-	ecc_code[2] = ((~reg1) << 2) | 0x03;
 
+	/* reduce par to 16 bits then calculate rp1 and rp0 */
+	par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+	rp0 = (par >> 8) & 0xff;
+	rp1 = (par & 0xff);
+#else
+	rp1 = (par >> 8) & 0xff;
+	rp0 = (par & 0xff);
+#endif
+
+	/* finally reduce par to 8 bits */
+	par ^= (par >> 8);
+	par &= 0xff;
+
+	/*
+	 * and calculate rp5..rp15..rp17
+	 * note that par = rp4 ^ rp5 and due to the commutative property
+	 * of the ^ operator we can say:
+	 * rp5 = (par ^ rp4);
+	 * The & 0xff seems superfluous, but benchmarking learned that
+	 * leaving it out gives slightly worse results. No idea why, probably
+	 * it has to do with the way the pipeline in pentium is organized.
+	 */
+	rp5 = (par ^ rp4) & 0xff;
+	rp7 = (par ^ rp6) & 0xff;
+	rp9 = (par ^ rp8) & 0xff;
+	rp11 = (par ^ rp10) & 0xff;
+	rp13 = (par ^ rp12) & 0xff;
+	rp15 = (par ^ rp14) & 0xff;
+	if (eccsize_mult == 2)
+		rp17 = (par ^ rp16) & 0xff;
+
+	/*
+	 * Finally calculate the ecc bits.
+	 * Again it might seem that there are performance optimisations
+	 * possible, but benchmarks showed that on the system this was developed
+	 * on, the code below is the fastest.
+	 */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+	code[0] =
+	    (invparity[rp7] << 7) |
+	    (invparity[rp6] << 6) |
+	    (invparity[rp5] << 5) |
+	    (invparity[rp4] << 4) |
+	    (invparity[rp3] << 3) |
+	    (invparity[rp2] << 2) |
+	    (invparity[rp1] << 1) |
+	    (invparity[rp0]);
+	code[1] =
+	    (invparity[rp15] << 7) |
+	    (invparity[rp14] << 6) |
+	    (invparity[rp13] << 5) |
+	    (invparity[rp12] << 4) |
+	    (invparity[rp11] << 3) |
+	    (invparity[rp10] << 2) |
+	    (invparity[rp9] << 1)  |
+	    (invparity[rp8]);
+#else
+	code[1] =
+	    (invparity[rp7] << 7) |
+	    (invparity[rp6] << 6) |
+	    (invparity[rp5] << 5) |
+	    (invparity[rp4] << 4) |
+	    (invparity[rp3] << 3) |
+	    (invparity[rp2] << 2) |
+	    (invparity[rp1] << 1) |
+	    (invparity[rp0]);
+	code[0] =
+	    (invparity[rp15] << 7) |
+	    (invparity[rp14] << 6) |
+	    (invparity[rp13] << 5) |
+	    (invparity[rp12] << 4) |
+	    (invparity[rp11] << 3) |
+	    (invparity[rp10] << 2) |
+	    (invparity[rp9] << 1)  |
+	    (invparity[rp8]);
+#endif
+	if (eccsize_mult == 1)
+		code[2] =
+		    (invparity[par & 0xf0] << 7) |
+		    (invparity[par & 0x0f] << 6) |
+		    (invparity[par & 0xcc] << 5) |
+		    (invparity[par & 0x33] << 4) |
+		    (invparity[par & 0xaa] << 3) |
+		    (invparity[par & 0x55] << 2) |
+		    3;
+	else
+		code[2] =
+		    (invparity[par & 0xf0] << 7) |
+		    (invparity[par & 0x0f] << 6) |
+		    (invparity[par & 0xcc] << 5) |
+		    (invparity[par & 0x33] << 4) |
+		    (invparity[par & 0xaa] << 3) |
+		    (invparity[par & 0x55] << 2) |
+		    (invparity[rp17] << 1) |
+		    (invparity[rp16] << 0);
 	return 0;
 }
 EXPORT_SYMBOL(nand_calculate_ecc);
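
For reference, the row parities that the unrolled, longword-based loop in nand_calculate_ecc() accumulates can be described much more simply (and much more slowly): rp[2k] is the xor of all bytes whose address has bit k cleared, rp[2k+1] of those with bit k set, and the six column parities are derived from par, the xor of all data bytes. The sketch below is an unoptimised reference for illustration only, not driver code.

/* Unoptimised reference for the row parities (len is 256 or 512). */
static void rowparity_reference(const unsigned char *buf, int len,
				unsigned char rp[18])
{
	int i, k;

	for (k = 0; k < 18; k++)
		rp[k] = 0;
	for (i = 0; i < len; i++)
		for (k = 0; (1 << k) < len; k++)
			rp[2 * k + ((i >> k) & 1)] ^= buf[i];
}

The identity rp5 = par ^ rp4 (and likewise for the other odd parities) exploited above follows directly from this description, because every byte contributes to exactly one parity of each pair.
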
 
-static inline int countbits(uint32_t byte)
-{
-	int res = 0;
-
-	for (;byte; byte >>= 1)
-		res += byte & 0x01;
-	return res;
-}
-
 /**
  * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
  * @mtd:	MTD block structure
- * @dat:	raw data read from the chip
+ * @buf:	raw data read from the chip
  * @read_ecc:	ECC from the chip
  * @calc_ecc:	the ECC calculated from raw data
  *
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
  */
-int nand_correct_data(struct mtd_info *mtd, u_char *dat,
-		      u_char *read_ecc, u_char *calc_ecc)
+int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+		      unsigned char *read_ecc, unsigned char *calc_ecc)
 {
-	uint8_t s0, s1, s2;
+	unsigned char b0, b1, b2;
+	unsigned char byte_addr, bit_addr;
+	/* 256 or 512 bytes/ecc  */
+	const uint32_t eccsize_mult =
+			(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
 
+	/*
+	 * b0 to b2 indicate which bit is faulty (if any)
+	 * we might need the xor result more than once,
+	 * so keep them in a local var
+	 */
 #ifdef CONFIG_MTD_NAND_ECC_SMC
-	s0 = calc_ecc[0] ^ read_ecc[0];
-	s1 = calc_ecc[1] ^ read_ecc[1];
-	s2 = calc_ecc[2] ^ read_ecc[2];
+	b0 = read_ecc[0] ^ calc_ecc[0];
+	b1 = read_ecc[1] ^ calc_ecc[1];
 #else
-	s1 = calc_ecc[0] ^ read_ecc[0];
-	s0 = calc_ecc[1] ^ read_ecc[1];
-	s2 = calc_ecc[2] ^ read_ecc[2];
+	b0 = read_ecc[1] ^ calc_ecc[1];
+	b1 = read_ecc[0] ^ calc_ecc[0];
 #endif
-	if ((s0 | s1 | s2) == 0)
-		return 0;
+	b2 = read_ecc[2] ^ calc_ecc[2];
 
-	/* Check for a single bit error */
-	if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
-	    ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
-	    ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
+	/* check if there are any bitfaults */
 
-		uint32_t byteoffs, bitnum;
+	/* repeated if statements are slightly more efficient than switch ... */
+	/* ordered by likelihood */
 
-		byteoffs = (s1 << 0) & 0x80;
-		byteoffs |= (s1 << 1) & 0x40;
-		byteoffs |= (s1 << 2) & 0x20;
-		byteoffs |= (s1 << 3) & 0x10;
+	if ((b0 | b1 | b2) == 0)
+		return 0;	/* no error */
 
-		byteoffs |= (s0 >> 4) & 0x08;
-		byteoffs |= (s0 >> 3) & 0x04;
-		byteoffs |= (s0 >> 2) & 0x02;
-		byteoffs |= (s0 >> 1) & 0x01;
-
-		bitnum = (s2 >> 5) & 0x04;
-		bitnum |= (s2 >> 4) & 0x02;
-		bitnum |= (s2 >> 3) & 0x01;
-
-		dat[byteoffs] ^= (1 << bitnum);
-
+	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+		/* single bit error */
+		/*
+		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+		 * byte, cp5/cp3/cp1 indicate the faulty bit.
+		 * A lookup table (called addressbits) is used to filter
+		 * the bits from the byte they are in.
+		 * A marginal optimisation is possible by having three
+		 * different lookup tables.
+		 * One as we have now (for b0), one for b2
+		 * (that would avoid the >> 1), and one for b1 (with all values
+		 * << 4). However it was felt that introducing two more tables
+		 * hardly justify the gain.
+		 *
+		 * The b2 shift is there to get rid of the lowest two bits.
+		 * We could also do addressbits[b2] >> 1 but for the
+		 * performace it does not make any difference
+		 */
+		if (eccsize_mult == 1)
+			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+		else
+			byte_addr = (addressbits[b2 & 0x3] << 8) +
+				    (addressbits[b1] << 4) + addressbits[b0];
+		bit_addr = addressbits[b2 >> 2];
+		/* flip the bit */
+		buf[byte_addr] ^= (1 << bit_addr);
 		return 1;
+
 	}
+	/* count nr of bits; use table lookup, faster than calculating it */
+	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+		return 1;	/* error in ecc data; no action needed */
 
-	if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1)
-		return 1;
-
-	return -EBADMSG;
+	printk(KERN_ERR "uncorrectable error : ");
+	return -1;
 }
 EXPORT_SYMBOL(nand_correct_data);
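
To make the address reconstruction above concrete: for a correctable single-bit error, every (rp2k, rp2k+1) pair in the xor of the stored and recomputed ECC differs in exactly one of its two bits, and the odd-numbered bit of each pair is set exactly when the corresponding address bit of the faulty location is 1; addressbits[] just collects those odd-numbered bits. The helper below restates the driver's decode for the 256-byte case and is illustrative only.

static unsigned int pack_odd_bits(unsigned char x)
{
	return ((x >> 1) & 1) | (((x >> 3) & 1) << 1) |
	       (((x >> 5) & 1) << 2) | (((x >> 7) & 1) << 3);
}

/* 256-byte case; for 512 bytes, bit 1 of b2 additionally supplies
 * byte-address bit 8 (the addressbits[b2 & 0x3] << 8 term above). */
static void decode_single_bit_error(unsigned char b0, unsigned char b1,
				    unsigned char b2,
				    unsigned int *byte_addr,
				    unsigned int *bit_addr)
{
	/* odd bits of b1 give byte-address bits 7..4, odd bits of b0 bits 3..0 */
	*byte_addr = (pack_odd_bits(b1) << 4) | pack_odd_bits(b0);
	/* odd bits of b2 above its two LSBs give the bit number within the byte */
	*bit_addr  = pack_odd_bits(b2 >> 2);
}
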
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
 MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 556e8131..ae7c577 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -38,7 +38,6 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/random.h>
-#include <asm/div64.h>
 
 /* Default simulator parameters values */
 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index a64ad15..c0fa9c9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -115,55 +115,11 @@
 	STATE_PIO_WRITING,
 };
 
-struct pxa3xx_nand_timing {
-	unsigned int	tCH;  /* Enable signal hold time */
-	unsigned int	tCS;  /* Enable signal setup time */
-	unsigned int	tWH;  /* ND_nWE high duration */
-	unsigned int	tWP;  /* ND_nWE pulse time */
-	unsigned int	tRH;  /* ND_nRE high duration */
-	unsigned int	tRP;  /* ND_nRE pulse width */
-	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
-	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
-	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
-};
-
-struct pxa3xx_nand_cmdset {
-	uint16_t	read1;
-	uint16_t	read2;
-	uint16_t	program;
-	uint16_t	read_status;
-	uint16_t	read_id;
-	uint16_t	erase;
-	uint16_t	reset;
-	uint16_t	lock;
-	uint16_t	unlock;
-	uint16_t	lock_status;
-};
-
-struct pxa3xx_nand_flash {
-	struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
-	struct pxa3xx_nand_cmdset *cmdset;
-
-	uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
-	uint32_t page_size;	/* Page size in bytes (PAGE_SZ) */
-	uint32_t flash_width;	/* Width of Flash memory (DWIDTH_M) */
-	uint32_t dfc_width;	/* Width of flash controller(DWIDTH_C) */
-	uint32_t num_blocks;	/* Number of physical blocks in Flash */
-	uint32_t chip_id;
-
-	/* NOTE: these are automatically calculated, do not define */
-	size_t		oob_size;
-	size_t		read_id_bytes;
-
-	unsigned int	col_addr_cycles;
-	unsigned int	row_addr_cycles;
-};
-
 struct pxa3xx_nand_info {
 	struct nand_chip	nand_chip;
 
 	struct platform_device	 *pdev;
-	struct pxa3xx_nand_flash *flash_info;
+	const struct pxa3xx_nand_flash *flash_info;
 
 	struct clk		*clk;
 	void __iomem		*mmio_base;
@@ -202,12 +158,20 @@
 	uint32_t		ndcb0;
 	uint32_t		ndcb1;
 	uint32_t		ndcb2;
+
+	/* calculated from pxa3xx_nand_flash data */
+	size_t		oob_size;
+	size_t		read_id_bytes;
+
+	unsigned int	col_addr_cycles;
+	unsigned int	row_addr_cycles;
 };
 
 static int use_dma = 1;
 module_param(use_dma, bool, 0444);
 MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW");
 
+#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
 static struct pxa3xx_nand_cmdset smallpage_cmdset = {
 	.read1		= 0x0000,
 	.read2		= 0x0050,
@@ -291,11 +255,35 @@
 	.chip_id	= 0xb12c,
 };
 
+static struct pxa3xx_nand_timing stm2GbX16_timing = {
+	.tCH = 10,
+	.tCS = 35,
+	.tWH = 15,
+	.tWP = 25,
+	.tRH = 15,
+	.tRP = 25,
+	.tR = 25000,
+	.tWHR = 60,
+	.tAR = 10,
+};
+
+static struct pxa3xx_nand_flash stm2GbX16 = {
+	.timing = &stm2GbX16_timing,
+	.page_per_block = 64,
+	.page_size = 2048,
+	.flash_width = 16,
+	.dfc_width = 16,
+	.num_blocks = 2048,
+	.chip_id = 0xba20,
+};
+
 static struct pxa3xx_nand_flash *builtin_flash_types[] = {
 	&samsung512MbX16,
 	&micron1GbX8,
 	&micron1GbX16,
+	&stm2GbX16,
 };
+#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */
 
 #define NDTR0_tCH(c)	(min((c), 7) << 19)
 #define NDTR0_tCS(c)	(min((c), 7) << 16)
@@ -312,7 +300,7 @@
 #define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) + 1)
 
 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
-				   struct pxa3xx_nand_timing *t)
+				   const struct pxa3xx_nand_timing *t)
 {
 	unsigned long nand_clk = clk_get_rate(info->clk);
 	uint32_t ndtr0, ndtr1;
@@ -354,8 +342,8 @@
 static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
 			uint16_t cmd, int column, int page_addr)
 {
-	struct pxa3xx_nand_flash *f = info->flash_info;
-	struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
+	const struct pxa3xx_nand_flash *f = info->flash_info;
+	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
 
 	/* calculate data size */
 	switch (f->page_size) {
@@ -373,14 +361,14 @@
 	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
 	info->ndcb1 = 0;
 	info->ndcb2 = 0;
-	info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles);
+	info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);
 
-	if (f->col_addr_cycles == 2) {
+	if (info->col_addr_cycles == 2) {
 		/* large block, 2 cycles for column address
 		 * row address starts from 3rd cycle
 		 */
 		info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
-		if (f->row_addr_cycles == 3)
+		if (info->row_addr_cycles == 3)
 			info->ndcb2 = (page_addr >> 16) & 0xff;
 	} else
 		/* small block, 1 cycles for column address
@@ -406,7 +394,7 @@
 
 static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
 {
-	struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
+	const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
 
 	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
 	info->ndcb1 = 0;
@@ -641,8 +629,8 @@
 				int column, int page_addr)
 {
 	struct pxa3xx_nand_info *info = mtd->priv;
-	struct pxa3xx_nand_flash *flash_info = info->flash_info;
-	struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
+	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
+	const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
 	int ret;
 
 	info->use_dma = (use_dma) ? 1 : 0;
@@ -720,7 +708,7 @@
 		info->use_dma = 0;	/* force PIO read */
 		info->buf_start = 0;
 		info->buf_count = (command == NAND_CMD_READID) ?
-				flash_info->read_id_bytes : 1;
+				info->read_id_bytes : 1;
 
 		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
 				cmdset->read_id : cmdset->read_status))
@@ -861,8 +849,8 @@
 
 static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
 {
-	struct pxa3xx_nand_flash *f = info->flash_info;
-	struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
+	const struct pxa3xx_nand_flash *f = info->flash_info;
+	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
 	uint32_t ndcr;
 	uint8_t  id_buff[8];
 
@@ -891,7 +879,7 @@
 }
 
 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
-				    struct pxa3xx_nand_flash *f)
+				    const struct pxa3xx_nand_flash *f)
 {
 	struct platform_device *pdev = info->pdev;
 	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
@@ -904,25 +892,25 @@
 		return -EINVAL;
 
 	/* calculate flash information */
-	f->oob_size = (f->page_size == 2048) ? 64 : 16;
-	f->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+	info->oob_size = (f->page_size == 2048) ? 64 : 16;
+	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
 
 	/* calculate addressing information */
-	f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
 
 	if (f->num_blocks * f->page_per_block > 65536)
-		f->row_addr_cycles = 3;
+		info->row_addr_cycles = 3;
 	else
-		f->row_addr_cycles = 2;
+		info->row_addr_cycles = 2;
 
 	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
-	ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
 	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
 	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
 	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
 	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
 
-	ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes);
+	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
 	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
 
 	info->reg_ndcr = ndcr;
@@ -932,12 +920,27 @@
 	return 0;
 }
 
-static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info)
+static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
+				    const struct pxa3xx_nand_platform_data *pdata)
 {
-	struct pxa3xx_nand_flash *f;
-	uint32_t id;
+	const struct pxa3xx_nand_flash *f;
+	uint32_t id = -1;
 	int i;
 
+	for (i = 0; i<pdata->num_flash; ++i) {
+		f = pdata->flash + i;
+
+		if (pxa3xx_nand_config_flash(info, f))
+			continue;
+
+		if (__readid(info, &id))
+			continue;
+
+		if (id == f->chip_id)
+			return 0;
+	}
+
+#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
 	for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
 
 		f = builtin_flash_types[i];
@@ -951,7 +954,11 @@
 		if (id == f->chip_id)
 			return 0;
 	}
+#endif
 
+	dev_warn(&info->pdev->dev,
+		 "failed to detect configured nand flash; found %04x instead of\n",
+		 id);
 	return -ENODEV;
 }
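
With pxa3xx_nand_detect_flash() now consulting pdata->flash first, a board can describe its NAND part entirely through platform data instead of relying on the builtin table. The sketch below is illustrative only: the timing values, geometry and chip ID are placeholders modelled on the builtin entries above, and the platform-data field names (flash, num_flash, enable_arbiter) are taken from the code in this patch.

static struct pxa3xx_nand_timing board_nand_timing = {
	.tCH = 10, .tCS = 35, .tWH = 15, .tWP = 25,
	.tRH = 15, .tRP = 25, .tR = 25000, .tWHR = 60, .tAR = 10,
};

static struct pxa3xx_nand_flash board_nand_flash = {
	.timing		= &board_nand_timing,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 2048,
	.chip_id	= 0xba20,	/* placeholder: use the real READID value */
};

static struct pxa3xx_nand_platform_data board_nand_info = {
	.enable_arbiter	= 1,
	.flash		= &board_nand_flash,
	.num_flash	= 1,
};
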
 
@@ -1014,7 +1021,7 @@
 static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
 				 struct pxa3xx_nand_info *info)
 {
-	struct pxa3xx_nand_flash *f = info->flash_info;
+	const struct pxa3xx_nand_flash *f = info->flash_info;
 	struct nand_chip *this = &info->nand_chip;
 
 	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;
@@ -1135,7 +1142,7 @@
 		goto fail_free_buf;
 	}
 
-	ret = pxa3xx_nand_detect_flash(info);
+	ret = pxa3xx_nand_detect_flash(info, pdata);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to detect flash\n");
 		ret = -ENODEV;
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
new file mode 100644
index 0000000..821acb0
--- /dev/null
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -0,0 +1,878 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright © 2008 Renesas Solutions Corp.
+ * Copyright © 2008 Atom Create Engineering Co., Ltd.
+ *
+ * Based on fsl_elbc_nand.c, Copyright © 2006-2007 Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sh_flctl.h>
+
+static struct nand_ecclayout flctl_4secc_oob_16 = {
+	.eccbytes = 10,
+	.eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+	.oobfree = {
+		{.offset = 12,
+		 .length = 4} },
+};
+
+static struct nand_ecclayout flctl_4secc_oob_64 = {
+	.eccbytes = 10,
+	.eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
+	.oobfree = {
+		{.offset = 60,
+		 .length = 4} },
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr flctl_4secc_smallpage = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 11,
+	.len = 1,
+	.pattern = scan_ff_pattern,
+};
+
+static struct nand_bbt_descr flctl_4secc_largepage = {
+	.options = 0,
+	.offs = 58,
+	.len = 2,
+	.pattern = scan_ff_pattern,
+};
+
+static void empty_fifo(struct sh_flctl *flctl)
+{
+	writel(0x000c0000, FLINTDMACR(flctl));	/* FIFO Clear */
+	writel(0x00000000, FLINTDMACR(flctl));	/* Clear Error flags */
+}
+
+static void start_translation(struct sh_flctl *flctl)
+{
+	writeb(TRSTRT, FLTRCR(flctl));
+}
+
+static void wait_completion(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+	while (timeout--) {
+		if (readb(FLTRCR(flctl)) & TREND) {
+			writeb(0x0, FLTRCR(flctl));
+			return;
+		}
+		udelay(1);
+	}
+
+	printk(KERN_ERR "wait_completion(): Timeout occured \n");
+	writeb(0x0, FLTRCR(flctl));
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t addr = 0;
+
+	if (column == -1) {
+		addr = page_addr;	/* ERASE1 */
+	} else if (page_addr != -1) {
+		/* SEQIN, READ0, etc.. */
+		if (flctl->page_size) {
+			addr = column & 0x0FFF;
+			addr |= (page_addr & 0xff) << 16;
+			addr |= ((page_addr >> 8) & 0xff) << 24;
+			/* bigger than 128MB */
+			if (flctl->rw_ADRCNT == ADRCNT2_E) {
+				uint32_t addr2;
+				addr2 = (page_addr >> 16) & 0xff;
+				writel(addr2, FLADR2(flctl));
+			}
+		} else {
+			addr = column;
+			addr |= (page_addr & 0xff) << 8;
+			addr |= ((page_addr >> 8) & 0xff) << 16;
+			addr |= ((page_addr >> 16) & 0xff) << 24;
+		}
+	}
+	writel(addr, FLADR(flctl));
+}
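
For clarity, on a large-page (2KiB) device the FLADR packing performed in set_addr() is equivalent to the helper below; it merely restates the shifts above (pages beyond 128MB additionally spill into FLADR2) and is not extra driver code.

static uint32_t flctl_pack_addr_2k(int column, int page_addr)
{
	uint32_t addr;

	addr  = column & 0x0FFF;		 /* byte offset within the page */
	addr |= (page_addr & 0xff) << 16;	 /* page address bits 7..0 */
	addr |= ((page_addr >> 8) & 0xff) << 24; /* page address bits 15..8 */
	return addr;
}
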
+
+static void wait_rfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+	while (timeout--) {
+		uint32_t val;
+		/* check FIFO */
+		val = readl(FLDTCNTR(flctl)) >> 16;
+		if (val & 0xFF)
+			return;
+		udelay(1);
+	}
+	printk(KERN_ERR "wait_rfifo_ready(): Timeout occured \n");
+}
+
+static void wait_wfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t len, timeout = LOOP_TIMEOUT_MAX;
+
+	while (timeout--) {
+		/* check FIFO */
+		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
+		if (len >= 4)
+			return;
+		udelay(1);
+	}
+	printk(KERN_ERR "wait_wfifo_ready(): Timeout occured \n");
+}
+
+static int wait_recfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+	int checked[4];
+	void __iomem *ecc_reg[4];
+	int i;
+	uint32_t data, size;
+
+	memset(checked, 0, sizeof(checked));
+
+	while (timeout--) {
+		size = readl(FLDTCNTR(flctl)) >> 24;
+		if (size & 0xFF)
+			return 0;	/* success */
+
+		if (readl(FL4ECCCR(flctl)) & _4ECCFA)
+			return 1;	/* can't correct */
+
+		udelay(1);
+		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
+			continue;
+
+		/* start error correction */
+		ecc_reg[0] = FL4ECCRESULT0(flctl);
+		ecc_reg[1] = FL4ECCRESULT1(flctl);
+		ecc_reg[2] = FL4ECCRESULT2(flctl);
+		ecc_reg[3] = FL4ECCRESULT3(flctl);
+
+		for (i = 0; i < 3; i++) {
+			data = readl(ecc_reg[i]);
+			if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
+				uint8_t org;
+				int index;
+
+				index = data >> 16;
+				org = flctl->done_buff[index];
+				flctl->done_buff[index] = org ^ (data & 0xFF);
+				checked[i] = 1;
+			}
+		}
+
+		writel(0, FL4ECCCR(flctl));
+	}
+
+	printk(KERN_ERR "wait_recfifo_ready(): Timeout occured \n");
+	return 1;	/* timeout */
+}
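
Each FL4ECCRESULTn word consumed in the loop above encodes one hardware correction as (byte index << 16) | xor mask, applied to the sector buffer. The helper below just restates that step for illustration; it is not additional driver code.

static void apply_flctl_ecc_result(uint8_t *buff, uint32_t result)
{
	unsigned int index = result >> 16;	/* byte offset within done_buff */
	uint8_t mask = result & 0xFF;		/* bit(s) to flip at that byte */

	buff[index] ^= mask;
}
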
+
+static void wait_wecfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+	uint32_t len;
+
+	while (timeout--) {
+		/* check FLECFIFO */
+		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
+		if (len >= 4)
+			return;
+		udelay(1);
+	}
+	printk(KERN_ERR "wait_wecfifo_ready(): Timeout occured \n");
+}
+
+static void read_datareg(struct sh_flctl *flctl, int offset)
+{
+	unsigned long data;
+	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+	wait_completion(flctl);
+
+	data = readl(FLDATAR(flctl));
+	*buf = le32_to_cpu(data);
+}
+
+static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+	int i, len_4align;
+	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+	void *fifo_addr = (void *)FLDTFIFO(flctl);
+
+	len_4align = (rlen + 3) / 4;
+
+	for (i = 0; i < len_4align; i++) {
+		wait_rfifo_ready(flctl);
+		buf[i] = readl(fifo_addr);
+		buf[i] = be32_to_cpu(buf[i]);
+	}
+}
+
+static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff)
+{
+	int i;
+	unsigned long *ecc_buf = (unsigned long *)buff;
+	void *fifo_addr = (void *)FLECFIFO(flctl);
+
+	for (i = 0; i < 4; i++) {
+		if (wait_recfifo_ready(flctl))
+			return 1;
+		ecc_buf[i] = readl(fifo_addr);
+		ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+	}
+
+	return 0;
+}
+
+static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+	int i, len_4align;
+	unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+	void *fifo_addr = (void *)FLDTFIFO(flctl);
+
+	len_4align = (rlen + 3) / 4;
+	for (i = 0; i < len_4align; i++) {
+		wait_wfifo_ready(flctl);
+		writel(cpu_to_be32(data[i]), fifo_addr);
+	}
+}
+
+static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+	uint32_t flcmdcr_val, addr_len_bytes = 0;
+
+	/* Set SNAND bit if page size is 2048 bytes */
+	if (flctl->page_size)
+		flcmncr_val |= SNAND_E;
+	else
+		flcmncr_val &= ~SNAND_E;
+
+	/* default FLCMDCR val */
+	flcmdcr_val = DOCMD1_E | DOADR_E;
+
+	/* Set for FLCMDCR */
+	switch (cmd) {
+	case NAND_CMD_ERASE1:
+		addr_len_bytes = flctl->erase_ADRCNT;
+		flcmdcr_val |= DOCMD2_E;
+		break;
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		addr_len_bytes = flctl->rw_ADRCNT;
+		flcmdcr_val |= CDSRC_E;
+		break;
+	case NAND_CMD_SEQIN:
+		/* This case means cmd is READ0, READ1 or READ00 */
+		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
+		break;
+	case NAND_CMD_PAGEPROG:
+		addr_len_bytes = flctl->rw_ADRCNT;
+		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+		break;
+	case NAND_CMD_READID:
+		flcmncr_val &= ~SNAND_E;
+		addr_len_bytes = ADRCNT_1;
+		break;
+	case NAND_CMD_STATUS:
+	case NAND_CMD_RESET:
+		flcmncr_val &= ~SNAND_E;
+		flcmdcr_val &= ~(DOADR_E | DOSR_E);
+		break;
+	default:
+		break;
+	}
+
+	/* Set address bytes parameter */
+	flcmdcr_val |= addr_len_bytes;
+
+	/* Now actually write */
+	writel(flcmncr_val, FLCMNCR(flctl));
+	writel(flcmdcr_val, FLCMDCR(flctl));
+	writel(flcmcdr_val, FLCMCDR(flctl));
+}
+
+static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf)
+{
+	int i, eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *p = buf;
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+		chip->read_buf(mtd, p, eccsize);
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		if (flctl->hwecc_cant_correct[i])
+			mtd->ecc_stats.failed++;
+		else
+			mtd->ecc_stats.corrected += 0;
+	}
+
+	return 0;
+}
+
+static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				   const uint8_t *buf)
+{
+	int i, eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	const uint8_t *p = buf;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+		chip->write_buf(mtd, p, eccsize);
+}
+
+static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int sector, page_sectors;
+
+	if (flctl->page_size)
+		page_sectors = 4;
+	else
+		page_sectors = 1;
+
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+		 FLCMNCR(flctl));
+
+	set_cmd_regs(mtd, NAND_CMD_READ0,
+		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+	for (sector = 0; sector < page_sectors; sector++) {
+		int ret;
+
+		empty_fifo(flctl);
+		writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
+		writel(page_addr << 2 | sector, FLADR(flctl));
+
+		start_translation(flctl);
+		read_fiforeg(flctl, 512, 512 * sector);
+
+		ret = read_ecfiforeg(flctl,
+			&flctl->done_buff[mtd->writesize + 16 * sector]);
+
+		if (ret)
+			flctl->hwecc_cant_correct[sector] = 1;
+
+		writel(0x0, FL4ECCCR(flctl));
+		wait_completion(flctl);
+	}
+	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
+			FLCMNCR(flctl));
+}
+
+static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+	set_cmd_regs(mtd, NAND_CMD_READ0,
+		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+	empty_fifo(flctl);
+	if (flctl->page_size) {
+		int i;
+		/* In case that the page size is 2k */
+		for (i = 0; i < 16 * 3; i++)
+			flctl->done_buff[i] = 0xFF;
+
+		set_addr(mtd, 3 * 528 + 512, page_addr);
+		writel(16, FLDTCNTR(flctl));
+
+		start_translation(flctl);
+		read_fiforeg(flctl, 16, 16 * 3);
+		wait_completion(flctl);
+	} else {
+		/* In case that the page size is 512b */
+		set_addr(mtd, 512, page_addr);
+		writel(16, FLDTCNTR(flctl));
+
+		start_translation(flctl);
+		read_fiforeg(flctl, 16, 0);
+		wait_completion(flctl);
+	}
+}
+
+static void execmd_write_page_sector(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int i, page_addr = flctl->seqin_page_addr;
+	int sector, page_sectors;
+
+	if (flctl->page_size)
+		page_sectors = 4;
+	else
+		page_sectors = 1;
+
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+
+	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+	for (sector = 0; sector < page_sectors; sector++) {
+		empty_fifo(flctl);
+		writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
+		writel(page_addr << 2 | sector, FLADR(flctl));
+
+		start_translation(flctl);
+		write_fiforeg(flctl, 512, 512 * sector);
+
+		for (i = 0; i < 4; i++) {
+			wait_wecfifo_ready(flctl); /* wait for write ready */
+			writel(0xFFFFFFFF, FLECFIFO(flctl));
+		}
+		wait_completion(flctl);
+	}
+
+	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
+}
+
+static void execmd_write_oob(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int page_addr = flctl->seqin_page_addr;
+	int sector, page_sectors;
+
+	if (flctl->page_size) {
+		sector = 3;
+		page_sectors = 4;
+	} else {
+		sector = 0;
+		page_sectors = 1;
+	}
+
+	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+	for (; sector < page_sectors; sector++) {
+		empty_fifo(flctl);
+		set_addr(mtd, sector * 528 + 512, page_addr);
+		writel(16, FLDTCNTR(flctl));	/* set read size */
+
+		start_translation(flctl);
+		write_fiforeg(flctl, 16, 16 * sector);
+		wait_completion(flctl);
+	}
+}
+
+static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
+			int column, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t read_cmd = 0;
+
+	flctl->read_bytes = 0;
+	if (command != NAND_CMD_PAGEPROG)
+		flctl->index = 0;
+
+	switch (command) {
+	case NAND_CMD_READ1:
+	case NAND_CMD_READ0:
+		if (flctl->hwecc) {
+			/* read page with hwecc */
+			execmd_read_page_sector(mtd, page_addr);
+			break;
+		}
+		empty_fifo(flctl);
+		if (flctl->page_size)
+			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+				| command);
+		else
+			set_cmd_regs(mtd, command, command);
+
+		set_addr(mtd, 0, page_addr);
+
+		flctl->read_bytes = mtd->writesize + mtd->oobsize;
+		flctl->index += column;
+		goto read_normal_exit;
+
+	case NAND_CMD_READOOB:
+		if (flctl->hwecc) {
+			/* read page with hwecc */
+			execmd_read_oob(mtd, page_addr);
+			break;
+		}
+
+		empty_fifo(flctl);
+		if (flctl->page_size) {
+			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+				| NAND_CMD_READ0);
+			set_addr(mtd, mtd->writesize, page_addr);
+		} else {
+			set_cmd_regs(mtd, command, command);
+			set_addr(mtd, 0, page_addr);
+		}
+		flctl->read_bytes = mtd->oobsize;
+		goto read_normal_exit;
+
+	case NAND_CMD_READID:
+		empty_fifo(flctl);
+		set_cmd_regs(mtd, command, command);
+		set_addr(mtd, 0, 0);
+
+		flctl->read_bytes = 4;
+		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+		start_translation(flctl);
+		read_datareg(flctl, 0);	/* read and end */
+		break;
+
+	case NAND_CMD_ERASE1:
+		flctl->erase1_page_addr = page_addr;
+		break;
+
+	case NAND_CMD_ERASE2:
+		set_cmd_regs(mtd, NAND_CMD_ERASE1,
+			(command << 8) | NAND_CMD_ERASE1);
+		set_addr(mtd, -1, flctl->erase1_page_addr);
+		start_translation(flctl);
+		wait_completion(flctl);
+		break;
+
+	case NAND_CMD_SEQIN:
+		if (!flctl->page_size) {
+			/* output read command */
+			if (column >= mtd->writesize) {
+				column -= mtd->writesize;
+				read_cmd = NAND_CMD_READOOB;
+			} else if (column < 256) {
+				read_cmd = NAND_CMD_READ0;
+			} else {
+				column -= 256;
+				read_cmd = NAND_CMD_READ1;
+			}
+		}
+		flctl->seqin_column = column;
+		flctl->seqin_page_addr = page_addr;
+		flctl->seqin_read_cmd = read_cmd;
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		empty_fifo(flctl);
+		if (!flctl->page_size) {
+			set_cmd_regs(mtd, NAND_CMD_SEQIN,
+					flctl->seqin_read_cmd);
+			set_addr(mtd, -1, -1);
+			writel(0, FLDTCNTR(flctl));	/* set 0 size */
+			start_translation(flctl);
+			wait_completion(flctl);
+		}
+		if (flctl->hwecc) {
+			/* write page with hwecc */
+			if (flctl->seqin_column == mtd->writesize)
+				execmd_write_oob(mtd);
+			else if (!flctl->seqin_column)
+				execmd_write_page_sector(mtd);
+			else
+				printk(KERN_ERR "Invalid address !?\n");
+			break;
+		}
+		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
+		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
+		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
+		start_translation(flctl);
+		write_fiforeg(flctl, flctl->index, 0);
+		wait_completion(flctl);
+		break;
+
+	case NAND_CMD_STATUS:
+		set_cmd_regs(mtd, command, command);
+		set_addr(mtd, -1, -1);
+
+		flctl->read_bytes = 1;
+		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+		start_translation(flctl);
+		read_datareg(flctl, 0); /* read and end */
+		break;
+
+	case NAND_CMD_RESET:
+		set_cmd_regs(mtd, command, command);
+		set_addr(mtd, -1, -1);
+
+		writel(0, FLDTCNTR(flctl));	/* set 0 size */
+		start_translation(flctl);
+		wait_completion(flctl);
+		break;
+
+	default:
+		break;
+	}
+	return;
+
+read_normal_exit:
+	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
+	start_translation(flctl);
+	read_fiforeg(flctl, flctl->read_bytes, 0);
+	wait_completion(flctl);
+	return;
+}
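
On small-page devices the column passed to SEQIN selects which read command has to be issued before programming, as handled in the NAND_CMD_SEQIN case above. Restated as a helper, for illustration only:

static unsigned int flctl_smallpage_seqin_cmd(struct mtd_info *mtd, int column)
{
	if (column >= mtd->writesize)
		return NAND_CMD_READOOB;	/* spare area */
	if (column < 256)
		return NAND_CMD_READ0;		/* first half of the page */
	return NAND_CMD_READ1;			/* second half (column rebased by 256) */
}
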
+
+static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+
+	switch (chipnr) {
+	case -1:
+		flcmncr_val &= ~CE0_ENABLE;
+		writel(flcmncr_val, FLCMNCR(flctl));
+		break;
+	case 0:
+		flcmncr_val |= CE0_ENABLE;
+		writel(flcmncr_val, FLCMNCR(flctl));
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int i, index = flctl->index;
+
+	for (i = 0; i < len; i++)
+		flctl->done_buff[index + i] = buf[i];
+	flctl->index += len;
+}
+
+static uint8_t flctl_read_byte(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int index = flctl->index;
+	uint8_t data;
+
+	data = flctl->done_buff[index];
+	flctl->index++;
+	return data;
+}
+
+static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = flctl_read_byte(mtd);
+}
+
+static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (buf[i] != flctl_read_byte(mtd))
+			return -EFAULT;
+	return 0;
+}
+
+static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
+{
+	writel(val, FLCMNCR(flctl));
+}
+
+static int flctl_chip_init_tail(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	struct nand_chip *chip = &flctl->chip;
+
+	if (mtd->writesize == 512) {
+		flctl->page_size = 0;
+		if (chip->chipsize > (32 << 20)) {
+			/* bigger than 32MB */
+			flctl->rw_ADRCNT = ADRCNT_4;
+			flctl->erase_ADRCNT = ADRCNT_3;
+		} else if (chip->chipsize > (2 << 16)) {
+			/* bigger than 128KB */
+			flctl->rw_ADRCNT = ADRCNT_3;
+			flctl->erase_ADRCNT = ADRCNT_2;
+		} else {
+			flctl->rw_ADRCNT = ADRCNT_2;
+			flctl->erase_ADRCNT = ADRCNT_1;
+		}
+	} else {
+		flctl->page_size = 1;
+		if (chip->chipsize > (128 << 20)) {
+			/* bigger than 128MB */
+			flctl->rw_ADRCNT = ADRCNT2_E;
+			flctl->erase_ADRCNT = ADRCNT_3;
+		} else if (chip->chipsize > (8 << 16)) {
+			/* bigger than 512KB */
+			flctl->rw_ADRCNT = ADRCNT_4;
+			flctl->erase_ADRCNT = ADRCNT_2;
+		} else {
+			flctl->rw_ADRCNT = ADRCNT_3;
+			flctl->erase_ADRCNT = ADRCNT_1;
+		}
+	}
+
+	if (flctl->hwecc) {
+		if (mtd->writesize == 512) {
+			chip->ecc.layout = &flctl_4secc_oob_16;
+			chip->badblock_pattern = &flctl_4secc_smallpage;
+		} else {
+			chip->ecc.layout = &flctl_4secc_oob_64;
+			chip->badblock_pattern = &flctl_4secc_largepage;
+		}
+
+		chip->ecc.size = 512;
+		chip->ecc.bytes = 10;
+		chip->ecc.read_page = flctl_read_page_hwecc;
+		chip->ecc.write_page = flctl_write_page_hwecc;
+		chip->ecc.mode = NAND_ECC_HW;
+
+		/* 4 symbols ECC enabled */
+		writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
+				FLCMNCR(flctl));
+	} else {
+		chip->ecc.mode = NAND_ECC_SOFT;
+	}
+
+	return 0;
+}
+
+static int __init flctl_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct sh_flctl *flctl;
+	struct mtd_info *flctl_mtd;
+	struct nand_chip *nand;
+	struct sh_flctl_platform_data *pdata;
+	int ret;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL) {
+		printk(KERN_ERR "sh_flctl platform_data not found.\n");
+		return -ENODEV;
+	}
+
+	flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
+	if (!flctl) {
+		printk(KERN_ERR "Unable to allocate NAND MTD dev structure.\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		printk(KERN_ERR "%s: resource not found.\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	flctl->reg = ioremap(res->start, res->end - res->start + 1);
+	if (flctl->reg == NULL) {
+		printk(KERN_ERR "%s: ioremap error.\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	platform_set_drvdata(pdev, flctl);
+	flctl_mtd = &flctl->mtd;
+	nand = &flctl->chip;
+	flctl_mtd->priv = nand;
+	flctl->hwecc = pdata->has_hwecc;
+
+	flctl_register_init(flctl, pdata->flcmncr_val);
+
+	nand->options = NAND_NO_AUTOINCR;
+
+	/* Set address of hardware control function */
+	/* 20 us command delay time */
+	nand->chip_delay = 20;
+
+	nand->read_byte = flctl_read_byte;
+	nand->write_buf = flctl_write_buf;
+	nand->read_buf = flctl_read_buf;
+	nand->verify_buf = flctl_verify_buf;
+	nand->select_chip = flctl_select_chip;
+	nand->cmdfunc = flctl_cmdfunc;
+
+	ret = nand_scan_ident(flctl_mtd, 1);
+	if (ret)
+		goto err;
+
+	ret = flctl_chip_init_tail(flctl_mtd);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_tail(flctl_mtd);
+	if (ret)
+		goto err;
+
+	add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts);
+
+	return 0;
+
+err:
+	kfree(flctl);
+	return ret;
+}
+
+static int __exit flctl_remove(struct platform_device *pdev)
+{
+	struct sh_flctl *flctl = platform_get_drvdata(pdev);
+
+	nand_release(&flctl->mtd);
+	kfree(flctl);
+
+	return 0;
+}
+
+static struct platform_driver flctl_driver = {
+	.probe		= flctl_probe,
+	.remove		= flctl_remove,
+	.driver = {
+		.name	= "sh_flctl",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init flctl_nand_init(void)
+{
+	return platform_driver_register(&flctl_driver);
+}
+
+static void __exit flctl_nand_cleanup(void)
+{
+	platform_driver_unregister(&flctl_driver);
+}
+
+module_init(flctl_nand_init);
+module_exit(flctl_nand_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("SuperH FLCTL driver");
+MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c
deleted file mode 100644
index bbf492e..0000000
--- a/drivers/mtd/nand/toto.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *  drivers/mtd/nand/toto.c
- *
- *  Copyright (c) 2003 Texas Instruments
- *
- *  Derived from drivers/mtd/autcpu12.c
- *
- *  Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   TI fido board. It supports 32MiB and 64MiB cards
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/arch/hardware.h>
-#include <asm/sizes.h>
-#include <asm/arch/toto.h>
-#include <asm/arch-omap1510/hardware.h>
-#include <asm/arch/gpio.h>
-
-#define CONFIG_NAND_WORKAROUND 1
-
-/*
- * MTD structure for TOTO board
- */
-static struct mtd_info *toto_mtd = NULL;
-
-static unsigned long toto_io_base = OMAP_FLASH_1_BASE;
-
-/*
- * Define partitions for flash devices
- */
-
-static struct mtd_partition partition_info64M[] = {
-	{ .name =	"toto kernel partition 1",
-	  .offset =	0,
-	  .size	=	2 * SZ_1M },
-	{ .name =	"toto file sys partition 2",
-	  .offset =	2 * SZ_1M,
-	  .size =	14 * SZ_1M },
-	{ .name =	"toto user partition 3",
-	  .offset =	16 * SZ_1M,
-	  .size =	16 * SZ_1M },
-	{ .name =	"toto devboard extra partition 4",
-	  .offset =	32 * SZ_1M,
-	  .size =	32 * SZ_1M },
-};
-
-static struct mtd_partition partition_info32M[] = {
-	{ .name =	"toto kernel partition 1",
-	  .offset =	0,
-	  .size =	2 * SZ_1M },
-	{ .name =	"toto file sys partition 2",
-	  .offset =	2 * SZ_1M,
-	  .size =	14 * SZ_1M },
-	{ .name =	"toto user partition 3",
-	  .offset =	16 * SZ_1M,
-	  .size =	16 * SZ_1M },
-};
-
-#define NUM_PARTITIONS32M 3
-#define NUM_PARTITIONS64M 4
-
-/*
- *	hardware specific access to control-lines
- *
- *	ctrl:
- *	NAND_NCE: bit 0 -> bit 14 (0x4000)
- *	NAND_CLE: bit 1 -> bit 12 (0x1000)
- *	NAND_ALE: bit 2 -> bit 1  (0x0002)
- */
-static void toto_hwcontrol(struct mtd_info *mtd, int cmd,
-			   unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd->priv;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		unsigned long bits;
-
-		/* hopefully enough time for tc make proceding write to clear */
-		udelay(1);
-
-		bits = (~ctrl & NAND_NCE) << 14;
-		bits |= (ctrl & NAND_CLE) << 12;
-		bits |= (ctrl & NAND_ALE) >> 1;
-
-#warning Wild guess as gpiosetout() is nowhere defined in the kernel source - tglx
-		gpiosetout(0x5002, bits);
-
-#ifdef CONFIG_NAND_WORKAROUND
-		/* "some" dev boards busted, blue wired to rts2 :( */
-		rts2setout(2, (ctrl & NAND_CLE) << 1);
-#endif
-		/* allow time to ensure gpio state to over take memory write */
-		udelay(1);
-	}
-
-	if (cmd != NAND_CMD_NONE)
-		writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * Main initialization routine
- */
-static int __init toto_init(void)
-{
-	struct nand_chip *this;
-	int err = 0;
-
-	/* Allocate memory for MTD device structure and private data */
-	toto_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!toto_mtd) {
-		printk(KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&toto_mtd[1]);
-
-	/* Initialize structures */
-	memset(toto_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
-	/* Link the private data with the MTD structure */
-	toto_mtd->priv = this;
-	toto_mtd->owner = THIS_MODULE;
-
-	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = toto_io_base;
-	this->IO_ADDR_W = toto_io_base;
-	this->cmd_ctrl = toto_hwcontrol;
-	this->dev_ready = NULL;
-	/* 25 us command delay time */
-	this->chip_delay = 30;
-	this->ecc.mode = NAND_ECC_SOFT;
-
-	/* Scan to find existance of the device */
-	if (nand_scan(toto_mtd, 1)) {
-		err = -ENXIO;
-		goto out_mtd;
-	}
-
-	/* Register the partitions */
-	switch (toto_mtd->size) {
-	case SZ_64M:
-		add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M);
-		break;
-	case SZ_32M:
-		add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M);
-		break;
-	default:{
-			printk(KERN_WARNING "Unsupported Nand device\n");
-			err = -ENXIO;
-			goto out_buf;
-		}
-	}
-
-	gpioreserve(NAND_MASK);	/* claim our gpios */
-	archflashwp(0, 0);	/* open up flash for writing */
-
-	goto out;
-
- out_mtd:
-	kfree(toto_mtd);
- out:
-	return err;
-}
-
-module_init(toto_init);
-
-/*
- * Clean up routine
- */
-static void __exit toto_cleanup(void)
-{
-	/* Release resources, unregister device */
-	nand_release(toto_mtd);
-
-	/* Free the MTD device structure */
-	kfree(toto_mtd);
-
-	/* stop flash writes */
-	archflashwp(0, 1);
-
-	/* release gpios to system */
-	gpiorelease(NAND_MASK);
-}
-
-module_exit(toto_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Richard Woodruff <r-woodruff2@ti.com>");
-MODULE_DESCRIPTION("Glue layer for NAND flash on toto board");
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 4f80c2f..9e45b3f 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,7 +20,6 @@
 #include <linux/mtd/partitions.h>
 
 int __devinit of_mtd_parse_partitions(struct device *dev,
-                                      struct mtd_info *mtd,
                                       struct device_node *node,
                                       struct mtd_partition **pparts)
 {
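
Since of_mtd_parse_partitions() no longer takes the mtd_info pointer, a caller now passes only the device and its OF node and registers the result itself. The sketch below is illustrative; the function name and error handling are assumptions, not taken from this patch.

static int example_add_of_parts(struct device *dev, struct device_node *np,
				struct mtd_info *mtd)
{
	struct mtd_partition *parts;
	int nr_parts;

	nr_parts = of_mtd_parse_partitions(dev, np, &parts);
	if (nr_parts > 0)
		return add_mtd_partitions(mtd, parts, nr_parts);

	return nr_parts;	/* 0 (no partitions found) or a negative error */
}
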
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index cb41cbc..79fa79e 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -27,8 +27,16 @@
 	help
 	  Support for OneNAND flash via platform device driver.
 
+config MTD_ONENAND_OMAP2
+	tristate "OneNAND on OMAP2/OMAP3 support"
+	depends on MTD_ONENAND && (ARCH_OMAP2 || ARCH_OMAP3)
+	help
+	  Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+	  via the GPMC memory controller.
+
 config MTD_ONENAND_OTP
 	bool "OneNAND OTP Support"
+	select HAVE_MTD_OTP
 	help
 	  One Block of the NAND Flash Array memory is reserved as
 	  a One-Time Programmable Block memory area.
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 4d2eacf..64b6cc6 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -7,6 +7,7 @@
 
 # Board specific.
 obj-$(CONFIG_MTD_ONENAND_GENERIC)	+= generic.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2)		+= omap2.o
 
 # Simulator
 obj-$(CONFIG_MTD_ONENAND_SIM)		+= onenand_sim.o
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
new file mode 100644
index 0000000..8387e05
--- /dev/null
+++ b/drivers/mtd/onenand/omap2.c
@@ -0,0 +1,802 @@
+/*
+ *  linux/drivers/mtd/onenand/omap2.c
+ *
+ *  OneNAND driver for OMAP2 / OMAP3
+ *
+ *  Copyright © 2005-2006 Nokia Corporation
+ *
+ *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
+ *  IRQ and DMA support written by Timo Teras
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/mach/flash.h>
+#include <asm/arch/gpmc.h>
+#include <asm/arch/onenand.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/pm.h>
+
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/dma.h>
+
+#include <asm/arch/board.h>
+
+#define DRIVER_NAME "omap2-onenand"
+
+#define ONENAND_IO_SIZE		SZ_128K
+#define ONENAND_BUFRAM_SIZE	(1024 * 5)
+
+struct omap2_onenand {
+	struct platform_device *pdev;
+	int gpmc_cs;
+	unsigned long phys_base;
+	int gpio_irq;
+	struct mtd_info mtd;
+	struct mtd_partition *parts;
+	struct onenand_chip onenand;
+	struct completion irq_done;
+	struct completion dma_done;
+	int dma_channel;
+	int freq;
+	int (*setup)(void __iomem *base, int freq);
+};
+
+static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+{
+	struct omap2_onenand *c = data;
+
+	complete(&c->dma_done);
+}
+
+static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
+{
+	struct omap2_onenand *c = dev_id;
+
+	complete(&c->irq_done);
+
+	return IRQ_HANDLED;
+}
+
+static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
+{
+	return readw(c->onenand.base + reg);
+}
+
+static inline void write_reg(struct omap2_onenand *c, unsigned short value,
+			     int reg)
+{
+	writew(value, c->onenand.base + reg);
+}
+
+static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
+{
+	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
+	       msg, state, ctrl, intr);
+}
+
+static void wait_warn(char *msg, int state, unsigned int ctrl,
+		      unsigned int intr)
+{
+	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
+	       "intr 0x%04x\n", msg, state, ctrl, intr);
+}
+
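+/*
+ * Wait for the current OneNAND operation to complete.  FL_RESETING polls the
+ * interrupt register for up to ~20us; other non-read states enable the
+ * interrupt pin and sleep on irq_done with a 20ms timeout, retrying up to
+ * three times while ONENAND_CTRL_ONGO is still set; FL_READING keeps the
+ * interrupt pin masked and polls the interrupt register instead.  ECC and
+ * controller error status are checked once the operation has finished.
+ */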
+static int omap2_onenand_wait(struct mtd_info *mtd, int state)
+{
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	unsigned int intr = 0;
+	unsigned int ctrl;
+	unsigned long timeout;
+	u32 syscfg;
+
+	if (state == FL_RESETING) {
+		int i;
+
+		for (i = 0; i < 20; i++) {
+			udelay(1);
+			intr = read_reg(c, ONENAND_REG_INTERRUPT);
+			if (intr & ONENAND_INT_MASTER)
+				break;
+		}
+		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+		if (ctrl & ONENAND_CTRL_ERROR) {
+			wait_err("controller error", state, ctrl, intr);
+			return -EIO;
+		}
+		if (!(intr & ONENAND_INT_RESET)) {
+			wait_err("timeout", state, ctrl, intr);
+			return -EIO;
+		}
+		return 0;
+	}
+
+	if (state != FL_READING) {
+		int result;
+
+		/* Turn interrupts on */
+		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
+			syscfg |= ONENAND_SYS_CFG1_IOBE;
+			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+			if (cpu_is_omap34xx())
+				/* Add a delay to let GPIO settle */
+				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+		}
+
+		INIT_COMPLETION(c->irq_done);
+		if (c->gpio_irq) {
+			result = omap_get_gpio_datain(c->gpio_irq);
+			if (result == -1) {
+				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+				intr = read_reg(c, ONENAND_REG_INTERRUPT);
+				wait_err("gpio error", state, ctrl, intr);
+				return -EIO;
+			}
+		} else
+			result = 0;
+		if (result == 0) {
+			int retry_cnt = 0;
+retry:
+			result = wait_for_completion_timeout(&c->irq_done,
+						    msecs_to_jiffies(20));
+			if (result == 0) {
+				/* Timeout after 20ms */
+				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+				if (ctrl & ONENAND_CTRL_ONGO) {
+					/*
+					 * The operation seems to be still going
+					 * so give it some more time.
+					 */
+					retry_cnt += 1;
+					if (retry_cnt < 3)
+						goto retry;
+					intr = read_reg(c,
+							ONENAND_REG_INTERRUPT);
+					wait_err("timeout", state, ctrl, intr);
+					return -EIO;
+				}
+				intr = read_reg(c, ONENAND_REG_INTERRUPT);
+				if ((intr & ONENAND_INT_MASTER) == 0)
+					wait_warn("timeout", state, ctrl, intr);
+			}
+		}
+	} else {
+		int retry_cnt = 0;
+
+		/* Turn interrupts off */
+		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
+		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+
+		timeout = jiffies + msecs_to_jiffies(20);
+		while (1) {
+			if (time_before(jiffies, timeout)) {
+				intr = read_reg(c, ONENAND_REG_INTERRUPT);
+				if (intr & ONENAND_INT_MASTER)
+					break;
+			} else {
+				/* Timeout after 20ms */
+				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+				if (ctrl & ONENAND_CTRL_ONGO) {
+					/*
+					 * The operation seems to be still going
+					 * so give it some more time.
+					 */
+					retry_cnt += 1;
+					if (retry_cnt < 3) {
+						timeout = jiffies +
+							  msecs_to_jiffies(20);
+						continue;
+					}
+				}
+				break;
+			}
+		}
+	}
+
+	intr = read_reg(c, ONENAND_REG_INTERRUPT);
+	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+
+	if (intr & ONENAND_INT_READ) {
+		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
+
+		if (ecc) {
+			unsigned int addr1, addr8;
+
+			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
+			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
+			if (ecc & ONENAND_ECC_2BIT_ALL) {
+				printk(KERN_ERR "onenand_wait: ECC error = "
+				       "0x%04x, addr1 %#x, addr8 %#x\n",
+				       ecc, addr1, addr8);
+				mtd->ecc_stats.failed++;
+				return -EBADMSG;
+			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
+				printk(KERN_NOTICE "onenand_wait: correctable "
+				       "ECC error = 0x%04x, addr1 %#x, "
+				       "addr8 %#x\n", ecc, addr1, addr8);
+				mtd->ecc_stats.corrected++;
+			}
+		}
+	} else if (state == FL_READING) {
+		wait_err("timeout", state, ctrl, intr);
+		return -EIO;
+	}
+
+	if (ctrl & ONENAND_CTRL_ERROR) {
+		wait_err("controller error", state, ctrl, intr);
+		if (ctrl & ONENAND_CTRL_LOCK)
+			printk(KERN_ERR "onenand_wait: "
+					"Device is write protected!!!\n");
+		return -EIO;
+	}
+
+	if (ctrl & 0xFE9F)
+		wait_warn("unexpected controller status", state, ctrl, intr);
+
+	return 0;
+}
+
+static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+	struct onenand_chip *this = mtd->priv;
+
+	if (ONENAND_CURRENT_BUFFERRAM(this)) {
+		if (area == ONENAND_DATARAM)
+			return mtd->writesize;
+		if (area == ONENAND_SPARERAM)
+			return mtd->oobsize;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+
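+/*
+ * The OMAP3 BufferRAM accessors use the system DMA controller when it is
+ * worthwhile and fall back to memcpy otherwise: unaligned buffers, transfers
+ * shorter than 384 bytes, vmalloc buffers crossing a page boundary and DMA
+ * timeouts all take the out_copy path, as does a write issued from interrupt
+ * context (panic_write).  For reads, a trailing non-multiple-of-4 tail is
+ * copied by hand before the aligned part is handed to the DMA engine.
+ */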
+static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+					unsigned char *buffer, int offset,
+					size_t count)
+{
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	struct onenand_chip *this = mtd->priv;
+	dma_addr_t dma_src, dma_dst;
+	int bram_offset;
+	unsigned long timeout;
+	void *buf = (void *)buffer;
+	size_t xtra;
+	volatile unsigned *done;
+
+	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+		goto out_copy;
+
+	if (buf >= high_memory) {
+		struct page *p1;
+
+		if (((size_t)buf & PAGE_MASK) !=
+		    ((size_t)(buf + count - 1) & PAGE_MASK))
+			goto out_copy;
+		p1 = vmalloc_to_page(buf);
+		if (!p1)
+			goto out_copy;
+		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+	}
+
+	xtra = count & 3;
+	if (xtra) {
+		count -= xtra;
+		memcpy(buf + count, this->base + bram_offset + count, xtra);
+	}
+
+	dma_src = c->phys_base + bram_offset;
+	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+		dev_err(&c->pdev->dev,
+			"Couldn't DMA map a %d byte buffer\n",
+			count);
+		goto out_copy;
+	}
+
+	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+				     count >> 2, 1, 0, 0, 0);
+	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				dma_src, 0, 0);
+	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				 dma_dst, 0, 0);
+
+	INIT_COMPLETION(c->dma_done);
+	omap_start_dma(c->dma_channel);
+
+	timeout = jiffies + msecs_to_jiffies(20);
+	done = &c->dma_done.done;
+	while (time_before(jiffies, timeout))
+		if (*done)
+			break;
+
+	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+	if (!*done) {
+		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+		goto out_copy;
+	}
+
+	return 0;
+
+out_copy:
+	memcpy(buf, this->base + bram_offset, count);
+	return 0;
+}
+
+static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+					 const unsigned char *buffer,
+					 int offset, size_t count)
+{
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	struct onenand_chip *this = mtd->priv;
+	dma_addr_t dma_src, dma_dst;
+	int bram_offset;
+	unsigned long timeout;
+	void *buf = (void *)buffer;
+	volatile unsigned *done;
+
+	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+		goto out_copy;
+
+	/* panic_write() may be in an interrupt context */
+	if (in_interrupt())
+		goto out_copy;
+
+	if (buf >= high_memory) {
+		struct page *p1;
+
+		if (((size_t)buf & PAGE_MASK) !=
+		    ((size_t)(buf + count - 1) & PAGE_MASK))
+			goto out_copy;
+		p1 = vmalloc_to_page(buf);
+		if (!p1)
+			goto out_copy;
+		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+	}
+
+	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
+	dma_dst = c->phys_base + bram_offset;
+	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+		dev_err(&c->pdev->dev,
+			"Couldn't DMA map a %d byte buffer\n",
+			count);
+		return -1;
+	}
+
+	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+				     count >> 2, 1, 0, 0, 0);
+	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				dma_src, 0, 0);
+	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				 dma_dst, 0, 0);
+
+	INIT_COMPLETION(c->dma_done);
+	omap_start_dma(c->dma_channel);
+
+	timeout = jiffies + msecs_to_jiffies(20);
+	done = &c->dma_done.done;
+	while (time_before(jiffies, timeout))
+		if (*done)
+			break;
+
+	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+
+	if (!*done) {
+		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+		goto out_copy;
+	}
+
+	return 0;
+
+out_copy:
+	memcpy(this->base + bram_offset, buf, count);
+	return 0;
+}
+
+#else
+
+int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+				 unsigned char *buffer, int offset,
+				 size_t count);
+
+int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+				  const unsigned char *buffer,
+				  int offset, size_t count);
+
+#endif
+
+#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
+
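+/*
+ * On OMAP2 the DMA path below is compiled in but effectively disabled by the
+ * "if (1 || ...)" guard, so BufferRAM transfers currently always go through
+ * memcpy; the DMA setup is kept until the PM requirements mentioned in the
+ * comment have been revisited.
+ */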
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+					unsigned char *buffer, int offset,
+					size_t count)
+{
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	struct onenand_chip *this = mtd->priv;
+	dma_addr_t dma_src, dma_dst;
+	int bram_offset;
+
+	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+	/* DMA is not used.  Revisit PM requirements before enabling it. */
+	if (1 || (c->dma_channel < 0) ||
+	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
+	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
+		memcpy(buffer, (__force void *)(this->base + bram_offset),
+		       count);
+		return 0;
+	}
+
+	dma_src = c->phys_base + bram_offset;
+	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
+				 DMA_FROM_DEVICE);
+	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+		dev_err(&c->pdev->dev,
+			"Couldn't DMA map a %d byte buffer\n",
+			count);
+		return -1;
+	}
+
+	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+				     count / 4, 1, 0, 0, 0);
+	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				dma_src, 0, 0);
+	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				 dma_dst, 0, 0);
+
+	INIT_COMPLETION(c->dma_done);
+	omap_start_dma(c->dma_channel);
+	wait_for_completion(&c->dma_done);
+
+	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+	return 0;
+}
+
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+					 const unsigned char *buffer,
+					 int offset, size_t count)
+{
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	struct onenand_chip *this = mtd->priv;
+	dma_addr_t dma_src, dma_dst;
+	int bram_offset;
+
+	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+	/* DMA is not used.  Revisit PM requirements before enabling it. */
+	if (1 || (c->dma_channel < 0) ||
+	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
+	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
+		memcpy((__force void *)(this->base + bram_offset), buffer,
+		       count);
+		return 0;
+	}
+
+	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
+				 DMA_TO_DEVICE);
+	dma_dst = c->phys_base + bram_offset;
+	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+		dev_err(&c->pdev->dev,
+			"Couldn't DMA map a %d byte buffer\n",
+			count);
+		return -1;
+	}
+
+	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
+				     count / 2, 1, 0, 0, 0);
+	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				dma_src, 0, 0);
+	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+				 dma_dst, 0, 0);
+
+	INIT_COMPLETION(c->dma_done);
+	omap_start_dma(c->dma_channel);
+	wait_for_completion(&c->dma_done);
+
+	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+#else
+
+int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+				 unsigned char *buffer, int offset,
+				 size_t count);
+
+int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+				  const unsigned char *buffer,
+				  int offset, size_t count);
+
+#endif
+
+static struct platform_driver omap2_onenand_driver;
+
+static int __adjust_timing(struct device *dev, void *data)
+{
+	int ret = 0;
+	struct omap2_onenand *c;
+
+	c = dev_get_drvdata(dev);
+
+	BUG_ON(c->setup == NULL);
+
+	/* DMA is not in use so this is all that is needed */
+	/* Revisit for OMAP3! */
+	ret = c->setup(c->onenand.base, c->freq);
+
+	return ret;
+}
+
+int omap2_onenand_rephase(void)
+{
+	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
+				      NULL, __adjust_timing);
+}
+
+static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+{
+	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+	/* With certain content in the buffer RAM, the OMAP boot ROM code
+	 * can recognize the flash chip incorrectly. Zero it out before
+	 * soft reset.
+	 */
+	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
+}
+
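+/*
+ * Probe order: reserve a GPMC chip select and the matching I/O region, map
+ * it, run the board-supplied onenand_setup() hook if one was provided, then
+ * optionally claim the interrupt GPIO and a DMA channel (falling back to PIO
+ * if DMA allocation fails) before scanning the chip and registering the MTD,
+ * as partitions when the platform data provides them.  The error labels
+ * below unwind the steps taken so far.
+ */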
+static int __devinit omap2_onenand_probe(struct platform_device *pdev)
+{
+	struct omap_onenand_platform_data *pdata;
+	struct omap2_onenand *c;
+	int r;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "platform data missing\n");
+		return -ENODEV;
+	}
+
+	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	init_completion(&c->irq_done);
+	init_completion(&c->dma_done);
+	c->gpmc_cs = pdata->cs;
+	c->gpio_irq = pdata->gpio_irq;
+	c->dma_channel = pdata->dma_channel;
+	if (c->dma_channel < 0) {
+		/* if -1, don't use DMA */
+		c->gpio_irq = 0;
+	}
+
+	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
+	if (r < 0) {
+		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+		goto err_kfree;
+	}
+
+	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
+			       pdev->dev.driver->name) == NULL) {
+		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
+			"size: 0x%x\n",	c->phys_base, ONENAND_IO_SIZE);
+		r = -EBUSY;
+		goto err_free_cs;
+	}
+	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
+	if (c->onenand.base == NULL) {
+		r = -ENOMEM;
+		goto err_release_mem_region;
+	}
+
+	if (pdata->onenand_setup != NULL) {
+		r = pdata->onenand_setup(c->onenand.base, c->freq);
+		if (r < 0) {
+			dev_err(&pdev->dev, "Onenand platform setup failed: "
+				"%d\n", r);
+			goto err_iounmap;
+		}
+		c->setup = pdata->onenand_setup;
+	}
+
+	if (c->gpio_irq) {
+		if ((r = omap_request_gpio(c->gpio_irq)) < 0) {
+			dev_err(&pdev->dev,  "Failed to request GPIO%d for "
+				"OneNAND\n", c->gpio_irq);
+			goto err_iounmap;
+		}
+		omap_set_gpio_direction(c->gpio_irq, 1);
+
+		if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq),
+				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
+				     pdev->dev.driver->name, c)) < 0)
+			goto err_release_gpio;
+	}
+
+	if (c->dma_channel >= 0) {
+		r = omap_request_dma(0, pdev->dev.driver->name,
+				     omap2_onenand_dma_cb, (void *) c,
+				     &c->dma_channel);
+		if (r == 0) {
+			omap_set_dma_write_mode(c->dma_channel,
+						OMAP_DMA_WRITE_NON_POSTED);
+			omap_set_dma_src_data_pack(c->dma_channel, 1);
+			omap_set_dma_src_burst_mode(c->dma_channel,
+						    OMAP_DMA_DATA_BURST_8);
+			omap_set_dma_dest_data_pack(c->dma_channel, 1);
+			omap_set_dma_dest_burst_mode(c->dma_channel,
+						     OMAP_DMA_DATA_BURST_8);
+		} else {
+			dev_info(&pdev->dev,
+				 "failed to allocate DMA for OneNAND, "
+				 "using PIO instead\n");
+			c->dma_channel = -1;
+		}
+	}
+
+	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
+		 "base %p\n", c->gpmc_cs, c->phys_base,
+		 c->onenand.base);
+
+	c->pdev = pdev;
+	c->mtd.name = pdev->dev.bus_id;
+	c->mtd.priv = &c->onenand;
+	c->mtd.owner = THIS_MODULE;
+
+	if (c->dma_channel >= 0) {
+		struct onenand_chip *this = &c->onenand;
+
+		this->wait = omap2_onenand_wait;
+		if (cpu_is_omap34xx()) {
+			this->read_bufferram = omap3_onenand_read_bufferram;
+			this->write_bufferram = omap3_onenand_write_bufferram;
+		} else {
+			this->read_bufferram = omap2_onenand_read_bufferram;
+			this->write_bufferram = omap2_onenand_write_bufferram;
+		}
+	}
+
+	if ((r = onenand_scan(&c->mtd, 1)) < 0)
+		goto err_release_dma;
+
+	switch ((c->onenand.version_id >> 4) & 0xf) {
+	case 0:
+		c->freq = 40;
+		break;
+	case 1:
+		c->freq = 54;
+		break;
+	case 2:
+		c->freq = 66;
+		break;
+	case 3:
+		c->freq = 83;
+		break;
+	}
+
+#ifdef CONFIG_MTD_PARTITIONS
+	if (pdata->parts != NULL)
+		r = add_mtd_partitions(&c->mtd, pdata->parts,
+				       pdata->nr_parts);
+	else
+#endif
+		r = add_mtd_device(&c->mtd);
+	if (r < 0)
+		goto err_release_onenand;
+
+	platform_set_drvdata(pdev, c);
+
+	return 0;
+
+err_release_onenand:
+	onenand_release(&c->mtd);
+err_release_dma:
+	if (c->dma_channel != -1)
+		omap_free_dma(c->dma_channel);
+	if (c->gpio_irq)
+		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+err_release_gpio:
+	if (c->gpio_irq)
+		omap_free_gpio(c->gpio_irq);
+err_iounmap:
+	iounmap(c->onenand.base);
+err_release_mem_region:
+	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+err_free_cs:
+	gpmc_cs_free(c->gpmc_cs);
+err_kfree:
+	kfree(c);
+
+	return r;
+}
+
+static int __devexit omap2_onenand_remove(struct platform_device *pdev)
+{
+	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+	BUG_ON(c == NULL);
+
+#ifdef CONFIG_MTD_PARTITIONS
+	if (c->parts)
+		del_mtd_partitions(&c->mtd);
+	else
+		del_mtd_device(&c->mtd);
+#else
+	del_mtd_device(&c->mtd);
+#endif
+
+	onenand_release(&c->mtd);
+	if (c->dma_channel != -1)
+		omap_free_dma(c->dma_channel);
+	omap2_onenand_shutdown(pdev);
+	platform_set_drvdata(pdev, NULL);
+	if (c->gpio_irq) {
+		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+		omap_free_gpio(c->gpio_irq);
+	}
+	iounmap(c->onenand.base);
+	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+	kfree(c);
+
+	return 0;
+}
+
+static struct platform_driver omap2_onenand_driver = {
+	.probe		= omap2_onenand_probe,
+	.remove		= omap2_onenand_remove,
+	.shutdown	= omap2_onenand_shutdown,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init omap2_onenand_init(void)
+{
+	printk(KERN_INFO "OneNAND driver initializing\n");
+	return platform_driver_register(&omap2_onenand_driver);
+}
+
+static void __exit omap2_onenand_exit(void)
+{
+	platform_driver_unregister(&omap2_onenand_driver);
+}
+
+module_init(omap2_onenand_init);
+module_exit(omap2_onenand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 926cf3a..90ed319 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1794,7 +1794,7 @@
 		return -EINVAL;
 	}
 
-	instr->fail_addr = 0xffffffff;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_ERASING);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index a5f3d60..33a5d6e 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -321,8 +321,7 @@
 	DEBUG(MTD_DEBUG_LEVEL1,
 		"SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
 		ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
-		(ssfdc->map_len + MAX_PHYS_BLK_PER_ZONE - 1) /
-		MAX_PHYS_BLK_PER_ZONE);
+		DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
 
 	/* Set geometry */
 	ssfdc->heads = 16;
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 03c759b..b30a0b8 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -104,12 +104,9 @@
 	struct ubi_volume_desc *desc;
 	int vol_id = iminor(inode) - 1, mode, ubi_num;
 
-	lock_kernel();
 	ubi_num = ubi_major2num(imajor(inode));
-	if (ubi_num < 0) {
-		unlock_kernel();
+	if (ubi_num < 0)
 		return ubi_num;
-	}
 
 	if (file->f_mode & FMODE_WRITE)
 		mode = UBI_READWRITE;
@@ -119,7 +116,6 @@
 	dbg_gen("open volume %d, mode %d", vol_id, mode);
 
 	desc = ubi_open_volume(ubi_num, vol_id, mode);
-	unlock_kernel();
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 967bb44..4f2daa5 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -387,7 +387,7 @@
 		pnum, vol_id, lnum, ec, sqnum, bitflips);
 
 	sv = add_volume(si, vol_id, pnum, vid_hdr);
-	if (IS_ERR(sv) < 0)
+	if (IS_ERR(sv))
 		return PTR_ERR(sv);
 
 	if (si->max_sqnum < sqnum)
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 217d0e1..333c894 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -244,8 +244,8 @@
 		}
 
 		if (reserved_pebs > ubi->good_peb_count) {
-			dbg_err("too large reserved_pebs, good PEBs %d",
-				ubi->good_peb_count);
+			dbg_err("too large reserved_pebs %d, good PEBs %d",
+				reserved_pebs, ubi->good_peb_count);
 			err = 9;
 			goto bad;
 		}
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 491ee16..9ba295d 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -90,7 +90,7 @@
 #include <linux/eisa.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
-#include <asm/irq.h>			/* For NR_IRQS only. */
+#include <asm/irq.h>			/* For nr_irqs only. */
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
@@ -1221,7 +1221,7 @@
 	if (print_info)
 		printk(", IRQ %d\n", dev->irq);
 	/* Tell them about an invalid IRQ. */
-	if (dev->irq <= 0 || dev->irq >= NR_IRQS)
+	if (dev->irq <= 0 || dev->irq >= nr_irqs)
 		printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
 			   dev->irq);
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ac..0b71ebc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -464,6 +464,12 @@
 	  This is the driver for the onboard card of MIPS Magnum 4000,
 	  Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
 
+config XTENSA_XT2000_SONIC
+	tristate "Xtensa XT2000 onboard SONIC Ethernet support"
+	depends on XTENSA_PLATFORM_XT2000
+	help
+	  This is the driver for the onboard card of the Xtensa XT2000 board.
+
 config MIPS_AU1X00_ENET
 	bool "MIPS AU1000 Ethernet support"
 	depends on SOC_AU1X00
@@ -2504,6 +2510,15 @@
 	  This driver supports the on-chip 1/10Gbit Ethernet controller on
 	  PA Semi's PWRficient line of chips.
 
+config MLX4_EN
+	tristate "Mellanox Technologies 10Gbit Ethernet support"
+	depends on PCI && INET
+	select MLX4_CORE
+	select INET_LRO
+	help
+	  This driver supports Mellanox Technologies ConnectX Ethernet
+	  devices.
+
 config MLX4_CORE
 	tristate
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa2510b..f19acf8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -227,6 +227,8 @@
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_ENC28J60) += enc28j60.o
 
+obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
+
 obj-$(CONFIG_MACB) += macb.o
 
 obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 08e18bc..45dd9bd 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -2,6 +2,7 @@
  * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
  *
  * Copyright (C) 2007  Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2008  Wolfram Sang, Pengutronix
  *
  * This file is licensed under the terms of the GNU General Public License
  * version 2. This program is licensed "as is" without any warranty of any
@@ -21,58 +22,45 @@
 	struct mpc52xx_fec __iomem *regs;
 };
 
-static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+		int reg, u32 value)
 {
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
 	struct mpc52xx_fec __iomem *fec;
 	int tries = 100;
-	u32 request = FEC_MII_READ_FRAME;
+
+	value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+	value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
 
 	fec = priv->regs;
 	out_be32(&fec->ievent, FEC_IEVENT_MII);
-
-	request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
-	request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
-
-	out_be32(&priv->regs->mii_data, request);
+	out_be32(&priv->regs->mii_data, value);
 
 	/* wait for it to finish, this takes about 23 us on lite5200b */
 	while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
 		udelay(5);
 
-	if (tries == 0)
+	if (!tries)
 		return -ETIMEDOUT;
 
-	return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK;
+	return value & FEC_MII_DATA_OP_RD ?
+		in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
 }
 
-static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
 {
-	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
-	struct mpc52xx_fec __iomem *fec;
-	u32 value = data;
-	int tries = 100;
-
-	fec = priv->regs;
-	out_be32(&fec->ievent, FEC_IEVENT_MII);
-
-	value |= FEC_MII_WRITE_FRAME;
-	value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
-	value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
-
-	out_be32(&priv->regs->mii_data, value);
-
-	/* wait for request to finish */
-	while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
-		udelay(5);
-
-	if (tries == 0)
-		return -ETIMEDOUT;
-
-	return 0;
+	return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
 }
 
-static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match)
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+		u16 data)
+{
+	return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
+		data | FEC_MII_WRITE_FRAME);
+}
+
+static int mpc52xx_fec_mdio_probe(struct of_device *of,
+		const struct of_device_id *match)
 {
 	struct device *dev = &of->dev;
 	struct device_node *np = of->node;
@@ -131,7 +119,8 @@
 	dev_set_drvdata(dev, bus);
 
 	/* set MII speed */
-	out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
+	out_be32(&priv->regs->mii_speed,
+		((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
 
 	/* enable MII interrupt */
 	out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 17ac697..b6a816e 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -416,10 +416,10 @@
 	if (!dev || !bc)
 		return -ENXIO;
 	if (!dev->base_addr || dev->base_addr > 0xffff-SER12_EXTENT ||
-	    dev->irq < 2 || dev->irq > NR_IRQS) {
+	    dev->irq < 2 || dev->irq > nr_irqs) {
 		printk(KERN_INFO "baycom_ser_fdx: invalid portnumber (max %u) "
 				"or irq (2 <= irq <= %d)\n",
-				0xffff-SER12_EXTENT, NR_IRQS);
+				0xffff-SER12_EXTENT, nr_irqs);
 		return -ENXIO;
 	}
 	if (bc->baud < 300 || bc->baud > 4800) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 45ae9d1..c17e39b 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1465,7 +1465,7 @@
 	printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
 	
 	flag=" ";
-	for (k = 0; k < NR_IRQS; k++)
+	for (k = 0; k < nr_irqs; k++)
 		if (Ivec[k].used) 
 		{
 			printk("%s%d", flag, k);
@@ -1728,7 +1728,7 @@
 
 			if (hwcfg.irq == 2) hwcfg.irq = 9;
 
-			if (hwcfg.irq < 0 || hwcfg.irq >= NR_IRQS)
+			if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
 				return -EINVAL;
 				
 			if (!Ivec[hwcfg.irq].used && hwcfg.irq)
@@ -2148,7 +2148,7 @@
 		}
 		
 	/* To unload the port must be closed so no real IRQ pending */
-	for (k=0; k < NR_IRQS ; k++)
+	for (k = 0; k < nr_irqs ; k++)
 		if (Ivec[k].used) free_irq(k, NULL);
 		
 	local_irq_enable();
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index efcf21c..2ee262225 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2604,8 +2604,16 @@
 		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
 		    of_device_is_compatible(np, "ibm,emac-440gr"))
 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
-		if (of_device_is_compatible(np, "ibm,emac-405ez"))
+		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
+#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CONTROL
 			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
+#else
+			printk(KERN_ERR "%s: Flow control not disabled!\n",
+					np->full_name);
+			return -ENXIO;
+#endif
+		}
+
 	}
 
 	/* Fixup some feature bits based on the device tree */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 1839d3f..ecf9798 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -280,9 +280,11 @@
 	mal_schedule_poll(mal);
 	set_mal_dcrn(mal, MAL_TXEOBISR, r);
 
+#ifdef CONFIG_PPC_DCR_NATIVE
 	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
 		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
 				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
+#endif
 
 	return IRQ_HANDLED;
 }
@@ -298,9 +300,11 @@
 	mal_schedule_poll(mal);
 	set_mal_dcrn(mal, MAL_RXEOBISR, r);
 
+#ifdef CONFIG_PPC_DCR_NATIVE
 	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
 		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
 				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
+#endif
 
 	return IRQ_HANDLED;
 }
@@ -572,9 +576,18 @@
 		goto fail;
 	}
 
-	if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez"))
+	if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
+#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
+		defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
 		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
 				MAL_FTR_COMMON_ERR_INT);
+#else
+		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
+				ofdev->node->full_name);
+		err = -ENODEV;
+		goto fail;
+#endif
+	}
 
 	mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
 	mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 73fe83b..e1429fc 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -540,7 +540,8 @@
 	if (ret != 0)
 		goto free_mem;
 
-	info("IrDA: Registered KingSun/DonShine device %s", net->name);
+	dev_info(&net->dev, "IrDA: Registered KingSun/DonShine device %s\n",
+		 net->name);
 
 	usb_set_intfdata(intf, kingsun);
 
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 8c257a5..2482d61 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -801,7 +801,8 @@
 	if (ret != 0)
 		goto free_mem;
 
-	info("IrDA: Registered KingSun KS-959 device %s", net->name);
+	dev_info(&net->dev, "IrDA: Registered KingSun KS-959 device %s\n",
+		 net->name);
 
 	usb_set_intfdata(intf, kingsun);
 
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index d01a285..1e0de93 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -705,7 +705,8 @@
 	if (ret != 0)
 		goto free_mem;
 
-	info("IrDA: Registered KingSun/Dazzle device %s", net->name);
+	dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n",
+		 net->name);
 
 	usb_set_intfdata(intf, kingsun);
 
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 0519637..3575804 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -506,7 +506,7 @@
 			goto found;
 	}
 
-	warn("%s: invalid speed %d", stir->netdev->name, speed);
+	dev_warn(&stir->netdev->dev, "invalid speed %d\n", speed);
 	return -EINVAL;
 
  found:
@@ -598,8 +598,8 @@
 		err = read_reg(stir, REG_FIFOCTL, stir->fifo_status, 
 				   FIFO_REGS_SIZE);
 		if (unlikely(err != FIFO_REGS_SIZE)) {
-			warn("%s: FIFO register read error: %d", 
-			     stir->netdev->name, err);
+			dev_warn(&stir->netdev->dev,
+				 "FIFO register read error: %d\n", err);
 
 			return err;
 		}
@@ -783,8 +783,9 @@
 
 			if (unlikely(receive_start(stir))) {
 				if (net_ratelimit())
-					info("%s: receive usb submit failed",
-					     stir->netdev->name);
+					dev_info(&dev->dev,
+						 "%s: receive usb submit failed\n",
+						 stir->netdev->name);
 				stir->receiving = 0;
 				msleep(10);
 				continue;
@@ -836,8 +837,8 @@
 
 	/* in case of error, the kernel thread will restart us */
 	if (err) {
-		warn("%s: usb receive submit error: %d",
-			stir->netdev->name, err);
+		dev_warn(&stir->netdev->dev, "usb receive submit error: %d\n",
+			 err);
 		stir->receiving = 0;
 		wake_up_process(stir->thread);
 	}
@@ -1073,7 +1074,8 @@
 	if (ret != 0)
 		goto err_out2;
 
-	info("IrDA: Registered SigmaTel device %s", net->name);
+	dev_info(&intf->dev, "IrDA: Registered SigmaTel device %s\n",
+		 net->name);
 
 	usb_set_intfdata(intf, stir);
 
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a65..a7a97bf 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
 obj-$(CONFIG_MLX4_CORE)		+= mlx4_core.o
 
 mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-		mr.o pd.o profile.o qp.o reset.o srq.o
+		mr.o pd.o port.o profile.o qp.o reset.o srq.o
+
+obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
+
+mlx4_en-y := 	en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+		en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79..ad95d5f 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@
 
 	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
 	if (obj >= bitmap->max) {
-		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
 		obj = find_first_zero_bit(bitmap->table, bitmap->max);
 	}
 
 	if (obj < bitmap->max) {
 		set_bit(obj, bitmap->table);
-		bitmap->last = (obj + 1) & (bitmap->max - 1);
+		bitmap->last = (obj + 1);
+		if (bitmap->last == bitmap->max)
+			bitmap->last = 0;
 		obj |= bitmap->top;
 	} else
 		obj = -1;
@@ -66,16 +69,90 @@
 
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
 {
-	obj &= bitmap->max - 1;
+	mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
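+/*
+ * Scan the bitmap for "len" consecutive clear bits starting at an aligned
+ * position at or after "start".  The search advances in "align" steps until
+ * it finds a clear, aligned bit, then verifies the rest of the range; if any
+ * bit in the range is set, the scan restarts just past it.  Returns the
+ * first bit of the range, or -1 if nothing fits below "nbits".  For example,
+ * with bits 0-2 set, len = 4 and align = 4, the function returns 4.
+ */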
+static unsigned long find_aligned_range(unsigned long *bitmap,
+					u32 start, u32 nbits,
+					int len, int align)
+{
+	unsigned long end, i;
+
+again:
+	start = ALIGN(start, align);
+
+	while ((start < nbits) && test_bit(start, bitmap))
+		start += align;
+
+	if (start >= nbits)
+		return -1;
+
+	end = start+len;
+	if (end > nbits)
+		return -1;
+
+	for (i = start + 1; i < end; i++) {
+		if (test_bit(i, bitmap)) {
+			start = i + 1;
+			goto again;
+		}
+	}
+
+	return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+	u32 obj, i;
+
+	if (likely(cnt == 1 && align == 1))
+		return mlx4_bitmap_alloc(bitmap);
 
 	spin_lock(&bitmap->lock);
-	clear_bit(obj, bitmap->table);
+
+	obj = find_aligned_range(bitmap->table, bitmap->last,
+				 bitmap->max, cnt, align);
+	if (obj >= bitmap->max) {
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
+		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+					 cnt, align);
+	}
+
+	if (obj < bitmap->max) {
+		for (i = 0; i < cnt; i++)
+			set_bit(obj + i, bitmap->table);
+		if (obj == bitmap->last) {
+			bitmap->last = (obj + cnt);
+			if (bitmap->last >= bitmap->max)
+				bitmap->last = 0;
+		}
+		obj |= bitmap->top;
+	} else
+		obj = -1;
+
+	spin_unlock(&bitmap->lock);
+
+	return obj;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+	u32 i;
+
+	obj &= bitmap->max + bitmap->reserved_top - 1;
+
+	spin_lock(&bitmap->lock);
+	for (i = 0; i < cnt; i++)
+		clear_bit(obj + i, bitmap->table);
 	bitmap->last = min(bitmap->last, obj);
-	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+			& bitmap->mask;
 	spin_unlock(&bitmap->lock);
 }
 
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 reserved_top)
 {
 	int i;
 
@@ -85,14 +162,16 @@
 
 	bitmap->last = 0;
 	bitmap->top  = 0;
-	bitmap->max  = num;
+	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
+	bitmap->reserved_top = reserved_top;
 	spin_lock_init(&bitmap->lock);
-	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+				sizeof (long), GFP_KERNEL);
 	if (!bitmap->table)
 		return -ENOMEM;
 
-	for (i = 0; i < reserved; ++i)
+	for (i = 0; i < reserved_bot; ++i)
 		set_bit(i, bitmap->table);
 
 	return 0;
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3..b7ad282 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
-			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 0000000..1368a80
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
+{
+	return;
+}
+
+
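+/*
+ * CQ buffer sizing differs per direction: RX CQs allocate room for "entries"
+ * CQEs while TX CQs allocate a single CQE of buffer space here; in both
+ * cases the hardware CQ itself is created with cq->size entries by
+ * mlx4_cq_alloc() in mlx4_en_activate_cq().
+ */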
+int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+		      struct mlx4_en_cq *cq,
+		      int entries, int ring, enum cq_type mode)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	cq->size = entries;
+	if (mode == RX)
+		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+	else
+		cq->buf_size = sizeof(struct mlx4_cqe);
+
+	cq->ring = ring;
+	cq->is_tx = mode;
+	spin_lock_init(&cq->lock);
+
+	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+				cq->buf_size, 2 * PAGE_SIZE);
+	if (err)
+		return err;
+
+	err = mlx4_en_map_buffer(&cq->wqres.buf);
+	if (err)
+		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+
+	return err;
+}
+
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	cq->dev = mdev->pndev[priv->port];
+	cq->mcq.set_ci_db  = cq->wqres.db.db;
+	cq->mcq.arm_db     = cq->wqres.db.db + 1;
+	*cq->mcq.set_ci_db = 0;
+	*cq->mcq.arm_db    = 0;
+	cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+	memset(cq->buf, 0, cq->buf_size);
+
+	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
+			    cq->wqres.db.dma, &cq->mcq, cq->is_tx);
+	if (err)
+		return err;
+
+	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+	cq->mcq.event = mlx4_en_cq_event;
+
+	if (cq->is_tx) {
+		init_timer(&cq->timer);
+		cq->timer.function = mlx4_en_poll_tx_cq;
+		cq->timer.data = (unsigned long) cq;
+	} else {
+		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+		napi_enable(&cq->napi);
+	}
+
+	return 0;
+}
+
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	mlx4_en_unmap_buffer(&cq->wqres.buf);
+	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+	cq->buf_size = 0;
+	cq->buf = NULL;
+}
+
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	if (cq->is_tx)
+		del_timer(&cq->timer);
+	else
+		napi_disable(&cq->napi);
+
+	mlx4_cq_free(mdev->dev, &cq->mcq);
+}
+
+/* Set rx cq moderation parameters */
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
+			      cq->moder_cnt, cq->moder_time);
+}
+
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+	cq->armed = 1;
+	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
+		    &priv->mdev->uar_lock);
+
+	return 0;
+}
+
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 0000000..1b0eebf
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/cpumask.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+
+static const char mlx4_en_version[] =
+	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
+	DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
+			  enum mlx4_dev_event event, int port)
+{
+	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+	struct mlx4_en_priv *priv;
+
+	if (!mdev->pndev[port])
+		return;
+
+	priv = netdev_priv(mdev->pndev[port]);
+	switch (event) {
+	case MLX4_DEV_EVENT_PORT_UP:
+	case MLX4_DEV_EVENT_PORT_DOWN:
+		/* To prevent races, we poll the link state in a separate
+		 * task rather than changing it here */
+		priv->link_state = event;
+		queue_work(mdev->workqueue, &priv->linkstate_task);
+		break;
+
+	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+		mlx4_err(mdev, "Internal error detected, restarting device\n");
+		break;
+
+	default:
+		mlx4_warn(mdev, "Unhandled event: %d\n", event);
+	}
+}
+
+static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+{
+	struct mlx4_en_dev *mdev = endev_ptr;
+	int i;
+
+	mutex_lock(&mdev->state_lock);
+	mdev->device_up = false;
+	mutex_unlock(&mdev->state_lock);
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+		if (mdev->pndev[i])
+			mlx4_en_destroy_netdev(mdev->pndev[i]);
+
+	flush_workqueue(mdev->workqueue);
+	destroy_workqueue(mdev->workqueue);
+	mlx4_mr_free(dev, &mdev->mr);
+	mlx4_uar_free(dev, &mdev->priv_uar);
+	mlx4_pd_free(dev, mdev->priv_pdn);
+	kfree(mdev);
+}
+
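+/*
+ * Per-device bring-up: allocate a PD, a UAR and a memory region, build the
+ * driver profile from the module parameters, create the private workqueue
+ * used for the link state, MAC and multicast tasks, mark the device up and
+ * finally create one netdev per Ethernet port.  The error path unwinds in
+ * reverse order.
+ */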
+static void *mlx4_en_add(struct mlx4_dev *dev)
+{
+	static int mlx4_en_version_printed;
+	struct mlx4_en_dev *mdev;
+	int i;
+	int err;
+
+	if (!mlx4_en_version_printed) {
+		printk(KERN_INFO "%s", mlx4_en_version);
+		mlx4_en_version_printed++;
+	}
+
+	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+	if (!mdev) {
+		dev_err(&dev->pdev->dev, "Device struct alloc failed, "
+			"aborting.\n");
+		err = -ENOMEM;
+		goto err_free_res;
+	}
+
+	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+		goto err_free_dev;
+
+	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+		goto err_pd;
+
+	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	if (!mdev->uar_map)
+		goto err_uar;
+	spin_lock_init(&mdev->uar_lock);
+
+	mdev->dev = dev;
+	mdev->dma_device = &(dev->pdev->dev);
+	mdev->pdev = dev->pdev;
+	mdev->device_up = false;
+
+	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
+	if (!mdev->LSO_support)
+		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
+				"FW version to enable LSO\n");
+
+	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
+			 0, 0, &mdev->mr)) {
+		mlx4_err(mdev, "Failed allocating memory region\n");
+		goto err_uar;
+	}
+	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+		mlx4_err(mdev, "Failed enabling memory region\n");
+		goto err_mr;
+	}
+
+	/* Build device profile according to supplied module parameters */
+	err = mlx4_en_get_profile(mdev);
+	if (err) {
+		mlx4_err(mdev, "Bad module parameters, aborting.\n");
+		goto err_mr;
+	}
+
+	/* Configure which ports to start according to module parameters */
+	mdev->port_cnt = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+		mdev->port_cnt++;
+
+	/* If we did not receive an explicit number of Rx rings, default to
+	 * a single Rx ring per port */
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
+			  mdev->profile.prof[i].tx_ring_num, i);
+		if (!mdev->profile.prof[i].rx_ring_num) {
+			mdev->profile.prof[i].rx_ring_num = 1;
+			mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+				  1, i);
+		} else
+			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
+				  mdev->profile.prof[i].rx_ring_num, i);
+	}
+
+	/* Create our own workqueue for reset/multicast tasks
+	 * Note: we cannot use the shared workqueue because of deadlocks caused
+	 *       by the rtnl lock */
+	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
+	if (!mdev->workqueue) {
+		err = -ENOMEM;
+		goto err_close_nic;
+	}
+
+	/* At this stage all non-port specific tasks are complete:
+	 * mark the card state as up */
+	mutex_init(&mdev->state_lock);
+	mdev->device_up = true;
+
+	/* Setup ports */
+
+	/* Create a netdev for each port */
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		mlx4_info(mdev, "Activating port:%d\n", i);
+		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+			mdev->pndev[i] = NULL;
+			goto err_free_netdev;
+		}
+	}
+	return mdev;
+
+
+err_free_netdev:
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		if (mdev->pndev[i])
+			mlx4_en_destroy_netdev(mdev->pndev[i]);
+	}
+
+	mutex_lock(&mdev->state_lock);
+	mdev->device_up = false;
+	mutex_unlock(&mdev->state_lock);
+	flush_workqueue(mdev->workqueue);
+
+	/* Stop event queue before we drop down to release shared SW state */
+
+err_close_nic:
+	destroy_workqueue(mdev->workqueue);
+err_mr:
+	mlx4_mr_free(dev, &mdev->mr);
+err_uar:
+	mlx4_uar_free(dev, &mdev->priv_uar);
+err_pd:
+	mlx4_pd_free(dev, mdev->priv_pdn);
+err_free_dev:
+	kfree(mdev);
+err_free_res:
+	return NULL;
+}
+
+static struct mlx4_interface mlx4_en_interface = {
+	.add	= mlx4_en_add,
+	.remove	= mlx4_en_remove,
+	.event	= mlx4_en_event,
+};
+
+static int __init mlx4_en_init(void)
+{
+	return mlx4_register_interface(&mlx4_en_interface);
+}
+
+static void __exit mlx4_en_cleanup(void)
+{
+	mlx4_unregister_interface(&mlx4_en_interface);
+}
+
+module_init(mlx4_en_init);
+module_exit(mlx4_en_cleanup);
+
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 0000000..a339afb
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+
+static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+	priv->vlgrp = grp;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up && priv->port_up) {
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
+		if (err)
+			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	if (!priv->vlgrp)
+		return;
+
+	mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+		 vid, vlan_group_get_device(priv->vlgrp, vid));
+
+	/* Add VID to port VLAN filter */
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up && priv->port_up) {
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+		if (err)
+			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	if (!priv->vlgrp)
+		return;
+
+	mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
+		 "entry:%p)\n", vid, priv->vlgrp,
+		 vlan_group_get_device(priv->vlgrp, vid));
+	vlan_group_set_device(priv->vlgrp, vid, NULL);
+
+	/* Remove VID from port VLAN filter */
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up && priv->port_up) {
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+		if (err)
+			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
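+/* Pack the 6-byte Ethernet address MSB-first into the u64 form kept in
+ * priv->mac and passed to mlx4_register_mac(); 00:11:22:33:44:55 becomes
+ * 0x001122334455. */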
+static u64 mlx4_en_mac_to_u64(u8 *addr)
+{
+	u64 mac = 0;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac <<= 8;
+		mac |= addr[i];
+	}
+	return mac;
+}
+
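+/*
+ * Setting the MAC only validates and records the new address here; the
+ * firmware update (unregister the old MAC, register the new one) is deferred
+ * to mac_task on the driver workqueue, see mlx4_en_do_set_mac() below.
+ */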
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
+	queue_work(mdev->workqueue, &priv->mac_task);
+	return 0;
+}
+
+static void mlx4_en_do_set_mac(struct work_struct *work)
+{
+	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+						 mac_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err = 0;
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		/* Remove old MAC and insert the new one */
+		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+		err = mlx4_register_mac(mdev->dev, priv->port,
+					priv->mac, &priv->mac_index);
+		if (err)
+			mlx4_err(mdev, "Failed changing HW MAC address\n");
+	} else
+		mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_clear_list(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct dev_mc_list *plist = priv->mc_list;
+	struct dev_mc_list *next;
+
+	while (plist) {
+		next = plist->next;
+		kfree(plist);
+		plist = next;
+	}
+	priv->mc_list = NULL;
+}
+
+static void mlx4_en_cache_mclist(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct dev_mc_list *mclist;
+	struct dev_mc_list *tmp;
+	struct dev_mc_list *plist = NULL;
+
+	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
+		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
+		if (!tmp) {
+			mlx4_err(mdev, "failed to allocate multicast list\n");
+			mlx4_en_clear_list(dev);
+			return;
+		}
+		memcpy(tmp, mclist, sizeof(struct dev_mc_list));
+		tmp->next = NULL;
+		if (plist)
+			plist->next = tmp;
+		else
+			priv->mc_list = tmp;
+		plist = tmp;
+	}
+}
+
+
+static void mlx4_en_set_multicast(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	if (!priv->port_up)
+		return;
+
+	queue_work(priv->mdev->workqueue, &priv->mcast_task);
+}
+
+static void mlx4_en_do_set_multicast(struct work_struct *work)
+{
+	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+						 mcast_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct net_device *dev = priv->dev;
+	struct dev_mc_list *mclist;
+	u64 mcast_addr = 0;
+	int err;
+
+	mutex_lock(&mdev->state_lock);
+	if (!mdev->device_up) {
+		mlx4_dbg(HW, priv, "Card is not up, ignoring "
+				   "multicast change.\n");
+		goto out;
+	}
+	if (!priv->port_up) {
+		mlx4_dbg(HW, priv, "Port is down, ignoring "
+				   "multicast change.\n");
+		goto out;
+	}
+
+	/*
+	 * Promiscuous mode: disable all filters
+	 */
+
+	if (dev->flags & IFF_PROMISC) {
+		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
+			if (netif_msg_rx_status(priv))
+				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
+					  priv->port);
+			priv->flags |= MLX4_EN_FLAG_PROMISC;
+
+			/* Enable promiscuous mode */
+			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+						     priv->base_qpn, 1);
+			if (err)
+				mlx4_err(mdev, "Failed enabling "
+					 "promiscous mode\n");
+
+			/* Disable port multicast filter (unconditionally) */
+			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+						  0, MLX4_MCAST_DISABLE);
+			if (err)
+				mlx4_err(mdev, "Failed disabling "
+					 "multicast filter\n");
+
+			/* Disable port VLAN filter */
+			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+			if (err)
+				mlx4_err(mdev, "Failed disabling "
+					 "VLAN filter\n");
+		}
+		goto out;
+	}
+
+	/*
+	 * Not in promiscuous mode
+	 */
+
+	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+		if (netif_msg_rx_status(priv))
+			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
+				  priv->port);
+		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+		/* Disable promiscuous mode */
+		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+					     priv->base_qpn, 0);
+		if (err)
+			mlx4_err(mdev, "Failed disabling promiscous mode\n");
+
+		/* Enable port VLAN filter */
+		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+		if (err)
+			mlx4_err(mdev, "Failed enabling VLAN filter\n");
+	}
+
+	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
+	if (dev->flags & IFF_ALLMULTI) {
+		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+					  0, MLX4_MCAST_DISABLE);
+		if (err)
+			mlx4_err(mdev, "Failed disabling multicast filter\n");
+	} else {
+		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+					  0, MLX4_MCAST_DISABLE);
+		if (err)
+			mlx4_err(mdev, "Failed disabling multicast filter\n");
+
+		/* Flush mcast filter and init it with broadcast address */
+		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
+				    1, MLX4_MCAST_CONFIG);
+
+		/* Update multicast list - we cache all addresses so they won't
+		 * change while HW is updated holding the command semaphore */
+		netif_tx_lock_bh(dev);
+		mlx4_en_cache_mclist(dev);
+		netif_tx_unlock_bh(dev);
+		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
+			mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
+					    mcast_addr, 0, MLX4_MCAST_CONFIG);
+		}
+		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+					  0, MLX4_MCAST_ENABLE);
+		if (err)
+			mlx4_err(mdev, "Failed enabling multicast filter\n");
+
+		mlx4_en_clear_list(dev);
+	}
+out:
+	mutex_unlock(&mdev->state_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mlx4_en_netpoll(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_cq *cq;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		cq = &priv->rx_cq[i];
+		spin_lock_irqsave(&cq->lock, flags);
+		napi_synchronize(&cq->napi);
+		mlx4_en_process_rx_cq(dev, cq, 0);
+		spin_unlock_irqrestore(&cq->lock, flags);
+	}
+}
+#endif
+
+static void mlx4_en_tx_timeout(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	if (netif_msg_timer(priv))
+		mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+
+	if (netif_carrier_ok(dev)) {
+		priv->port_stats.tx_timeout++;
+		mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+		queue_work(mdev->workqueue, &priv->watchdog_task);
+	}
+}
+
+
+static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	spin_lock_bh(&priv->stats_lock);
+	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+	spin_unlock_bh(&priv->stats_lock);
+
+	return &priv->ret_stats;
+}
+
+static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
+	int i;
+
+	/* If we haven't received a specific coalescing setting
+	 * (module param), we set the moderation parameters as follows:
+	 * - moder_cnt is set to the number of MTU-sized packets needed to
+	 *   satisfy our coalescing target.
+	 * - moder_time is set to a fixed value.
+	 */
+	priv->rx_frames = (mdev->profile.rx_moder_cnt ==
+			   MLX4_EN_AUTO_CONF) ?
+				MLX4_EN_RX_COAL_TARGET /
+				priv->dev->mtu + 1 :
+				mdev->profile.rx_moder_cnt;
+	priv->rx_usecs = (mdev->profile.rx_moder_time ==
+			  MLX4_EN_AUTO_CONF) ?
+				MLX4_EN_RX_COAL_TIME :
+				mdev->profile.rx_moder_time;
+	mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+			     "rx_frames:%d rx_usecs:%d\n",
+		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+
+	/* Setup cq moderation params */
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		cq = &priv->rx_cq[i];
+		cq->moder_cnt = priv->rx_frames;
+		cq->moder_time = priv->rx_usecs;
+	}
+
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		cq = &priv->tx_cq[i];
+		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
+		cq->moder_time = MLX4_EN_TX_COAL_TIME;
+	}
+
+	/* Reset auto-moderation params */
+	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
+	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
+	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
+	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
+	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
+	priv->adaptive_rx_coal = mdev->profile.auto_moder;
+	priv->last_moder_time = MLX4_EN_AUTO_CONF;
+	priv->last_moder_jiffies = 0;
+	priv->last_moder_packets = 0;
+	priv->last_moder_tx_packets = 0;
+	priv->last_moder_bytes = 0;
+}
+
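+/* Adjust Rx interrupt moderation according to the traffic observed since
+ * the last call (invoked from the stats task).  At high packet rates with
+ * unbalanced Tx/Rx the maximum moderation time is used, at moderate rates
+ * the time is interpolated between the low and high limits, and at low
+ * rates the default moderation time is restored. */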
+static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
+{
+	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
+	unsigned long packets;
+	unsigned long rate;
+	unsigned long avg_pkt_size;
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_pkt_diff;
+	unsigned long rx_pkt_diff;
+	int moder_time;
+	int i, err;
+
+	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
+		return;
+
+	spin_lock_bh(&priv->stats_lock);
+	rx_packets = priv->stats.rx_packets;
+	rx_bytes = priv->stats.rx_bytes;
+	tx_packets = priv->stats.tx_packets;
+	spin_unlock_bh(&priv->stats_lock);
+
+	if (!priv->last_moder_jiffies || !period)
+		goto out;
+
+	tx_pkt_diff = ((unsigned long) (tx_packets -
+					priv->last_moder_tx_packets));
+	rx_pkt_diff = ((unsigned long) (rx_packets -
+					priv->last_moder_packets));
+	packets = max(tx_pkt_diff, rx_pkt_diff);
+	rate = packets * HZ / period;
+	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
+				 priv->last_moder_bytes)) / packets : 0;
+
+	/* Apply auto-moderation only when the packet rate is high enough
+	 * for it to matter */
+	if (rate > MLX4_EN_RX_RATE_THRESH) {
+		/* If tx and rx packet rates are not balanced, assume that
+		 * traffic is mainly BW bound and apply maximum moderation.
+		 * Otherwise, moderate according to packet rate */
+		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
+			moder_time = priv->rx_usecs_high;
+		} else {
+			if (rate < priv->pkt_rate_low)
+				moder_time = priv->rx_usecs_low;
+			else if (rate > priv->pkt_rate_high)
+				moder_time = priv->rx_usecs_high;
+			else
+				moder_time = (rate - priv->pkt_rate_low) *
+					(priv->rx_usecs_high - priv->rx_usecs_low) /
+					(priv->pkt_rate_high - priv->pkt_rate_low) +
+					priv->rx_usecs_low;
+		}
+	} else {
+		/* When packet rate is low, use default moderation rather than
+		 * 0 to prevent interrupt storms if traffic suddenly increases */
+		moder_time = priv->rx_usecs;
+	}
+
+	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+		 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+
+	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+		 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+		 priv->last_moder_time, moder_time, period, packets,
+		 avg_pkt_size, rate);
+
+	if (moder_time != priv->last_moder_time) {
+		priv->last_moder_time = moder_time;
+		for (i = 0; i < priv->rx_ring_num; i++) {
+			cq = &priv->rx_cq[i];
+			cq->moder_time = moder_time;
+			err = mlx4_en_set_cq_moder(priv, cq);
+			if (err) {
+				mlx4_err(mdev, "Failed modifying moderation for cq:%d "
+					 "on port:%d\n", i, priv->port);
+				break;
+			}
+		}
+	}
+
+out:
+	priv->last_moder_packets = rx_packets;
+	priv->last_moder_tx_packets = tx_packets;
+	priv->last_moder_bytes = rx_bytes;
+	priv->last_moder_jiffies = jiffies;
+}
+
+static void mlx4_en_do_get_stats(struct work_struct *work)
+{
+	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+						 stats_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+	if (err)
+		mlx4_dbg(HW, priv, "Could not update stats for "
+				   "port:%d\n", priv->port);
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->device_up) {
+		if (priv->port_up)
+			mlx4_en_auto_moderation(priv);
+
+		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_linkstate(struct work_struct *work)
+{
+	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+						 linkstate_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int linkstate = priv->link_state;
+
+	mutex_lock(&mdev->state_lock);
+	/* If the observed port state changed, set the carrier state and
+	 * report to the system log */
+	if (priv->last_link_state != linkstate) {
+		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
+			if (netif_msg_link(priv))
+				mlx4_info(mdev, "Port %d - link down\n", priv->port);
+			netif_carrier_off(priv->dev);
+		} else {
+			if (netif_msg_link(priv))
+				mlx4_info(mdev, "Port %d - link up\n", priv->port);
+			netif_carrier_on(priv->dev);
+		}
+	}
+	priv->last_link_state = linkstate;
+	mutex_unlock(&mdev->state_lock);
+}
+
+
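+/* Bring the port up: activate Rx CQs and rings, configure RSS steering,
+ * activate Tx CQs and rings, apply the general port configuration (MTU,
+ * pause frames), set the default QP and MAC, and finally run INIT_PORT.
+ * On any failure the resources set up so far are released again. */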
+static int mlx4_en_start_port(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
+	struct mlx4_en_tx_ring *tx_ring;
+	struct mlx4_en_rx_ring *rx_ring;
+	int rx_index = 0;
+	int tx_index = 0;
+	u16 stride;
+	int err = 0;
+	int i;
+	int j;
+
+	if (priv->port_up) {
+		mlx4_dbg(DRV, priv, "start port called while port already up\n");
+		return 0;
+	}
+
+	/* Calculate Rx buf size */
+	dev->mtu = min(dev->mtu, priv->max_mtu);
+	mlx4_en_calc_rx_buf(dev);
+	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+	stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+				    DS_SIZE * priv->num_frags);
+	/* Configure rx cq's and rings */
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		cq = &priv->rx_cq[i];
+		rx_ring = &priv->rx_ring[i];
+
+		err = mlx4_en_activate_cq(priv, cq);
+		if (err) {
+			mlx4_err(mdev, "Failed activating Rx CQ\n");
+			goto rx_err;
+		}
+		for (j = 0; j < cq->size; j++)
+			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+		err = mlx4_en_set_cq_moder(priv, cq);
+		if (err) {
+			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			mlx4_en_deactivate_cq(priv, cq);
+			goto cq_err;
+		}
+		mlx4_en_arm_cq(priv, cq);
+
+		++rx_index;
+	}
+
+	err = mlx4_en_activate_rx_rings(priv);
+	if (err) {
+		mlx4_err(mdev, "Failed to activate RX rings\n");
+		goto cq_err;
+	}
+
+	err = mlx4_en_config_rss_steer(priv);
+	if (err) {
+		mlx4_err(mdev, "Failed configuring rss steering\n");
+		goto rx_err;
+	}
+
+	/* Configure tx cq's and rings */
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		/* Configure cq */
+		cq = &priv->tx_cq[i];
+		err = mlx4_en_activate_cq(priv, cq);
+		if (err) {
+			mlx4_err(mdev, "Failed allocating Tx CQ\n");
+			goto tx_err;
+		}
+		err = mlx4_en_set_cq_moder(priv, cq);
+		if (err) {
+			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			mlx4_en_deactivate_cq(priv, cq);
+			goto tx_err;
+		}
+		mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+		cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+		/* Configure ring */
+		tx_ring = &priv->tx_ring[i];
+		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
+					       priv->rx_ring[0].srq.srqn);
+		if (err) {
+			mlx4_err(mdev, "Failed allocating Tx ring\n");
+			mlx4_en_deactivate_cq(priv, cq);
+			goto tx_err;
+		}
+		/* Set initial ownership of all Tx TXBBs to SW (1) */
+		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
+		++tx_index;
+	}
+
+	/* Configure port */
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    mdev->profile.tx_pause,
+				    mdev->profile.tx_ppp,
+				    mdev->profile.rx_pause,
+				    mdev->profile.rx_ppp);
+	if (err) {
+		mlx4_err(mdev, "Failed setting port general configurations"
+			       " for port %d, with error %d\n", priv->port, err);
+		goto tx_err;
+	}
+	/* Set default qp number */
+	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
+	if (err) {
+		mlx4_err(mdev, "Failed setting default qp numbers\n");
+		goto tx_err;
+	}
+	/* Set port mac number */
+	mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	err = mlx4_register_mac(mdev->dev, priv->port,
+				priv->mac, &priv->mac_index);
+	if (err) {
+		mlx4_err(mdev, "Failed setting port mac\n");
+		goto tx_err;
+	}
+
+	/* Init port */
+	mlx4_dbg(HW, priv, "Initializing port\n");
+	err = mlx4_INIT_PORT(mdev->dev, priv->port);
+	if (err) {
+		mlx4_err(mdev, "Failed Initializing port\n");
+		goto mac_err;
+	}
+
+	/* Schedule multicast task to populate multicast list */
+	queue_work(mdev->workqueue, &priv->mcast_task);
+
+	priv->port_up = true;
+	netif_start_queue(dev);
+	return 0;
+
+mac_err:
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+tx_err:
+	while (tx_index--) {
+		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+	}
+
+	mlx4_en_release_rss_steer(priv);
+rx_err:
+	for (i = 0; i < priv->rx_ring_num; i++)
+		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[rx_index]);
+cq_err:
+	while (rx_index--)
+		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+
+	return err; /* need to close devices */
+}
+
+
+static void mlx4_en_stop_port(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int i;
+
+	if (!priv->port_up) {
+		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
+			 priv->port);
+		return;
+	}
+	netif_stop_queue(dev);
+
+	/* Synchronize with tx routine */
+	netif_tx_lock_bh(dev);
+	priv->port_up = false;
+	netif_tx_unlock_bh(dev);
+
+	/* Close port */
+	mlx4_CLOSE_PORT(mdev->dev, priv->port);
+
+	/* Unregister MAC address for the port */
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+
+	/* Free TX Rings */
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+	}
+	msleep(10);
+
+	for (i = 0; i < priv->tx_ring_num; i++)
+		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+
+	/* Free RSS qps */
+	mlx4_en_release_rss_steer(priv);
+
+	/* Free RX Rings */
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+			msleep(1);
+		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+	}
+}
+
+static void mlx4_en_restart(struct work_struct *work)
+{
+	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+						 watchdog_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct net_device *dev = priv->dev;
+
+	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+	mlx4_en_stop_port(dev);
+	if (mlx4_en_start_port(dev))
+	    mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+}
+
+
+static int mlx4_en_open(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int i;
+	int err = 0;
+
+	mutex_lock(&mdev->state_lock);
+
+	if (!mdev->device_up) {
+		mlx4_err(mdev, "Cannot open - device down/disabled\n");
+		err = -EBUSY;
+		goto out;
+	}
+
+	/* Reset HW statistics and performance counters */
+	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+		mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+
+	memset(&priv->stats, 0, sizeof(priv->stats));
+	memset(&priv->pstats, 0, sizeof(priv->pstats));
+
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		priv->tx_ring[i].bytes = 0;
+		priv->tx_ring[i].packets = 0;
+	}
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		priv->rx_ring[i].bytes = 0;
+		priv->rx_ring[i].packets = 0;
+	}
+
+	mlx4_en_set_default_moderation(priv);
+	err = mlx4_en_start_port(dev);
+	if (err)
+		mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
+}
+
+
+static int mlx4_en_close(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	if (netif_msg_ifdown(priv))
+		mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+
+	mutex_lock(&mdev->state_lock);
+
+	mlx4_en_stop_port(dev);
+	netif_carrier_off(dev);
+
+	mutex_unlock(&mdev->state_lock);
+	return 0;
+}
+
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i].tx_info)
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i].buf)
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
+
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i].rx_info)
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+		if (priv->rx_cq[i].buf)
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+}
+
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile *prof = priv->prof;
+	int i;
+
+	/* Create tx Rings */
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
+				      prof->tx_ring_size, i, TX))
+			goto err;
+
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+					   prof->tx_ring_size, TXBB_SIZE))
+			goto err;
+	}
+
+	/* Create rx Rings */
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
+				      prof->rx_ring_size, i, RX))
+			goto err;
+
+		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
+					   prof->rx_ring_size, priv->stride))
+			goto err;
+	}
+
+	return 0;
+
+err:
+	mlx4_err(mdev, "Failed to allocate NIC resources\n");
+	return -ENOMEM;
+}
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+
+	/* Unregister device - this will close the port if it was up */
+	if (priv->registered)
+		unregister_netdev(dev);
+
+	if (priv->allocated)
+		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
+
+	cancel_delayed_work(&priv->stats_task);
+	cancel_delayed_work(&priv->refill_task);
+	/* flush any pending task for this netdev */
+	flush_workqueue(mdev->workqueue);
+
+	/* Detach the netdev so that tasks do not attempt to access it */
+	mutex_lock(&mdev->state_lock);
+	mdev->pndev[priv->port] = NULL;
+	mutex_unlock(&mdev->state_lock);
+
+	mlx4_en_free_resources(priv);
+	free_netdev(dev);
+}
+
+static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err = 0;
+
+	mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+		 dev->mtu, new_mtu);
+
+	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
+		mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+		return -EPERM;
+	}
+	dev->mtu = new_mtu;
+
+	if (netif_running(dev)) {
+		mutex_lock(&mdev->state_lock);
+		if (!mdev->device_up) {
+			/* NIC is probably restarting - let watchdog task reset
+			 * the port */
+			mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+		} else {
+			mlx4_en_stop_port(dev);
+			mlx4_en_set_default_moderation(priv);
+			err = mlx4_en_start_port(dev);
+			if (err) {
+				mlx4_err(mdev, "Failed restarting port:%d\n",
+					 priv->port);
+				queue_work(mdev->workqueue, &priv->watchdog_task);
+			}
+		}
+		mutex_unlock(&mdev->state_lock);
+	}
+	return 0;
+}
+
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+			struct mlx4_en_port_profile *prof)
+{
+	struct net_device *dev;
+	struct mlx4_en_priv *priv;
+	int i;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+	if (dev == NULL) {
+		mlx4_err(mdev, "Net device allocation failed\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+
+	/*
+	 * Initialize driver private data
+	 */
+
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof(struct mlx4_en_priv));
+	priv->dev = dev;
+	priv->mdev = mdev;
+	priv->prof = prof;
+	priv->port = port;
+	priv->port_up = false;
+	priv->rx_csum = 1;
+	priv->flags = prof->flags;
+	priv->tx_ring_num = prof->tx_ring_num;
+	priv->rx_ring_num = prof->rx_ring_num;
+	priv->mc_list = NULL;
+	priv->mac_index = -1;
+	priv->msg_enable = MLX4_EN_MSG_LEVEL;
+	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
+	INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
+	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+
+	/* Query for default mac and max mtu */
+	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+	priv->mac = mdev->dev->caps.def_mac[priv->port];
+	if (ILLEGAL_MAC(priv->mac)) {
+		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+			 priv->port, priv->mac);
+		err = -EINVAL;
+		goto out;
+	}
+
+	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+	err = mlx4_en_alloc_resources(priv);
+	if (err)
+		goto out;
+
+	/* Populate Rx default RSS mappings */
+	mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
+						RSS_FACTOR, priv->rx_ring_num);
+	/* Allocate page for receive rings */
+	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
+				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+	if (err) {
+		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+		goto out;
+	}
+	priv->allocated = 1;
+
+	/* Populate Tx priority mappings */
+	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
+
+	/*
+	 * Initialize netdev entry points
+	 */
+
+	dev->open = &mlx4_en_open;
+	dev->stop = &mlx4_en_close;
+	dev->hard_start_xmit = &mlx4_en_xmit;
+	dev->get_stats = &mlx4_en_get_stats;
+	dev->set_multicast_list = &mlx4_en_set_multicast;
+	dev->set_mac_address = &mlx4_en_set_mac;
+	dev->change_mtu = &mlx4_en_change_mtu;
+	dev->tx_timeout = &mlx4_en_tx_timeout;
+	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+	dev->vlan_rx_register = mlx4_en_vlan_rx_register;
+	dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
+	dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = mlx4_en_netpoll;
+#endif
+	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+
+	/* Set default MAC */
+	dev->addr_len = ETH_ALEN;
+	for (i = 0; i < ETH_ALEN; i++)
+		dev->dev_addr[ETH_ALEN - 1 - i] =
+		(u8) (priv->mac >> (8 * i));
+
+	/*
+	 * Set driver features
+	 */
+	dev->features |= NETIF_F_SG;
+	dev->features |= NETIF_F_HW_CSUM;
+	dev->features |= NETIF_F_HIGHDMA;
+	dev->features |= NETIF_F_HW_VLAN_TX |
+			 NETIF_F_HW_VLAN_RX |
+			 NETIF_F_HW_VLAN_FILTER;
+	if (mdev->profile.num_lro)
+		dev->features |= NETIF_F_LRO;
+	if (mdev->LSO_support) {
+		dev->features |= NETIF_F_TSO;
+		dev->features |= NETIF_F_TSO6;
+	}
+
+	mdev->pndev[port] = dev;
+
+	netif_carrier_off(dev);
+	err = register_netdev(dev);
+	if (err) {
+		mlx4_err(mdev, "Netdev registration failed\n");
+		goto out;
+	}
+	priv->registered = 1;
+	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+	return 0;
+
+out:
+	mlx4_en_destroy_netdev(dev);
+	return err;
+}
+
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 0000000..c2e69b1
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
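+/* Declare a module parameter: an unsigned int with a default value and a
+ * description, exposed read-only (0444) in sysfs. */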
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+	static unsigned int X = def_val;\
+	module_param(X , uint, 0444); \
+	MODULE_PARM_DESC(X, desc);
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Use an XOR rather than a Toeplitz hash function for RSS */
+MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
+
+/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
+MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
+
+/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
+MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
+		 "Number of LRO sessions per ring or disabled (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
+		 "Pause policy on TX: 0 never generate pause frames "
+		 "1 generate pause frames according to RX buffer threshold");
+MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
+		 "Pause policy on RX: 0 ignore received pause frames "
+		 "1 respect received pause frames");
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+			   " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+			   " Per priority bit mask");
+
+/* Interrupt moderation tuning */
+MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
+	       "Max coalesced descriptors for Rx interrupt moderation");
+MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
+	       "Timeout following last packet for Rx interrupt moderation");
+MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
+
+MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
+MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
+
+MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
+MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
+MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
+MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
+
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+	struct mlx4_en_profile *params = &mdev->profile;
+
+	params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
+	params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
+	params->auto_moder = auto_moder;
+	params->rss_xor = (rss_xor != 0);
+	params->rss_mask = rss_mask & 0x1f;
+	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
+	params->rx_pause = pprx;
+	params->rx_ppp = pfcrx;
+	params->tx_pause = pptx;
+	params->tx_ppp = pfctx;
+	if (params->rx_ppp || params->tx_ppp) {
+		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
+		params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
+	} else {
+		params->prof[1].tx_ring_num = 1;
+		params->prof[2].tx_ring_num = 1;
+	}
+	params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
+	params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
+
+	if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
+		tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
+	params->prof[1].tx_ring_size =
+		(tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
+		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
+
+	if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
+		tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
+	params->prof[2].tx_ring_size =
+		(tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
+		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
+
+	if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
+		rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
+	params->prof[1].rx_ring_size =
+		(rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
+		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
+
+	if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
+		rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
+	params->prof[2].rx_ring_size =
+		(rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
+		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
+	return 0;
+}
+
+
+/*
+ * Ethtool support
+ */
+
+static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
+{
+	int i;
+
+	priv->port_stats.lro_aggregated = 0;
+	priv->port_stats.lro_flushed = 0;
+	priv->port_stats.lro_no_desc = 0;
+
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
+		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
+		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
+	}
+}
+
+static void
+mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
+	sprintf(drvinfo->fw_version, "%d.%d.%d",
+		(u16) (mdev->dev->caps.fw_ver >> 32),
+		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
+		(u16) (mdev->dev->caps.fw_ver & 0xffff));
+	strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static u32 mlx4_en_get_tso(struct net_device *dev)
+{
+	return (dev->features & NETIF_F_TSO) != 0;
+}
+
+static int mlx4_en_set_tso(struct net_device *dev, u32 data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	if (data) {
+		if (!priv->mdev->LSO_support)
+			return -EPERM;
+		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+	} else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+	return 0;
+}
+
+static u32 mlx4_en_get_rx_csum(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	return priv->rx_csum;
+}
+
+static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	priv->rx_csum = (data != 0);
+	return 0;
+}
+
+static const char main_strings[][ETH_GSTRING_LEN] = {
+	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
+	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+	"tx_heartbeat_errors", "tx_window_errors",
+
+	/* port statistics */
+	"lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
+	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+
+	/* packet statistics */
+	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
+	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
+	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
+	"tx_prio_6", "tx_prio_7",
+};
+#define NUM_MAIN_STATS	21
+#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+
+static u32 mlx4_en_get_msglevel(struct net_device *dev)
+{
+	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
+}
+
+static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
+{
+	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
+}
+
+static void mlx4_en_get_wol(struct net_device *netdev,
+			    struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+
+	return;
+}
+
+static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+
+	return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+}
+
+static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, uint64_t *data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int index = 0;
+	int i;
+
+	spin_lock_bh(&priv->stats_lock);
+
+	mlx4_en_update_lro_stats(priv);
+
+	for (i = 0; i < NUM_MAIN_STATS; i++)
+		data[index++] = ((unsigned long *) &priv->stats)[i];
+	for (i = 0; i < NUM_PORT_STATS; i++)
+		data[index++] = ((unsigned long *) &priv->port_stats)[i];
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		data[index++] = priv->tx_ring[i].packets;
+		data[index++] = priv->tx_ring[i].bytes;
+	}
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		data[index++] = priv->rx_ring[i].packets;
+		data[index++] = priv->rx_ring[i].bytes;
+	}
+	for (i = 0; i < NUM_PKT_STATS; i++)
+		data[index++] = ((unsigned long *) &priv->pkstats)[i];
+	spin_unlock_bh(&priv->stats_lock);
+
+}
+
+static void mlx4_en_get_strings(struct net_device *dev,
+				uint32_t stringset, uint8_t *data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int index = 0;
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	/* Add main counters */
+	for (i = 0; i < NUM_MAIN_STATS; i++)
+		strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+	for (i = 0; i < NUM_PORT_STATS; i++)
+		strcpy(data + (index++) * ETH_GSTRING_LEN,
+			main_strings[i + NUM_MAIN_STATS]);
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		sprintf(data + (index++) * ETH_GSTRING_LEN,
+			"tx%d_packets", i);
+		sprintf(data + (index++) * ETH_GSTRING_LEN,
+			"tx%d_bytes", i);
+	}
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		sprintf(data + (index++) * ETH_GSTRING_LEN,
+			"rx%d_packets", i);
+		sprintf(data + (index++) * ETH_GSTRING_LEN,
+			"rx%d_bytes", i);
+	}
+	for (i = 0; i < NUM_PKT_STATS; i++)
+		strcpy(data + (index++) * ETH_GSTRING_LEN,
+			main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	cmd->autoneg = AUTONEG_DISABLE;
+	cmd->supported = SUPPORTED_10000baseT_Full;
+	cmd->advertising = ADVERTISED_10000baseT_Full;
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = SPEED_10000;
+		cmd->duplex = DUPLEX_FULL;
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+	return 0;
+}
+
+static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	if ((cmd->autoneg == AUTONEG_ENABLE) ||
+	    (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
+		return -EINVAL;
+
+	/* Nothing to change */
+	return 0;
+}
+
+static int mlx4_en_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	coal->tx_coalesce_usecs = 0;
+	coal->tx_max_coalesced_frames = 0;
+	coal->rx_coalesce_usecs = priv->rx_usecs;
+	coal->rx_max_coalesced_frames = priv->rx_frames;
+
+	coal->pkt_rate_low = priv->pkt_rate_low;
+	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
+	coal->pkt_rate_high = priv->pkt_rate_high;
+	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
+	coal->rate_sample_interval = priv->sample_interval;
+	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+	return 0;
+}
+
+static int mlx4_en_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int err, i;
+
+	priv->rx_frames = (coal->rx_max_coalesced_frames ==
+			   MLX4_EN_AUTO_CONF) ?
+				MLX4_EN_RX_COAL_TARGET /
+				priv->dev->mtu + 1 :
+				coal->rx_max_coalesced_frames;
+	priv->rx_usecs = (coal->rx_coalesce_usecs ==
+			  MLX4_EN_AUTO_CONF) ?
+				MLX4_EN_RX_COAL_TIME :
+				coal->rx_coalesce_usecs;
+
+	/* Set adaptive coalescing params */
+	priv->pkt_rate_low = coal->pkt_rate_low;
+	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
+	priv->pkt_rate_high = coal->pkt_rate_high;
+	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
+	priv->sample_interval = coal->rate_sample_interval;
+	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+	priv->last_moder_time = MLX4_EN_AUTO_CONF;
+	if (priv->adaptive_rx_coal)
+		return 0;
+
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		priv->rx_cq[i].moder_cnt = priv->rx_frames;
+		priv->rx_cq[i].moder_time = priv->rx_usecs;
+		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int mlx4_en_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *pause)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	mdev->profile.tx_pause = pause->tx_pause != 0;
+	mdev->profile.rx_pause = pause->rx_pause != 0;
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    mdev->profile.tx_pause,
+				    mdev->profile.tx_ppp,
+				    mdev->profile.rx_pause,
+				    mdev->profile.rx_ppp);
+	if (err)
+		mlx4_err(mdev, "Failed setting pause params to\n");
+
+	return err;
+}
+
+static void mlx4_en_get_pauseparam(struct net_device *dev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	pause->tx_pause = mdev->profile.tx_pause;
+	pause->rx_pause = mdev->profile.rx_pause;
+}
+
+static void mlx4_en_get_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *param)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	memset(param, 0, sizeof(*param));
+	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
+	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
+	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
+	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
+}
+
+const struct ethtool_ops mlx4_en_ethtool_ops = {
+	.get_drvinfo = mlx4_en_get_drvinfo,
+	.get_settings = mlx4_en_get_settings,
+	.set_settings = mlx4_en_set_settings,
+#ifdef NETIF_F_TSO
+	.get_tso = mlx4_en_get_tso,
+	.set_tso = mlx4_en_set_tso,
+#endif
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_link = ethtool_op_get_link,
+	.get_rx_csum = mlx4_en_get_rx_csum,
+	.set_rx_csum = mlx4_en_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+	.get_strings = mlx4_en_get_strings,
+	.get_sset_count = mlx4_en_get_sset_count,
+	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
+	.get_wol = mlx4_en_get_wol,
+	.get_msglevel = mlx4_en_get_msglevel,
+	.set_msglevel = mlx4_en_set_msglevel,
+	.get_coalesce = mlx4_en_get_coalesce,
+	.set_coalesce = mlx4_en_set_coalesce,
+	.get_pauseparam = mlx4_en_get_pauseparam,
+	.set_pauseparam = mlx4_en_set_pauseparam,
+	.get_ringparam = mlx4_en_get_ringparam,
+	.get_flags = ethtool_op_get_flags,
+	.set_flags = ethtool_op_set_flags,
+};
+
+
+
+
+
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 0000000..c5a4c03
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+#include "mlx4_en.h"
+
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+			u64 mac, u64 clear, u8 mode)
+{
+	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
+}
+
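+/* Build a 4096-bit VLAN filter bitmap from the vlan_group (a set bit allows
+ * the corresponding VLAN ID) and pass it to firmware with SET_VLAN_FLTR.
+ * A NULL group results in an all-zero filter that blocks every VLAN. */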
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_vlan_fltr_mbox *filter;
+	int i;
+	int j;
+	int index = 0;
+	u32 entry;
+	int err = 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	filter = mailbox->buf;
+	if (grp) {
+		memset(filter, 0, sizeof *filter);
+		for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+			entry = 0;
+			for (j = 0; j < 32; j++)
+				if (vlan_group_get_device(grp, index++))
+					entry |= 1 << j;
+			filter->entry[i] = cpu_to_be32(entry);
+		}
+	} else {
+		/* When no VLANs are configured we block all VLANs */
+		memset(filter, 0, sizeof(*filter));
+	}
+	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
+		       MLX4_CMD_TIME_CLASS_B);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_general_context *context;
+	int err;
+	u32 in_mod;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	context->flags = SET_PORT_GEN_ALL_VALID;
+	context->mtu = cpu_to_be16(mtu);
+	context->pptx = (pptx * (!pfctx)) << 7;
+	context->pfctx = pfctx;
+	context->pprx = (pprx * (!pfcrx)) << 7;
+	context->pfcrx = pfcrx;
+
+	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+			   u8 promisc)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_rqp_calc_context *context;
+	int err;
+	u32 in_mod;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	context->base_qpn = cpu_to_be32(base_qpn);
+	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
+	context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+	context->intra_no_vlan = 0;
+	context->no_vlan = MLX4_NO_VLAN_IDX;
+	context->intra_vlan_miss = 0;
+	context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+
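+/* Execute the DUMP_ETH_STATS firmware command (optionally resetting the HW
+ * counters) and fold the returned mailbox counters into the netdev and
+ * per-priority software statistics under the stats lock. */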
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+{
+	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+	struct net_device_stats *stats = &priv->stats;
+	struct mlx4_cmd_mailbox *mailbox;
+	u64 in_mod = reset << 8 | port;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
+	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
+			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+	if (err)
+		goto out;
+
+	mlx4_en_stats = mailbox->buf;
+
+	spin_lock_bh(&priv->stats_lock);
+
+	stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
+			    be32_to_cpu(mlx4_en_stats->RDROP);
+	stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
+			    be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
+	stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
+			  be64_to_cpu(mlx4_en_stats->ROCT_novlan);
+
+	stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
+			  be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
+
+	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
+			   be32_to_cpu(mlx4_en_stats->RdropLength) +
+			   be32_to_cpu(mlx4_en_stats->RJBBR) +
+			   be32_to_cpu(mlx4_en_stats->RCRC) +
+			   be32_to_cpu(mlx4_en_stats->RRUNT);
+	stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
+	stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
+			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+	stats->collisions = 0;
+	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+	stats->rx_frame_errors = 0;
+	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+	stats->tx_aborted_errors = 0;
+	stats->tx_carrier_errors = 0;
+	stats->tx_fifo_errors = 0;
+	stats->tx_heartbeat_errors = 0;
+	stats->tx_window_errors = 0;
+
+	priv->pkstats.broadcast =
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
+				be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
+	priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+	priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+	priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+	priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+	priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+	priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+	priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+	priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+	priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+	priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+	priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+	priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+	priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+	priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+	priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+	priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+	spin_unlock_bh(&priv->stats_lock);
+
+out:
+	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+	return err;
+}
+
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 0000000..e6477f1
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_PORT_H_
+#define _MLX4_EN_PORT_H_
+
+
+#define SET_PORT_GEN_ALL_VALID	0x7
+#define SET_PORT_PROMISC_SHIFT	31
+
+enum {
+	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
+	MLX4_CMD_SET_MCAST_FLTR = 0x48,
+	MLX4_CMD_DUMP_ETH_STATS = 0x49,
+};
+
+struct mlx4_set_port_general_context {
+	u8 reserved[3];
+	u8 flags;
+	u16 reserved2;
+	__be16 mtu;
+	u8 pptx;
+	u8 pfctx;
+	u16 reserved3;
+	u8 pprx;
+	u8 pfcrx;
+	u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+	__be32 base_qpn;
+	__be32 flags;
+	u8 reserved[3];
+	u8 mac_miss;
+	u8 intra_no_vlan;
+	u8 no_vlan;
+	u8 intra_vlan_miss;
+	u8 vlan_miss;
+	u8 reserved2[3];
+	u8 no_vlan_prio;
+	__be32 promisc;
+	__be32 mcast;
+};
+
+#define VLAN_FLTR_SIZE	128
+struct mlx4_set_vlan_fltr_mbox {
+	__be32 entry[VLAN_FLTR_SIZE];
+};
+
+
+enum {
+	MLX4_MCAST_CONFIG       = 0,
+	MLX4_MCAST_DISABLE      = 1,
+	MLX4_MCAST_ENABLE       = 2,
+};
+
+
+struct mlx4_en_stat_out_mbox {
+	/* Received frames with a length of 64 octets */
+	__be64 R64_prio_0;
+	__be64 R64_prio_1;
+	__be64 R64_prio_2;
+	__be64 R64_prio_3;
+	__be64 R64_prio_4;
+	__be64 R64_prio_5;
+	__be64 R64_prio_6;
+	__be64 R64_prio_7;
+	__be64 R64_novlan;
+	/* Received frames with a length of 127 octets */
+	__be64 R127_prio_0;
+	__be64 R127_prio_1;
+	__be64 R127_prio_2;
+	__be64 R127_prio_3;
+	__be64 R127_prio_4;
+	__be64 R127_prio_5;
+	__be64 R127_prio_6;
+	__be64 R127_prio_7;
+	__be64 R127_novlan;
+	/* Received frames with a length of 255 octets */
+	__be64 R255_prio_0;
+	__be64 R255_prio_1;
+	__be64 R255_prio_2;
+	__be64 R255_prio_3;
+	__be64 R255_prio_4;
+	__be64 R255_prio_5;
+	__be64 R255_prio_6;
+	__be64 R255_prio_7;
+	__be64 R255_novlan;
+	/* Received frames with a length of 511 octets */
+	__be64 R511_prio_0;
+	__be64 R511_prio_1;
+	__be64 R511_prio_2;
+	__be64 R511_prio_3;
+	__be64 R511_prio_4;
+	__be64 R511_prio_5;
+	__be64 R511_prio_6;
+	__be64 R511_prio_7;
+	__be64 R511_novlan;
+	/* Received frames with a length of 1023 octets */
+	__be64 R1023_prio_0;
+	__be64 R1023_prio_1;
+	__be64 R1023_prio_2;
+	__be64 R1023_prio_3;
+	__be64 R1023_prio_4;
+	__be64 R1023_prio_5;
+	__be64 R1023_prio_6;
+	__be64 R1023_prio_7;
+	__be64 R1023_novlan;
+	/* Received frames with a length of 1518 octets */
+	__be64 R1518_prio_0;
+	__be64 R1518_prio_1;
+	__be64 R1518_prio_2;
+	__be64 R1518_prio_3;
+	__be64 R1518_prio_4;
+	__be64 R1518_prio_5;
+	__be64 R1518_prio_6;
+	__be64 R1518_prio_7;
+	__be64 R1518_novlan;
+	/* Received frames with a length of 1522 octets */
+	__be64 R1522_prio_0;
+	__be64 R1522_prio_1;
+	__be64 R1522_prio_2;
+	__be64 R1522_prio_3;
+	__be64 R1522_prio_4;
+	__be64 R1522_prio_5;
+	__be64 R1522_prio_6;
+	__be64 R1522_prio_7;
+	__be64 R1522_novlan;
+	/* Received frames with a length of 1548 octets */
+	__be64 R1548_prio_0;
+	__be64 R1548_prio_1;
+	__be64 R1548_prio_2;
+	__be64 R1548_prio_3;
+	__be64 R1548_prio_4;
+	__be64 R1548_prio_5;
+	__be64 R1548_prio_6;
+	__be64 R1548_prio_7;
+	__be64 R1548_novlan;
+	/* Received frames with a length of 1548 < octets < MTU */
+	__be64 R2MTU_prio_0;
+	__be64 R2MTU_prio_1;
+	__be64 R2MTU_prio_2;
+	__be64 R2MTU_prio_3;
+	__be64 R2MTU_prio_4;
+	__be64 R2MTU_prio_5;
+	__be64 R2MTU_prio_6;
+	__be64 R2MTU_prio_7;
+	__be64 R2MTU_novlan;
+	/* Received frames with a length greater than MTU octets and a good CRC */
+	__be64 RGIANT_prio_0;
+	__be64 RGIANT_prio_1;
+	__be64 RGIANT_prio_2;
+	__be64 RGIANT_prio_3;
+	__be64 RGIANT_prio_4;
+	__be64 RGIANT_prio_5;
+	__be64 RGIANT_prio_6;
+	__be64 RGIANT_prio_7;
+	__be64 RGIANT_novlan;
+	/* Received broadcast frames with good CRC */
+	__be64 RBCAST_prio_0;
+	__be64 RBCAST_prio_1;
+	__be64 RBCAST_prio_2;
+	__be64 RBCAST_prio_3;
+	__be64 RBCAST_prio_4;
+	__be64 RBCAST_prio_5;
+	__be64 RBCAST_prio_6;
+	__be64 RBCAST_prio_7;
+	__be64 RBCAST_novlan;
+	/* Received multicast frames with good CRC */
+	__be64 MCAST_prio_0;
+	__be64 MCAST_prio_1;
+	__be64 MCAST_prio_2;
+	__be64 MCAST_prio_3;
+	__be64 MCAST_prio_4;
+	__be64 MCAST_prio_5;
+	__be64 MCAST_prio_6;
+	__be64 MCAST_prio_7;
+	__be64 MCAST_novlan;
+	/* Received unicast frames that are neither short nor GIANT, with a good CRC */
+	__be64 RTOTG_prio_0;
+	__be64 RTOTG_prio_1;
+	__be64 RTOTG_prio_2;
+	__be64 RTOTG_prio_3;
+	__be64 RTOTG_prio_4;
+	__be64 RTOTG_prio_5;
+	__be64 RTOTG_prio_6;
+	__be64 RTOTG_prio_7;
+	__be64 RTOTG_novlan;
+
+	/* Count of total octets of received frames, includes framing characters */
+	__be64 RTTLOCT_prio_0;
+	/* Count of total octets of received frames, not including framing
+	   characters */
+	__be64 RTTLOCT_NOFRM_prio_0;
+	/* Count of Total number of octets received
+	   (only for frames without errors) */
+	__be64 ROCT_prio_0;
+
+	__be64 RTTLOCT_prio_1;
+	__be64 RTTLOCT_NOFRM_prio_1;
+	__be64 ROCT_prio_1;
+
+	__be64 RTTLOCT_prio_2;
+	__be64 RTTLOCT_NOFRM_prio_2;
+	__be64 ROCT_prio_2;
+
+	__be64 RTTLOCT_prio_3;
+	__be64 RTTLOCT_NOFRM_prio_3;
+	__be64 ROCT_prio_3;
+
+	__be64 RTTLOCT_prio_4;
+	__be64 RTTLOCT_NOFRM_prio_4;
+	__be64 ROCT_prio_4;
+
+	__be64 RTTLOCT_prio_5;
+	__be64 RTTLOCT_NOFRM_prio_5;
+	__be64 ROCT_prio_5;
+
+	__be64 RTTLOCT_prio_6;
+	__be64 RTTLOCT_NOFRM_prio_6;
+	__be64 ROCT_prio_6;
+
+	__be64 RTTLOCT_prio_7;
+	__be64 RTTLOCT_NOFRM_prio_7;
+	__be64 ROCT_prio_7;
+
+	__be64 RTTLOCT_novlan;
+	__be64 RTTLOCT_NOFRM_novlan;
+	__be64 ROCT_novlan;
+
+	/* Count of Total received frames including bad frames */
+	__be64 RTOT_prio_0;
+	/* Total number of received frames with 802.1Q encapsulation */
+	__be64 R1Q_prio_0;
+	__be64 reserved1;
+
+	__be64 RTOT_prio_1;
+	__be64 R1Q_prio_1;
+	__be64 reserved2;
+
+	__be64 RTOT_prio_2;
+	__be64 R1Q_prio_2;
+	__be64 reserved3;
+
+	__be64 RTOT_prio_3;
+	__be64 R1Q_prio_3;
+	__be64 reserved4;
+
+	__be64 RTOT_prio_4;
+	__be64 R1Q_prio_4;
+	__be64 reserved5;
+
+	__be64 RTOT_prio_5;
+	__be64 R1Q_prio_5;
+	__be64 reserved6;
+
+	__be64 RTOT_prio_6;
+	__be64 R1Q_prio_6;
+	__be64 reserved7;
+
+	__be64 RTOT_prio_7;
+	__be64 R1Q_prio_7;
+	__be64 reserved8;
+
+	__be64 RTOT_novlan;
+	__be64 R1Q_novlan;
+	__be64 reserved9;
+
+	/* Total number of Successfully Received Control Frames */
+	__be64 RCNTL;
+	__be64 reserved10;
+	__be64 reserved11;
+	__be64 reserved12;
+	/* Count of received frames with a length/type field value between 46
+	   (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
+	   inclusive */
+	__be64 RInRangeLengthErr;
+	/* Count of received frames with length/type field between 1501 and 1535
+	   decimal, inclusive */
+	__be64 ROutRangeLengthErr;
+	/* Count of received frames that are longer than max allowed size for
+	   802.3 frames (1518/1522) */
+	__be64 RFrmTooLong;
+	/* Count frames received with PCS error */
+	__be64 PCS;
+
+	/* Transmit frames with a length of 64 octets */
+	__be64 T64_prio_0;
+	__be64 T64_prio_1;
+	__be64 T64_prio_2;
+	__be64 T64_prio_3;
+	__be64 T64_prio_4;
+	__be64 T64_prio_5;
+	__be64 T64_prio_6;
+	__be64 T64_prio_7;
+	__be64 T64_novlan;
+	__be64 T64_loopbk;
+	/* Transmit frames with a length of 65 to 127 octets. */
+	__be64 T127_prio_0;
+	__be64 T127_prio_1;
+	__be64 T127_prio_2;
+	__be64 T127_prio_3;
+	__be64 T127_prio_4;
+	__be64 T127_prio_5;
+	__be64 T127_prio_6;
+	__be64 T127_prio_7;
+	__be64 T127_novlan;
+	__be64 T127_loopbk;
+	/* Transmit frames with a length of 128 to 255 octets */
+	__be64 T255_prio_0;
+	__be64 T255_prio_1;
+	__be64 T255_prio_2;
+	__be64 T255_prio_3;
+	__be64 T255_prio_4;
+	__be64 T255_prio_5;
+	__be64 T255_prio_6;
+	__be64 T255_prio_7;
+	__be64 T255_novlan;
+	__be64 T255_loopbk;
+	/* Transmit frames with a length of 256 to 511 octets */
+	__be64 T511_prio_0;
+	__be64 T511_prio_1;
+	__be64 T511_prio_2;
+	__be64 T511_prio_3;
+	__be64 T511_prio_4;
+	__be64 T511_prio_5;
+	__be64 T511_prio_6;
+	__be64 T511_prio_7;
+	__be64 T511_novlan;
+	__be64 T511_loopbk;
+	/* Transmit frames with a length of 512 to 1023 octets */
+	__be64 T1023_prio_0;
+	__be64 T1023_prio_1;
+	__be64 T1023_prio_2;
+	__be64 T1023_prio_3;
+	__be64 T1023_prio_4;
+	__be64 T1023_prio_5;
+	__be64 T1023_prio_6;
+	__be64 T1023_prio_7;
+	__be64 T1023_novlan;
+	__be64 T1023_loopbk;
+	/* Transmit frames with a length of 1024 to 1518 octets */
+	__be64 T1518_prio_0;
+	__be64 T1518_prio_1;
+	__be64 T1518_prio_2;
+	__be64 T1518_prio_3;
+	__be64 T1518_prio_4;
+	__be64 T1518_prio_5;
+	__be64 T1518_prio_6;
+	__be64 T1518_prio_7;
+	__be64 T1518_novlan;
+	__be64 T1518_loopbk;
+	/* Transmit frames with a length of 1519 to 1522 octets */
+	__be64 T1522_prio_0;
+	__be64 T1522_prio_1;
+	__be64 T1522_prio_2;
+	__be64 T1522_prio_3;
+	__be64 T1522_prio_4;
+	__be64 T1522_prio_5;
+	__be64 T1522_prio_6;
+	__be64 T1522_prio_7;
+	__be64 T1522_novlan;
+	__be64 T1522_loopbk;
+	/* Transmit frames with a length of 1523 to 1548 octets */
+	__be64 T1548_prio_0;
+	__be64 T1548_prio_1;
+	__be64 T1548_prio_2;
+	__be64 T1548_prio_3;
+	__be64 T1548_prio_4;
+	__be64 T1548_prio_5;
+	__be64 T1548_prio_6;
+	__be64 T1548_prio_7;
+	__be64 T1548_novlan;
+	__be64 T1548_loopbk;
+	/* Transmit frames with a length of 1549 to MTU octets */
+	__be64 T2MTU_prio_0;
+	__be64 T2MTU_prio_1;
+	__be64 T2MTU_prio_2;
+	__be64 T2MTU_prio_3;
+	__be64 T2MTU_prio_4;
+	__be64 T2MTU_prio_5;
+	__be64 T2MTU_prio_6;
+	__be64 T2MTU_prio_7;
+	__be64 T2MTU_novlan;
+	__be64 T2MTU_loopbk;
+	/* Transmit frames with a length greater than MTU octets and a good CRC. */
+	__be64 TGIANT_prio_0;
+	__be64 TGIANT_prio_1;
+	__be64 TGIANT_prio_2;
+	__be64 TGIANT_prio_3;
+	__be64 TGIANT_prio_4;
+	__be64 TGIANT_prio_5;
+	__be64 TGIANT_prio_6;
+	__be64 TGIANT_prio_7;
+	__be64 TGIANT_novlan;
+	__be64 TGIANT_loopbk;
+	/* Transmit broadcast frames with a good CRC */
+	__be64 TBCAST_prio_0;
+	__be64 TBCAST_prio_1;
+	__be64 TBCAST_prio_2;
+	__be64 TBCAST_prio_3;
+	__be64 TBCAST_prio_4;
+	__be64 TBCAST_prio_5;
+	__be64 TBCAST_prio_6;
+	__be64 TBCAST_prio_7;
+	__be64 TBCAST_novlan;
+	__be64 TBCAST_loopbk;
+	/* Transmit multicast frames with a good CRC */
+	__be64 TMCAST_prio_0;
+	__be64 TMCAST_prio_1;
+	__be64 TMCAST_prio_2;
+	__be64 TMCAST_prio_3;
+	__be64 TMCAST_prio_4;
+	__be64 TMCAST_prio_5;
+	__be64 TMCAST_prio_6;
+	__be64 TMCAST_prio_7;
+	__be64 TMCAST_novlan;
+	__be64 TMCAST_loopbk;
+	/* Transmit good frames that are neither broadcast nor multicast */
+	__be64 TTOTG_prio_0;
+	__be64 TTOTG_prio_1;
+	__be64 TTOTG_prio_2;
+	__be64 TTOTG_prio_3;
+	__be64 TTOTG_prio_4;
+	__be64 TTOTG_prio_5;
+	__be64 TTOTG_prio_6;
+	__be64 TTOTG_prio_7;
+	__be64 TTOTG_novlan;
+	__be64 TTOTG_loopbk;
+
+	/* total octets of transmitted frames, including framing characters */
+	__be64 TTTLOCT_prio_0;
+	/* total octets of transmitted frames, not including framing characters */
+	__be64 TTTLOCT_NOFRM_prio_0;
+	/* ifOutOctets */
+	__be64 TOCT_prio_0;
+
+	__be64 TTTLOCT_prio_1;
+	__be64 TTTLOCT_NOFRM_prio_1;
+	__be64 TOCT_prio_1;
+
+	__be64 TTTLOCT_prio_2;
+	__be64 TTTLOCT_NOFRM_prio_2;
+	__be64 TOCT_prio_2;
+
+	__be64 TTTLOCT_prio_3;
+	__be64 TTTLOCT_NOFRM_prio_3;
+	__be64 TOCT_prio_3;
+
+	__be64 TTTLOCT_prio_4;
+	__be64 TTTLOCT_NOFRM_prio_4;
+	__be64 TOCT_prio_4;
+
+	__be64 TTTLOCT_prio_5;
+	__be64 TTTLOCT_NOFRM_prio_5;
+	__be64 TOCT_prio_5;
+
+	__be64 TTTLOCT_prio_6;
+	__be64 TTTLOCT_NOFRM_prio_6;
+	__be64 TOCT_prio_6;
+
+	__be64 TTTLOCT_prio_7;
+	__be64 TTTLOCT_NOFRM_prio_7;
+	__be64 TOCT_prio_7;
+
+	__be64 TTTLOCT_novlan;
+	__be64 TTTLOCT_NOFRM_novlan;
+	__be64 TOCT_novlan;
+
+	__be64 TTTLOCT_loopbk;
+	__be64 TTTLOCT_NOFRM_loopbk;
+	__be64 TOCT_loopbk;
+
+	/* Total frames transmitted with a good CRC that are not aborted  */
+	__be64 TTOT_prio_0;
+	/* Total number of frames transmitted with 802.1Q encapsulation */
+	__be64 T1Q_prio_0;
+	__be64 reserved13;
+
+	__be64 TTOT_prio_1;
+	__be64 T1Q_prio_1;
+	__be64 reserved14;
+
+	__be64 TTOT_prio_2;
+	__be64 T1Q_prio_2;
+	__be64 reserved15;
+
+	__be64 TTOT_prio_3;
+	__be64 T1Q_prio_3;
+	__be64 reserved16;
+
+	__be64 TTOT_prio_4;
+	__be64 T1Q_prio_4;
+	__be64 reserved17;
+
+	__be64 TTOT_prio_5;
+	__be64 T1Q_prio_5;
+	__be64 reserved18;
+
+	__be64 TTOT_prio_6;
+	__be64 T1Q_prio_6;
+	__be64 reserved19;
+
+	__be64 TTOT_prio_7;
+	__be64 T1Q_prio_7;
+	__be64 reserved20;
+
+	__be64 TTOT_novlan;
+	__be64 T1Q_novlan;
+	__be64 reserved21;
+
+	__be64 TTOT_loopbk;
+	__be64 T1Q_loopbk;
+	__be64 reserved22;
+
+	/* Received frames with a length greater than MTU octets and a bad CRC */
+	__be32 RJBBR;
+	/* Received frames with a bad CRC that are not runts, jabbers,
+	   or alignment errors */
+	__be32 RCRC;
+	/* Received frames with SFD with a length of less than 64 octets and a
+	   bad CRC */
+	__be32 RRUNT;
+	/* Received frames with a length less than 64 octets and a good CRC */
+	__be32 RSHORT;
+	/* Total Number of Received Packets Dropped */
+	__be32 RDROP;
+	/* Drop due to overflow  */
+	__be32 RdropOvflw;
+	/* Drop due to length error */
+	__be32 RdropLength;
+	/* Total of good frames. Does not include frames received with
+	   frame-too-long, FCS, or length errors */
+	__be32 RTOTFRMS;
+	/* Total number of dropped transmitted packets */
+	__be32 TDROP;
+};
+
+
+#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 0000000..a054520
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_en.h"
+
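+/*
+ * Fill a QP context with the settings common to the RX, TX and RSS QPs
+ * used by this driver: PD, UAR index, queue sizes and strides, the
+ * completion queue, the doorbell record address and (for non-RSS QPs)
+ * the attached SRQ.
+ */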
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+			     int is_tx, int rss, int qpn, int cqn, int srqn,
+			     struct mlx4_qp_context *context)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	memset(context, 0, sizeof *context);
+	context->flags = cpu_to_be32(7 << 16 | rss << 13);
+	context->pd = cpu_to_be32(mdev->priv_pdn);
+	context->mtu_msgmax = 0xff;
+	context->rq_size_stride = 0;
+	if (is_tx)
+		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+	else
+		context->sq_size_stride = 1;
+	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+	context->local_qpn = cpu_to_be32(qpn);
+	context->pri_path.ackto = 1 & 0x07;
+	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+	context->pri_path.counter_index = 0xff;
+	context->cqn_send = cpu_to_be32(cqn);
+	context->cqn_recv = cpu_to_be32(cqn);
+	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+	if (!rss)
+		context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
+}
+
+
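+/*
+ * On 64-bit systems, or when the HW queue was allocated as a single
+ * contiguous chunk (nbufs == 1), the buffer is already virtually
+ * contiguous and nothing needs to be done.  Otherwise vmap() the
+ * individual pages into one contiguous kernel virtual mapping.
+ */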
+int mlx4_en_map_buffer(struct mlx4_buf *buf)
+{
+	struct page **pages;
+	int i;
+
+	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+		return 0;
+
+	pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	for (i = 0; i < buf->nbufs; ++i)
+		pages[i] = virt_to_page(buf->page_list[i].buf);
+
+	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+	kfree(pages);
+	if (!buf->direct.buf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
+{
+	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+		return;
+
+	vunmap(buf->direct.buf);
+}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 0000000..6232227
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
+{
+	int offset = n << ring->srq.wqe_shift;
+	return ring->buf + offset;
+}
+
+static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
+{
+	return;
+}
+
+static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
+				   void **ip_hdr, void **tcpudp_hdr,
+				   u64 *hdr_flags, void *priv)
+{
+	*mac_hdr = page_address(frags->page) + frags->page_offset;
+	*ip_hdr = *mac_hdr + ETH_HLEN;
+	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+
+	return 0;
+}
+
+static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+			      struct mlx4_en_rx_desc *rx_desc,
+			      struct skb_frag_struct *skb_frags,
+			      struct mlx4_en_rx_alloc *ring_alloc,
+			      int i)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+	struct page *page;
+	dma_addr_t dma;
+
+	if (page_alloc->offset == frag_info->last_offset) {
+		/* Allocate new page */
+		page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
+		if (!page)
+			return -ENOMEM;
+
+		skb_frags[i].page = page_alloc->page;
+		skb_frags[i].page_offset = page_alloc->offset;
+		page_alloc->page = page;
+		page_alloc->offset = frag_info->frag_align;
+	} else {
+		page = page_alloc->page;
+		get_page(page);
+
+		skb_frags[i].page = page;
+		skb_frags[i].page_offset = page_alloc->offset;
+		page_alloc->offset += frag_info->frag_stride;
+	}
+	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+			     skb_frags[i].page_offset, frag_info->frag_size,
+			     PCI_DMA_FROMDEVICE);
+	rx_desc->data[i].addr = cpu_to_be64(dma);
+	return 0;
+}
+
+static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+				  struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_rx_alloc *page_alloc;
+	int i;
+
+	for (i = 0; i < priv->num_frags; i++) {
+		page_alloc = &ring->page_alloc[i];
+		page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+					       MLX4_EN_ALLOC_ORDER);
+		if (!page_alloc->page)
+			goto out;
+
+		page_alloc->offset = priv->frag_info[i].frag_align;
+		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+			 i, page_alloc->page);
+	}
+	return 0;
+
+out:
+	while (i--) {
+		page_alloc = &ring->page_alloc[i];
+		put_page(page_alloc->page);
+		page_alloc->page = NULL;
+	}
+	return -ENOMEM;
+}
+
+static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
+				      struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_rx_alloc *page_alloc;
+	int i;
+
+	for (i = 0; i < priv->num_frags; i++) {
+		page_alloc = &ring->page_alloc[i];
+		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+			 i, page_count(page_alloc->page));
+
+		put_page(page_alloc->page);
+		page_alloc->page = NULL;
+	}
+}
+
+
+static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+				 struct mlx4_en_rx_ring *ring, int index)
+{
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
+	struct skb_frag_struct *skb_frags = ring->rx_info +
+					    (index << priv->log_rx_info);
+	int possible_frags;
+	int i;
+
+	/* Pre-link descriptor */
+	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
+
+	/* Set size and memtype fields */
+	for (i = 0; i < priv->num_frags; i++) {
+		skb_frags[i].size = priv->frag_info[i].frag_size;
+		rx_desc->data[i].byte_count =
+			cpu_to_be32(priv->frag_info[i].frag_size);
+		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
+	}
+
+	/* If the number of used fragments does not fill up the ring stride,
+	 * remaining (unused) fragments must be padded with null address/size
+	 * and a special memory key */
+	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
+	for (i = priv->num_frags; i < possible_frags; i++) {
+		rx_desc->data[i].byte_count = 0;
+		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
+		rx_desc->data[i].addr = 0;
+	}
+}
+
+
+static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+				   struct mlx4_en_rx_ring *ring, int index)
+{
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+	struct skb_frag_struct *skb_frags = ring->rx_info +
+					    (index << priv->log_rx_info);
+	int i;
+
+	for (i = 0; i < priv->num_frags; i++)
+		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
+			goto err;
+
+	return 0;
+
+err:
+	while (i--)
+		put_page(skb_frags[i].page);
+	return -ENOMEM;
+}
+
+static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+{
+	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring;
+	int ring_ind;
+	int buf_ind;
+
+	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
+		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+			ring = &priv->rx_ring[ring_ind];
+
+			if (mlx4_en_prepare_rx_desc(priv, ring,
+						    ring->actual_size)) {
+				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
+					mlx4_err(mdev, "Failed to allocate "
+						       "enough rx buffers\n");
+					return -ENOMEM;
+				} else {
+					if (netif_msg_rx_err(priv))
+						mlx4_warn(mdev,
+							  "Only %d buffers allocated\n",
+							  ring->actual_size);
+					goto out;
+				}
+			}
+			ring->actual_size++;
+			ring->prod++;
+		}
+	}
+out:
+	return 0;
+}
+
+static int mlx4_en_fill_rx_buf(struct net_device *dev,
+			       struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int num = 0;
+	int err;
+
+	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
+					      ring->size_mask);
+		if (err) {
+			if (netif_msg_rx_err(priv))
+				mlx4_warn(priv->mdev,
+					  "Failed preparing rx descriptor\n");
+			priv->port_stats.rx_alloc_failed++;
+			break;
+		}
+		++num;
+		++ring->prod;
+	}
+	if ((u32) (ring->prod - ring->cons) == ring->size)
+		ring->full = 1;
+
+	return num;
+}
+
+static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+				struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct skb_frag_struct *skb_frags;
+	struct mlx4_en_rx_desc *rx_desc;
+	dma_addr_t dma;
+	int index;
+	int nr;
+
+	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+			ring->cons, ring->prod);
+
+	/* Unmap and free Rx buffers */
+	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+	while (ring->cons != ring->prod) {
+		index = ring->cons & ring->size_mask;
+		rx_desc = ring->buf + (index << ring->log_stride);
+		skb_frags = ring->rx_info + (index << priv->log_rx_info);
+		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+
+		for (nr = 0; nr < priv->num_frags; nr++) {
+			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+			dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+					 PCI_DMA_FROMDEVICE);
+			put_page(skb_frags[nr].page);
+		}
+		++ring->cons;
+	}
+}
+
+
+void mlx4_en_rx_refill(struct work_struct *work)
+{
+	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+						 refill_task);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct net_device *dev = priv->dev;
+	struct mlx4_en_rx_ring *ring;
+	int need_refill = 0;
+	int i;
+
+	mutex_lock(&mdev->state_lock);
+	if (!mdev->device_up || !priv->port_up)
+		goto out;
+
+	/* We only get here if there are no receive buffers, so we can't race
+	 * with Rx interrupts while filling buffers */
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		ring = &priv->rx_ring[i];
+		if (ring->need_refill) {
+			if (mlx4_en_fill_rx_buf(dev, ring)) {
+				ring->need_refill = 0;
+				mlx4_en_update_rx_prod_db(ring);
+			} else
+				need_refill = 1;
+		}
+	}
+	if (need_refill)
+		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
+
+out:
+	mutex_unlock(&mdev->state_lock);
+}
+
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+	int tmp;
+
+	/* Sanity check SRQ size before proceeding */
+	if (size >= mdev->dev->caps.max_srq_wqes)
+		return -EINVAL;
+
+	ring->prod = 0;
+	ring->cons = 0;
+	ring->size = size;
+	ring->size_mask = size - 1;
+	ring->stride = stride;
+	ring->log_stride = ffs(ring->stride) - 1;
+	ring->buf_size = ring->size * ring->stride;
+
+	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+					sizeof(struct skb_frag_struct));
+	ring->rx_info = vmalloc(tmp);
+	if (!ring->rx_info) {
+		mlx4_err(mdev, "Failed allocating rx_info ring\n");
+		return -ENOMEM;
+	}
+	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+		 ring->rx_info, tmp);
+
+	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
+				 ring->buf_size, 2 * PAGE_SIZE);
+	if (err)
+		goto err_ring;
+
+	err = mlx4_en_map_buffer(&ring->wqres.buf);
+	if (err) {
+		mlx4_err(mdev, "Failed to map RX buffer\n");
+		goto err_hwq;
+	}
+	ring->buf = ring->wqres.buf.direct.buf;
+
+	/* Configure the LRO manager */
+	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
+	ring->lro.dev = priv->dev;
+	ring->lro.features = LRO_F_NAPI;
+	ring->lro.frag_align_pad = NET_IP_ALIGN;
+	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
+	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	ring->lro.max_desc = mdev->profile.num_lro;
+	ring->lro.max_aggr = MAX_SKB_FRAGS;
+	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
+				    sizeof(struct net_lro_desc),
+				    GFP_KERNEL);
+	if (!ring->lro.lro_arr) {
+		mlx4_err(mdev, "Failed to allocate lro array\n");
+		goto err_map;
+	}
+	ring->lro.get_frag_header = mlx4_en_get_frag_header;
+
+	return 0;
+
+err_map:
+	mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq:
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_ring:
+	vfree(ring->rx_info);
+	ring->rx_info = NULL;
+	return err;
+}
+
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_wqe_srq_next_seg *next;
+	struct mlx4_en_rx_ring *ring;
+	int i;
+	int ring_ind;
+	int err;
+	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+					DS_SIZE * priv->num_frags);
+	int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
+
+	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+		ring = &priv->rx_ring[ring_ind];
+
+		ring->prod = 0;
+		ring->cons = 0;
+		ring->actual_size = 0;
+		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+
+		ring->stride = stride;
+		ring->log_stride = ffs(ring->stride) - 1;
+		ring->buf_size = ring->size * ring->stride;
+
+		memset(ring->buf, 0, ring->buf_size);
+		mlx4_en_update_rx_prod_db(ring);
+
+		/* Initialize all descriptors */
+		for (i = 0; i < ring->size; i++)
+			mlx4_en_init_rx_desc(priv, ring, i);
+
+		/* Initialize page allocators */
+		err = mlx4_en_init_allocator(priv, ring);
+		if (err) {
+			mlx4_err(mdev, "Failed initializing ring allocator\n");
+			goto err_allocator;
+		}
+
+		/* Fill Rx buffers */
+		ring->full = 0;
+	}
+	if (mlx4_en_fill_rx_buffers(priv))
+		goto err_buffers;
+
+	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+		ring = &priv->rx_ring[ring_ind];
+
+		mlx4_en_update_rx_prod_db(ring);
+
+		/* Configure SRQ representing the ring */
+		ring->srq.max    = ring->size;
+		ring->srq.max_gs = max_gs;
+		ring->srq.wqe_shift = ilog2(ring->stride);
+
+		for (i = 0; i < ring->srq.max; ++i) {
+			next = get_wqe(ring, i);
+			next->next_wqe_index =
+			cpu_to_be16((i + 1) & (ring->srq.max - 1));
+		}
+
+		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
+				     ring->wqres.db.dma, &ring->srq);
+		if (err) {
+			mlx4_err(mdev, "Failed to allocate srq\n");
+			goto err_srq;
+		}
+		ring->srq.event = mlx4_en_srq_event;
+	}
+
+	return 0;
+
+err_srq:
+	while (ring_ind >= 0) {
+		ring = &priv->rx_ring[ring_ind];
+		mlx4_srq_free(mdev->dev, &ring->srq);
+		ring_ind--;
+	}
+
+err_buffers:
+	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
+		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+
+	ring_ind = priv->rx_ring_num - 1;
+err_allocator:
+	while (ring_ind >= 0) {
+		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+		ring_ind--;
+	}
+	return err;
+}
+
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	kfree(ring->lro.lro_arr);
+	mlx4_en_unmap_buffer(&ring->wqres.buf);
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+	vfree(ring->rx_info);
+	ring->rx_info = NULL;
+}
+
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+				struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	mlx4_srq_free(mdev->dev, &ring->srq);
+	mlx4_en_free_rx_buf(priv, ring);
+	mlx4_en_destroy_allocator(priv, ring);
+}
+
+
+/* Unmap a completed descriptor and free unused pages */
+static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+				    struct mlx4_en_rx_desc *rx_desc,
+				    struct skb_frag_struct *skb_frags,
+				    struct skb_frag_struct *skb_frags_rx,
+				    struct mlx4_en_rx_alloc *page_alloc,
+				    int length)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_frag_info *frag_info;
+	int nr;
+	dma_addr_t dma;
+
+	/* Collect used fragments while replacing them in the HW descriptors */
+	for (nr = 0; nr < priv->num_frags; nr++) {
+		frag_info = &priv->frag_info[nr];
+		if (length <= frag_info->frag_prefix_size)
+			break;
+
+		/* Save page reference in skb */
+		skb_frags_rx[nr].page = skb_frags[nr].page;
+		skb_frags_rx[nr].size = skb_frags[nr].size;
+		skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+		dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+		/* Allocate a replacement page */
+		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
+			goto fail;
+
+		/* Unmap buffer */
+		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+				 PCI_DMA_FROMDEVICE);
+	}
+	/* Adjust size of last fragment to match actual length */
+	skb_frags_rx[nr - 1].size = length -
+		priv->frag_info[nr - 1].frag_prefix_size;
+	return nr;
+
+fail:
+	/* Drop all accumulated fragments (which have already been replaced in
+	 * the descriptor) of this packet; remaining fragments are reused... */
+	while (nr > 0) {
+		nr--;
+		put_page(skb_frags_rx[nr].page);
+	}
+	return 0;
+}
+
+
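+/*
+ * Build an skb for a received packet: small packets (up to
+ * SMALL_PACKET_SIZE) are copied entirely into the linear part of the
+ * skb, larger packets get their headers copied and the remaining data
+ * attached as page fragments taken from the Rx descriptor.
+ */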
+static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+				      struct mlx4_en_rx_desc *rx_desc,
+				      struct skb_frag_struct *skb_frags,
+				      struct mlx4_en_rx_alloc *page_alloc,
+				      unsigned int length)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct sk_buff *skb;
+	void *va;
+	int used_frags;
+	dma_addr_t dma;
+
+	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+	if (!skb) {
+		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+		return NULL;
+	}
+	skb->dev = priv->dev;
+	skb_reserve(skb, NET_IP_ALIGN);
+	skb->len = length;
+	skb->truesize = length + sizeof(struct sk_buff);
+
+	/* Get a pointer to the first fragment so we can copy the headers into
+	 * the (linear part of the) skb */
+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+	if (length <= SMALL_PACKET_SIZE) {
+		/* We are copying all relevant data to the skb - temporarily
+		 * sync buffers for the copy */
+		dma = be64_to_cpu(rx_desc->data[0].addr);
+		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
+					      length, DMA_FROM_DEVICE);
+		skb_copy_to_linear_data(skb, va, length);
+		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
+						 length, DMA_FROM_DEVICE);
+		skb->tail += length;
+	} else {
+
+		/* Move relevant fragments to skb */
+		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
+						      skb_shinfo(skb)->frags,
+						      page_alloc, length);
+		skb_shinfo(skb)->nr_frags = used_frags;
+
+		/* Copy headers into the skb linear buffer */
+		memcpy(skb->data, va, HEADER_COPY_SIZE);
+		skb->tail += HEADER_COPY_SIZE;
+
+		/* Skip headers in first fragment */
+		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+
+		/* Adjust size of first fragment */
+		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+		skb->data_len = length - HEADER_COPY_SIZE;
+	}
+	return skb;
+}
+
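+/*
+ * Copy the page references and DMA addresses of 'num' Rx descriptors
+ * starting at index 'from' to the descriptors starting at index 'to'
+ * (indices are wrapped by the ring size), so the buffers of just-polled
+ * descriptors can be reused when the ring is not completely filled.
+ */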
+static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
+			      struct mlx4_en_rx_ring *ring,
+			      int from, int to, int num)
+{
+	struct skb_frag_struct *skb_frags_from;
+	struct skb_frag_struct *skb_frags_to;
+	struct mlx4_en_rx_desc *rx_desc_from;
+	struct mlx4_en_rx_desc *rx_desc_to;
+	int from_index, to_index;
+	int nr, i;
+
+	for (i = 0; i < num; i++) {
+		from_index = (from + i) & ring->size_mask;
+		to_index = (to + i) & ring->size_mask;
+		skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
+		skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
+		rx_desc_from = ring->buf + (from_index << ring->log_stride);
+		rx_desc_to = ring->buf + (to_index << ring->log_stride);
+
+		for (nr = 0; nr < priv->num_frags; nr++) {
+			skb_frags_to[nr].page = skb_frags_from[nr].page;
+			skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
+			rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
+		}
+	}
+}
+
+
+int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_cqe *cqe;
+	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+	struct skb_frag_struct *skb_frags;
+	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
+	struct mlx4_en_rx_desc *rx_desc;
+	struct sk_buff *skb;
+	int index;
+	int nr;
+	unsigned int length;
+	int polled = 0;
+	int ip_summed;
+
+	if (!priv->port_up)
+		return 0;
+
+	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+	 * descriptor offset can be deduced from the CQE index instead of
+	 * reading 'cqe->index' */
+	index = cq->mcq.cons_index & ring->size_mask;
+	cqe = &cq->buf[index];
+
+	/* Process all completed CQEs */
+	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+		    cq->mcq.cons_index & cq->size)) {
+
+		skb_frags = ring->rx_info + (index << priv->log_rx_info);
+		rx_desc = ring->buf + (index << ring->log_stride);
+
+		/*
+		 * make sure we read the CQE after we read the ownership bit
+		 */
+		rmb();
+
+		/* Drop packet on bad receive or bad checksum */
+		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+						MLX4_CQE_OPCODE_ERROR)) {
+			mlx4_err(mdev, "CQE completed in error - vendor "
+				  "syndrom:%d syndrom:%d\n",
+				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+				  ((struct mlx4_err_cqe *) cqe)->syndrome);
+			goto next;
+		}
+		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
+			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+			goto next;
+		}
+
+		/*
+		 * Packet is OK - process it.
+		 */
+		length = be32_to_cpu(cqe->byte_cnt);
+		ring->bytes += length;
+		ring->packets++;
+
+		if (likely(priv->rx_csum)) {
+			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+			    (cqe->checksum == cpu_to_be16(0xffff))) {
+				priv->port_stats.rx_chksum_good++;
+				/* This packet is eligible for LRO if it is:
+				 * - DIX Ethernet (type interpretation)
+				 * - TCP/IP (v4)
+				 * - without IP options
+				 * - not an IP fragment */
+				if (mlx4_en_can_lro(cqe->status) &&
+				    dev->features & NETIF_F_LRO) {
+
+					nr = mlx4_en_complete_rx_desc(
+						priv, rx_desc,
+						skb_frags, lro_frags,
+						ring->page_alloc, length);
+					if (!nr)
+						goto next;
+
+					if (priv->vlgrp && (cqe->vlan_my_qpn &
+							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
+						lro_vlan_hwaccel_receive_frags(
+						       &ring->lro, lro_frags,
+						       length, length,
+						       priv->vlgrp,
+						       be16_to_cpu(cqe->sl_vid),
+						       NULL, 0);
+					} else
+						lro_receive_frags(&ring->lro,
+								  lro_frags,
+								  length,
+								  length,
+								  NULL, 0);
+
+					goto next;
+				}
+
+				/* LRO not possible, complete processing here */
+				ip_summed = CHECKSUM_UNNECESSARY;
+				INC_PERF_COUNTER(priv->pstats.lro_misses);
+			} else {
+				ip_summed = CHECKSUM_NONE;
+				priv->port_stats.rx_chksum_none++;
+			}
+		} else {
+			ip_summed = CHECKSUM_NONE;
+			priv->port_stats.rx_chksum_none++;
+		}
+
+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
+				     ring->page_alloc, length);
+		if (!skb) {
+			priv->stats.rx_dropped++;
+			goto next;
+		}
+
+		skb->ip_summed = ip_summed;
+		skb->protocol = eth_type_trans(skb, dev);
+
+		/* Push it up the stack */
+		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
+				    MLX4_CQE_VLAN_PRESENT_MASK)) {
+			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
+						be16_to_cpu(cqe->sl_vid));
+		} else
+			netif_receive_skb(skb);
+
+		dev->last_rx = jiffies;
+
+next:
+		++cq->mcq.cons_index;
+		index = (cq->mcq.cons_index) & ring->size_mask;
+		cqe = &cq->buf[index];
+		if (++polled == budget) {
+			/* We are here because we reached the NAPI budget -
+			 * flush only pending LRO sessions */
+			lro_flush_all(&ring->lro);
+			goto out;
+		}
+	}
+
+	/* If CQ is empty flush all LRO sessions unconditionally */
+	lro_flush_all(&ring->lro);
+
+out:
+	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+	mlx4_cq_set_ci(&cq->mcq);
+	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+	ring->cons = cq->mcq.cons_index;
+	ring->prod += polled; /* Polled descriptors were reallocated in place */
+	if (unlikely(!ring->full)) {
+		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
+				  ring->prod - polled, polled);
+		mlx4_en_fill_rx_buf(dev, ring);
+	}
+	mlx4_en_update_rx_prod_db(ring);
+	return polled;
+}
+
+
+void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+{
+	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+
+	if (priv->port_up)
+		netif_rx_schedule(cq->dev, &cq->napi);
+	else
+		mlx4_en_arm_cq(priv, cq);
+}
+
+/* Rx CQ polling - called by NAPI */
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+{
+	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+	struct net_device *dev = cq->dev;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int done;
+
+	done = mlx4_en_process_rx_cq(dev, cq, budget);
+
+	/* If we used up all the quota - we're probably not done yet... */
+	if (done == budget)
+		INC_PERF_COUNTER(priv->pstats.napi_quota);
+	else {
+		/* Done for now */
+		netif_rx_complete(dev, napi);
+		mlx4_en_arm_cq(priv, cq);
+	}
+	return done;
+}
+
+
+/* Calculate the last offset position that accommodates a full fragment
+ * (assuming fragment size = stride - align) */
+static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
+{
+	u16 res = MLX4_EN_ALLOC_SIZE % stride;
+	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
+
+	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+			    "res:%d offset:%d\n", stride, align, res, offset);
+	return offset;
+}
+
+
+static int frag_sizes[] = {
+	FRAG_SZ0,
+	FRAG_SZ1,
+	FRAG_SZ2,
+	FRAG_SZ3
+};
+
+void mlx4_en_calc_rx_buf(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+	int buf_size = 0;
+	int i = 0;
+
+	while (buf_size < eff_mtu) {
+		priv->frag_info[i].frag_size =
+			(eff_mtu > buf_size + frag_sizes[i]) ?
+				frag_sizes[i] : eff_mtu - buf_size;
+		priv->frag_info[i].frag_prefix_size = buf_size;
+		if (!i)	{
+			priv->frag_info[i].frag_align = NET_IP_ALIGN;
+			priv->frag_info[i].frag_stride =
+				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
+		} else {
+			priv->frag_info[i].frag_align = 0;
+			priv->frag_info[i].frag_stride =
+				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
+		}
+		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
+						priv, priv->frag_info[i].frag_stride,
+						priv->frag_info[i].frag_align);
+		buf_size += priv->frag_info[i].frag_size;
+		i++;
+	}
+
+	priv->num_frags = i;
+	priv->rx_skb_size = eff_mtu;
+	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+
+	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
+	for (i = 0; i < priv->num_frags; i++) {
+		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+				"stride:%d last_offset:%d\n", i,
+				priv->frag_info[i].frag_size,
+				priv->frag_info[i].frag_prefix_size,
+				priv->frag_info[i].frag_align,
+				priv->frag_info[i].frag_stride,
+				priv->frag_info[i].last_offset);
+	}
+}
+
+/* RSS related functions */
+
+/* Calculate rss size and map each entry in rss table to rx ring */
+void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+				 struct mlx4_en_rss_map *rss_map,
+				 int num_entries, int num_rings)
+{
+	int i;
+
+	rss_map->size = roundup_pow_of_two(num_entries);
+	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
+		 rss_map->size);
+
+	for (i = 0; i < rss_map->size; i++) {
+		rss_map->map[i] = i % num_rings;
+		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+	}
+}
+
+static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
+	return;
+}
+
+
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
+				 int qpn, int srqn, int cqn,
+				 enum mlx4_qp_state *state,
+				 struct mlx4_qp *qp)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_qp_context *context;
+	int err = 0;
+
+	context = kmalloc(sizeof *context , GFP_KERNEL);
+	if (!context) {
+		mlx4_err(mdev, "Failed to allocate qp context\n");
+		return -ENOMEM;
+	}
+
+	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
+	if (err) {
+		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+		goto out;
+	}
+	qp->event = mlx4_en_sqp_event;
+
+	memset(context, 0, sizeof *context);
+	mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
+
+	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
+	if (err) {
+		mlx4_qp_remove(mdev->dev, qp);
+		mlx4_qp_free(mdev->dev, qp);
+	}
+out:
+	kfree(context);
+	return err;
+}
+
+/* Allocate rx qp's and configure them according to rss map */
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+	struct mlx4_qp_context context;
+	struct mlx4_en_rss_context *rss_context;
+	void *ptr;
+	int rss_xor = mdev->profile.rss_xor;
+	u8 rss_mask = mdev->profile.rss_mask;
+	int i, srqn, qpn, cqn;
+	int err = 0;
+	int good_qps = 0;
+
+	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
+				    rss_map->size, &rss_map->base_qpn);
+	if (err) {
+		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
+			 rss_map->size, priv->port);
+		return err;
+	}
+
+	for (i = 0; i < rss_map->size; i++) {
+		cqn = priv->rx_ring[rss_map->map[i]].cqn;
+		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
+		qpn = rss_map->base_qpn + i;
+		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
+					    &rss_map->state[i],
+					    &rss_map->qps[i]);
+		if (err)
+			goto rss_err;
+
+		++good_qps;
+	}
+
+	/* Configure RSS indirection qp */
+	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
+	if (err) {
+		mlx4_err(mdev, "Failed to reserve range for RSS "
+			       "indirection qp\n");
+		goto rss_err;
+	}
+	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
+	if (err) {
+		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+		goto reserve_err;
+	}
+	rss_map->indir_qp.event = mlx4_en_sqp_event;
+	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
+				priv->rx_ring[0].cqn, 0, &context);
+
+	ptr = ((void *) &context) + 0x3c;
+	rss_context = (struct mlx4_en_rss_context *) ptr;
+	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
+					    (rss_map->base_qpn));
+	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+	rss_context->hash_fn = rss_xor & 0x3;
+	rss_context->flags = rss_mask << 2;
+
+	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
+			       &rss_map->indir_qp, &rss_map->indir_state);
+	if (err)
+		goto indir_err;
+
+	return 0;
+
+indir_err:
+	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+reserve_err:
+	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
+rss_err:
+	for (i = 0; i < good_qps; i++) {
+		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+	}
+	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+	return err;
+}
+
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+	int i;
+
+	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
+
+	for (i = 0; i < rss_map->size; i++) {
+		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+	}
+	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+}
+
+
+
+
+
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 0000000..8592f8f
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+enum {
+	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+};
+
+static int inline_thold __read_mostly = MAX_INLINE;
+
+module_param_named(inline_thold, inline_thold, int, 0444);
+MODULE_PARM_DESC(inline_thold, "treshold for using inline data");
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_tx_ring *ring, u32 size,
+			   u16 stride)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int tmp;
+	int err;
+
+	ring->size = size;
+	ring->size_mask = size - 1;
+	ring->stride = stride;
+
+	inline_thold = min(inline_thold, MAX_INLINE);
+
+	spin_lock_init(&ring->comp_lock);
+
+	tmp = size * sizeof(struct mlx4_en_tx_info);
+	ring->tx_info = vmalloc(tmp);
+	if (!ring->tx_info) {
+		mlx4_err(mdev, "Failed allocating tx_info ring\n");
+		return -ENOMEM;
+	}
+	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+		 ring->tx_info, tmp);
+
+	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+	if (!ring->bounce_buf) {
+		mlx4_err(mdev, "Failed allocating bounce buffer\n");
+		err = -ENOMEM;
+		goto err_tx;
+	}
+	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+
+	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
+				 2 * PAGE_SIZE);
+	if (err) {
+		mlx4_err(mdev, "Failed allocating hwq resources\n");
+		goto err_bounce;
+	}
+
+	err = mlx4_en_map_buffer(&ring->wqres.buf);
+	if (err) {
+		mlx4_err(mdev, "Failed to map TX buffer\n");
+		goto err_hwq_res;
+	}
+
+	ring->buf = ring->wqres.buf.direct.buf;
+
+	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+
+	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
+	if (err) {
+		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+		goto err_map;
+	}
+
+	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
+	if (err) {
+		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+		goto err_reserve;
+	}
+
+	return 0;
+
+err_reserve:
+	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+err_map:
+	mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq_res:
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_bounce:
+	kfree(ring->bounce_buf);
+	ring->bounce_buf = NULL;
+err_tx:
+	vfree(ring->tx_info);
+	ring->tx_info = NULL;
+	return err;
+}
+
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring *ring)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+
+	mlx4_qp_remove(mdev->dev, &ring->qp);
+	mlx4_qp_free(mdev->dev, &ring->qp);
+	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+	mlx4_en_unmap_buffer(&ring->wqres.buf);
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+	kfree(ring->bounce_buf);
+	ring->bounce_buf = NULL;
+	vfree(ring->tx_info);
+	ring->tx_info = NULL;
+}
+
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring *ring,
+			     int cq, int srqn)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	ring->cqn = cq;
+	ring->prod = 0;
+	ring->cons = 0xffffffff;
+	ring->last_nr_txbb = 1;
+	ring->poll_cnt = 0;
+	ring->blocked = 0;
+	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
+	memset(ring->buf, 0, ring->buf_size);
+
+	ring->qp_state = MLX4_QP_STATE_RST;
+	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+
+	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
+				ring->cqn, srqn, &ring->context);
+
+	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+			       &ring->qp, &ring->qp_state);
+
+	return err;
+}
+
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+				struct mlx4_en_tx_ring *ring)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
+		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+}
+
+
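+/*
+ * Release a completed TX descriptor: unmap its DMA mappings, overwrite
+ * the freed TXBBs with a stamp value carrying the owner bit, and free
+ * the skb.  Returns the number of TXBBs the descriptor occupied.
+ */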
+static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+				struct mlx4_en_tx_ring *ring,
+				int index, u8 owner)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
+	struct sk_buff *skb = tx_info->skb;
+	struct skb_frag_struct *frag;
+	void *end = ring->buf + ring->buf_size;
+	int frags = skb_shinfo(skb)->nr_frags;
+	int i;
+	__be32 *ptr = (__be32 *)tx_desc;
+	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+
+	/* Optimize the common case when there are no wraparounds */
+	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+		if (tx_info->linear) {
+			pci_unmap_single(mdev->pdev,
+					 (dma_addr_t) be64_to_cpu(data->addr),
+					 be32_to_cpu(data->byte_count),
+					 PCI_DMA_TODEVICE);
+			++data;
+		}
+
+		for (i = 0; i < frags; i++) {
+			frag = &skb_shinfo(skb)->frags[i];
+			pci_unmap_page(mdev->pdev,
+				       (dma_addr_t) be64_to_cpu(data[i].addr),
+				       frag->size, PCI_DMA_TODEVICE);
+		}
+		/* Stamp the freed descriptor */
+		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+			*ptr = stamp;
+			ptr += STAMP_DWORDS;
+		}
+
+	} else {
+		if ((void *) data >= end) {
+			data = (struct mlx4_wqe_data_seg *)
+					(ring->buf + ((void *) data - end));
+		}
+
+		if (tx_info->linear) {
+			pci_unmap_single(mdev->pdev,
+					 (dma_addr_t) be64_to_cpu(data->addr),
+					 be32_to_cpu(data->byte_count),
+					 PCI_DMA_TODEVICE);
+			++data;
+		}
+
+		for (i = 0; i < frags; i++) {
+			/* Check for wraparound before unmapping */
+			if ((void *) data >= end)
+				data = (struct mlx4_wqe_data_seg *) ring->buf;
+			frag = &skb_shinfo(skb)->frags[i];
+			pci_unmap_page(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
+					 frag->size, PCI_DMA_TODEVICE);
+		}
+		/* Stamp the freed descriptor */
+		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+			*ptr = stamp;
+			ptr += STAMP_DWORDS;
+			if ((void *) ptr >= end) {
+				ptr = ring->buf;
+				stamp ^= cpu_to_be32(0x80000000);
+			}
+		}
+
+	}
+	dev_kfree_skb_any(skb);
+	return tx_info->nr_txbb;
+}
+
+
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int cnt = 0;
+
+	/* Skip last polled descriptor */
+	ring->cons += ring->last_nr_txbb;
+	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+		 ring->cons, ring->prod);
+
+	if ((u32) (ring->prod - ring->cons) > ring->size) {
+		if (netif_msg_tx_err(priv))
+			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+		return 0;
+	}
+
+	while (ring->cons != ring->prod) {
+		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+						ring->cons & ring->size_mask,
+						!!(ring->cons & ring->size));
+		ring->cons += ring->last_nr_txbb;
+		cnt++;
+	}
+
+	if (cnt)
+		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+
+	return cnt;
+}
+
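+/*
+ * Map the 8 user priorities to TX rings: with a single ring everything
+ * maps to ring 0, otherwise the priorities are spread as evenly as
+ * possible over the available rings.
+ */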
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
+{
+	int block = 8 / ring_num;
+	int extra = 8 - (block * ring_num);
+	int num = 0;
+	u16 ring = 1;
+	int prio;
+
+	if (ring_num == 1) {
+		for (prio = 0; prio < 8; prio++)
+			prio_map[prio] = 0;
+		return;
+	}
+
+	for (prio = 0; prio < 8; prio++) {
+		if (extra && (num == block + 1)) {
+			ring++;
+			num = 0;
+			extra--;
+		} else if (!extra && (num == block)) {
+			ring++;
+			num = 0;
+		}
+		prio_map[prio] = ring;
+		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
+		num++;
+	}
+}
+
+static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_cq *mcq = &cq->mcq;
+	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+	struct mlx4_cqe *cqe = cq->buf;
+	u16 index;
+	u16 new_index;
+	u32 txbbs_skipped = 0;
+	u32 cq_last_sav;
+
+	/* index always points to the first TXBB of the last polled descriptor */
+	index = ring->cons & ring->size_mask;
+	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+	if (index == new_index)
+		return;
+
+	if (!priv->port_up)
+		return;
+
+	/*
+	 * We use a two-stage loop:
+	 * - the first samples the HW-updated CQE
+	 * - the second frees TXBBs until the last sample
+	 * This lets us amortize CQE cache misses, while still polling the CQ
+	 * until it is quiescent.
+	 */
+	cq_last_sav = mcq->cons_index;
+	do {
+		do {
+			/* Skip over last polled CQE */
+			index = (index + ring->last_nr_txbb) & ring->size_mask;
+			txbbs_skipped += ring->last_nr_txbb;
+
+			/* Poll next CQE */
+			ring->last_nr_txbb = mlx4_en_free_tx_desc(
+						priv, ring, index,
+						!!((ring->cons + txbbs_skipped) &
+						   ring->size));
+			++mcq->cons_index;
+
+		} while (index != new_index);
+
+		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+	} while (index != new_index);
+	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
+			 (u32) (mcq->cons_index - cq_last_sav));
+
+	/*
+	 * To prevent CQ overflow we first update CQ consumer and only then
+	 * the ring consumer.
+	 */
+	mlx4_cq_set_ci(mcq);
+	wmb();
+	ring->cons += txbbs_skipped;
+
+	/* Wakeup Tx queue if this ring stopped it */
+	if (unlikely(ring->blocked)) {
+		if (((u32) (ring->prod - ring->cons) <=
+		     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+
+			/* TODO: support multiqueue netdevs. Currently, we block
+			 * when *any* ring is full. Note that:
+			 * - 2 Tx rings can unblock at the same time and call
+			 *   netif_wake_queue(), which is OK since this
+			 *   operation is idempotent.
+			 * - We might wake the queue just after another ring
+			 *   stopped it. This is no big deal because the next
+			 *   transmission on that ring would stop the queue.
+			 */
+			ring->blocked = 0;
+			netif_wake_queue(dev);
+			priv->port_stats.wake_queue++;
+		}
+	}
+}
+
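+/* Tx completion interrupt handler: process completions, then either re-arm
+ * the CQ (while the queue is blocked) or fall back to timer-based polling
+ * for the remaining completions. */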
+void mlx4_en_tx_irq(struct mlx4_cq *mcq)
+{
+	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+
+	spin_lock_irq(&ring->comp_lock);
+	cq->armed = 0;
+	mlx4_en_process_tx_cq(cq->dev, cq);
+	if (ring->blocked)
+		mlx4_en_arm_cq(priv, cq);
+	else
+		mod_timer(&cq->timer, jiffies + 1);
+	spin_unlock_irq(&ring->comp_lock);
+}
+
+
+void mlx4_en_poll_tx_cq(unsigned long data)
+{
+	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
+	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+	u32 inflight;
+
+	INC_PERF_COUNTER(priv->pstats.tx_poll);
+
+	netif_tx_lock(priv->dev);
+	spin_lock_irq(&ring->comp_lock);
+	mlx4_en_process_tx_cq(cq->dev, cq);
+	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
+
+	/* If there are still packets in flight and the timer has not already
+	 * been scheduled by the Tx routine then schedule it here to guarantee
+	 * completion processing of these packets */
+	if (inflight && priv->port_up)
+		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+	spin_unlock_irq(&ring->comp_lock);
+	netif_tx_unlock(priv->dev);
+}
+
+static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
+						      struct mlx4_en_tx_ring *ring,
+						      u32 index,
+						      unsigned int desc_size)
+{
+	u32 copy = (ring->size - index) * TXBB_SIZE;
+	int i;
+
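+	/* The descriptor was built in the bounce buffer because it wraps past
+	 * the end of the ring.  Copy the wrapped tail back to the start of the
+	 * ring buffer and the head to its original slot, working backwards in
+	 * 32-bit words with a barrier at every TXBB boundary; the first dword
+	 * (owner/opcode) is left for the caller to set last. */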
+	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
+		if ((i & (TXBB_SIZE - 1)) == 0)
+			wmb();
+
+		*((u32 *) (ring->buf + i)) =
+			*((u32 *) (ring->bounce_buf + copy + i));
+	}
+
+	for (i = copy - 4; i >= 4 ; i -= 4) {
+		if ((i & (TXBB_SIZE - 1)) == 0)
+			wmb();
+
+		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+			*((u32 *) (ring->bounce_buf + i));
+	}
+
+	/* Return real descriptor location */
+	return ring->buf + index * TXBB_SIZE;
+}
+
+static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
+{
+	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
+	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+
+	/* If we don't have a pending timer, set one up to catch our recent
+	   post in case the interface becomes idle */
+	if (!timer_pending(&cq->timer))
+		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
+	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
+		mlx4_en_process_tx_cq(priv->dev, cq);
+}
+
+static void *get_frag_ptr(struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag =  &skb_shinfo(skb)->frags[0];
+	struct page *page = frag->page;
+	void *ptr;
+
+	ptr = page_address(page);
+	if (unlikely(!ptr))
+		return NULL;
+
+	return ptr + frag->page_offset;
+}
+
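+/* A packet is sent inline (copied into the descriptor) when it is short
+ * enough (<= inline_thold), not GSO, and its payload is entirely linear or
+ * held in at most one page fragment. */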
+static int is_inline(struct sk_buff *skb, void **pfrag)
+{
+	void *ptr;
+
+	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
+		if (skb_shinfo(skb)->nr_frags == 1) {
+			ptr = get_frag_ptr(skb);
+			if (unlikely(!ptr))
+				return 0;
+
+			if (pfrag)
+				*pfrag = ptr;
+
+			return 1;
+		} else if (unlikely(skb_shinfo(skb)->nr_frags))
+			return 0;
+		else
+			return 1;
+	}
+
+	return 0;
+}
+
+static int inline_size(struct sk_buff *skb)
+{
+	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
+	    <= MLX4_INLINE_ALIGN)
+		return ALIGN(skb->len + CTRL_SIZE +
+			     sizeof(struct mlx4_wqe_inline_seg), 16);
+	else
+		return ALIGN(skb->len + CTRL_SIZE + 2 *
+			     sizeof(struct mlx4_wqe_inline_seg), 16);
+}
+
+static int get_real_size(struct sk_buff *skb, struct net_device *dev,
+			 int *lso_header_size)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int real_size;
+
+	if (skb_is_gso(skb)) {
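+		/* LSO descriptor: ctrl segment, one data segment per page
+		 * fragment, plus an LSO segment large enough to hold the
+		 * headers (rounded up to a multiple of DS_SIZE). */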
+		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
+			ALIGN(*lso_header_size + 4, DS_SIZE);
+		if (unlikely(*lso_header_size != skb_headlen(skb))) {
+			/* We add a segment for the skb linear buffer only if
+			 * it contains data */
+			if (*lso_header_size < skb_headlen(skb))
+				real_size += DS_SIZE;
+			else {
+				if (netif_msg_tx_err(priv))
+					mlx4_warn(mdev, "Non-linear headers\n");
+				dev_kfree_skb_any(skb);
+				return 0;
+			}
+		}
+		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
+			if (netif_msg_tx_err(priv))
+				mlx4_warn(mdev, "LSO header size too big\n");
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+	} else {
+		*lso_header_size = 0;
+		if (!is_inline(skb, NULL))
+			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
+		else
+			real_size = inline_size(skb);
+	}
+
+	return real_size;
+}
+
+static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
+			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
+{
+	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
+	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
+
+	if (skb->len <= spc) {
+		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+		if (skb_shinfo(skb)->nr_frags)
+			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
+			       skb_shinfo(skb)->frags[0].size);
+
+	} else {
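+		/* Data does not fit in a single inline segment: fill the first
+		 * segment up to 'spc' bytes, place the remainder in a second
+		 * inline segment, and publish the second byte_count only after
+		 * a write barrier. */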
+		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+		if (skb_headlen(skb) <= spc) {
+			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+			if (skb_headlen(skb) < spc) {
+				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
+					fragptr, spc - skb_headlen(skb));
+				fragptr +=  spc - skb_headlen(skb);
+			}
+			inl = (void *) (inl + 1) + spc;
+			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+		} else {
+			skb_copy_from_linear_data(skb, inl + 1, spc);
+			inl = (void *) (inl + 1) + spc;
+			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+					skb_headlen(skb) - spc);
+			if (skb_shinfo(skb)->nr_frags)
+				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
+					fragptr, skb_shinfo(skb)->frags[0].size);
+		}
+
+		wmb();
+		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
+	}
+	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
+	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+}
+
+static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
+			 u16 *vlan_tag)
+{
+	int tx_ind;
+
+	/* Obtain VLAN information if present */
+	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
+		*vlan_tag = vlan_tx_tag_get(skb);
+		/* Set the Tx ring to use according to vlan priority */
+		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
+	} else {
+		*vlan_tag = 0;
+		tx_ind = 0;
+	}
+	return tx_ind;
+}
+
+int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring;
+	struct mlx4_en_cq *cq;
+	struct mlx4_en_tx_desc *tx_desc;
+	struct mlx4_wqe_data_seg *data;
+	struct skb_frag_struct *frag;
+	struct mlx4_en_tx_info *tx_info;
+	int tx_ind = 0;
+	int nr_txbb;
+	int desc_size;
+	int real_size;
+	dma_addr_t dma;
+	u32 index;
+	__be32 op_own;
+	u16 vlan_tag;
+	int i;
+	int lso_header_size;
+	void *fragptr;
+
+	if (unlikely(!skb->len)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	real_size = get_real_size(skb, dev, &lso_header_size);
+	if (unlikely(!real_size))
+		return NETDEV_TX_OK;
+
+	/* Align descriptor to TXBB size */
+	desc_size = ALIGN(real_size, TXBB_SIZE);
+	nr_txbb = desc_size / TXBB_SIZE;
+	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
+		if (netif_msg_tx_err(priv))
+			mlx4_warn(mdev, "Oversized header or SG list\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+	ring = &priv->tx_ring[tx_ind];
+
+	/* Check available TXBBs And 2K spare for prefetch */
+	if (unlikely(((int)(ring->prod - ring->cons)) >
+		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+		/* Every full Tx ring stops the queue.
+		 * TODO: implement multi-queue support (per-queue stop) */
+		netif_stop_queue(dev);
+		ring->blocked = 1;
+		priv->port_stats.queue_stopped++;
+
+		/* Use interrupts to find out when queue opened */
+		cq = &priv->tx_cq[tx_ind];
+		mlx4_en_arm_cq(priv, cq);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Now that we know which Tx ring to use, check that the port is up */
+	if (unlikely(!priv->port_up)) {
+		if (netif_msg_tx_err(priv))
+			mlx4_warn(mdev, "xmit: port down!\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* Track current inflight packets for performance analysis */
+	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+			 (u32) (ring->prod - ring->cons - 1));
+
+	/* Packet is good - grab an index and transmit it */
+	index = ring->prod & ring->size_mask;
+
+	/* If the whole descriptor does not fit before the end of the ring,
+	 * build it in the bounce buffer and copy it into place afterwards. */
+	if (likely(index + nr_txbb <= ring->size))
+		tx_desc = ring->buf + index * TXBB_SIZE;
+	else
+		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+
+	/* Save skb in tx_info ring */
+	tx_info = &ring->tx_info[index];
+	tx_info->skb = skb;
+	tx_info->nr_txbb = nr_txbb;
+
+	/* Prepare the ctrl segment, apart from opcode+ownership which depends
+	 * on whether LSO is used */
+	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
+	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+						MLX4_WQE_CTRL_SOLICITED);
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
+		priv->port_stats.tx_chksum_offload++;
+	}
+
+	/* Handle LSO (TSO) packets */
+	if (lso_header_size) {
+		/* Mark opcode as LSO */
+		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
+			((ring->prod & ring->size) ?
+				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+		/* Fill in the LSO prefix */
+		tx_desc->lso.mss_hdr_size = cpu_to_be32(
+			skb_shinfo(skb)->gso_size << 16 | lso_header_size);
+
+		/* Copy the headers;
+		 * note that we already verified that they are linear */
+		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+		data = ((void *) &tx_desc->lso +
+			ALIGN(lso_header_size + 4, DS_SIZE));
+
+		priv->port_stats.tso_packets++;
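+		/* i = number of wire packets the HW will emit for this TSO frame */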
+		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
+			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
+		ring->bytes += skb->len + (i - 1) * lso_header_size;
+		ring->packets += i;
+	} else {
+		/* Normal (non-LSO) packet */
+		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+			((ring->prod & ring->size) ?
+			 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+		data = &tx_desc->data;
+		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+		ring->packets++;
+
+	}
+	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
+
+
+	/* Valid only for non-inline segments */
+	tx_info->data_offset = (void *) data - (void *) tx_desc;
+
+	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
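+	/* Fragments are mapped below from last to first, so start at the slot
+	 * of the last data segment. */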
+	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+	if (!is_inline(skb, &fragptr)) {
+		/* Map fragments */
+		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+			frag = &skb_shinfo(skb)->frags[i];
+			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
+					   frag->size, PCI_DMA_TODEVICE);
+			data->addr = cpu_to_be64(dma);
+			data->lkey = cpu_to_be32(mdev->mr.key);
+			wmb();
+			data->byte_count = cpu_to_be32(frag->size);
+			--data;
+		}
+
+		/* Map linear part */
+		if (tx_info->linear) {
+			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
+					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
+			data->addr = cpu_to_be64(dma);
+			data->lkey = cpu_to_be32(mdev->mr.key);
+			wmb();
+			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
+		}
+	} else
+		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+
+	ring->prod += nr_txbb;
+
+	/* If we used a bounce buffer then copy the descriptor back into place */
+	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
+		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
+
+	/* Run destructor before passing skb to HW */
+	if (likely(!skb_shared(skb)))
+		skb_orphan(skb);
+
+	/* Ensure the new descriptor hits memory
+	 * before setting ownership of this descriptor to HW */
+	wmb();
+	tx_desc->ctrl.owner_opcode = op_own;
+
+	/* Ring doorbell! */
+	wmb();
+	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
+	dev->trans_start = jiffies;
+
+	/* Poll CQ here */
+	mlx4_en_xmit_poll(priv, tx_ind);
+
+	return NETDEV_TX_OK;
+}
+
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b561..de16933 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@
 	int i;
 
 	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
-			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
+			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955..be09fdb 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -88,6 +88,7 @@
 		[ 8] = "P_Key violation counter",
 		[ 9] = "Q_Key violation counter",
 		[10] = "VMM",
+		[12] = "DPDP",
 		[16] = "MW support",
 		[17] = "APM support",
 		[18] = "Atomic ops support",
@@ -346,7 +347,7 @@
 			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
 			dev_cap->max_vl[i]	   = field >> 4;
 			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
-			dev_cap->max_mtu[i]	   = field >> 4;
+			dev_cap->ib_mtu[i]	   = field >> 4;
 			dev_cap->max_port_width[i] = field & 0xf;
 			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
 			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
@@ -354,9 +355,13 @@
 			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
 		}
 	} else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
 #define QUERY_PORT_MTU_OFFSET			0x01
+#define QUERY_PORT_ETH_MTU_OFFSET		0x02
 #define QUERY_PORT_WIDTH_OFFSET			0x06
 #define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
+#define QUERY_PORT_MAC_OFFSET			0x08
+#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
 #define QUERY_PORT_MAX_VL_OFFSET		0x0b
 
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -365,8 +370,10 @@
 			if (err)
 				goto out;
 
+			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+			dev_cap->supported_port_types[i] = field & 3;
 			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
-			dev_cap->max_mtu[i]	   = field & 0xf;
+			dev_cap->ib_mtu[i]	   = field & 0xf;
 			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
 			dev_cap->max_port_width[i] = field & 0xf;
 			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@
 			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
 			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
 			dev_cap->max_vl[i]	   = field & 0xf;
+			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+			dev_cap->log_max_macs[i]  = field & 0xf;
+			dev_cap->log_max_vlans[i] = field >> 4;
+			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
+			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
 		}
 	}
 
@@ -407,7 +419,7 @@
 	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
 		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
 	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
-		 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
 		 dev_cap->max_port_width[1]);
 	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@
 		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
 		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);
 
-		field = 128 << dev->caps.mtu_cap[port];
+		field = 128 << dev->caps.ib_mtu_cap[port];
 		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
 		field = dev->caps.gid_table_len[port];
 		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c..526d7f3 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -66,11 +66,13 @@
 	int local_ca_ack_delay;
 	int num_ports;
 	u32 max_msg_sz;
-	int max_mtu[MLX4_MAX_PORTS + 1];
+	int ib_mtu[MLX4_MAX_PORTS + 1];
 	int max_port_width[MLX4_MAX_PORTS + 1];
 	int max_vl[MLX4_MAX_PORTS + 1];
 	int max_gids[MLX4_MAX_PORTS + 1];
 	int max_pkeys[MLX4_MAX_PORTS + 1];
+	u64 def_mac[MLX4_MAX_PORTS + 1];
+	u16 eth_mtu[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
 	u32 flags;
 	int reserved_uars;
@@ -102,6 +104,9 @@
 	u32 reserved_lkey;
 	u64 max_icm_sz;
 	int max_gso_sz;
+	u8  supported_port_types[MLX4_MAX_PORTS + 1];
+	u8  log_max_macs[MLX4_MAX_PORTS + 1];
+	u8  log_max_vlans[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919..468921b8 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@
 	.num_mtt	= 1 << 20,
 };
 
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static int use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+		  "(0/1, default 0)");
+
+static int mlx4_check_port_params(struct mlx4_dev *dev,
+				  enum mlx4_port_type *port_type)
+{
+	int i;
+
+	for (i = 0; i < dev->caps.num_ports - 1; i++) {
+		if (port_type[i] != port_type[i+1] &&
+		    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+			mlx4_err(dev, "Only same port types supported "
+				 "on this HCA, aborting.\n");
+			return -EINVAL;
+		}
+	}
+	if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
+	    (port_type[1] == MLX4_PORT_TYPE_IB)) {
+		mlx4_err(dev, "eth-ib configuration is not supported.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dev->caps.num_ports; i++) {
+		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
+			mlx4_err(dev, "Requested port type for port %d is not "
+				      "supported on this HCA\n", i + 1);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static void mlx4_set_port_mask(struct mlx4_dev *dev)
+{
+	int i;
+
+	dev->caps.port_mask = 0;
+	for (i = 1; i <= dev->caps.num_ports; ++i)
+		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
+			dev->caps.port_mask |= 1 << (i - 1);
+}
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
 	int err;
@@ -120,10 +171,13 @@
 	dev->caps.num_ports	     = dev_cap->num_ports;
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
-		dev->caps.mtu_cap[i]	    = dev_cap->max_mtu[i];
+		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
 		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
 		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
 		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
+		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
+		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
 	}
 
 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@
 	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
 	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
 	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
-	dev->caps.reserved_qps	     = dev_cap->reserved_qps;
 	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
 	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
+	dev->caps.log_num_macs  = log_num_mac;
+	dev->caps.log_num_vlans = log_num_vlan;
+	dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+	for (i = 1; i <= dev->caps.num_ports; ++i) {
+		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
+			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+		else
+			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+
+		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+			mlx4_warn(dev, "Requested number of MACs is too high "
+				  "for port %d, reducing to %d.\n",
+				  i, 1 << dev->caps.log_num_macs);
+		}
+		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+			mlx4_warn(dev, "Requested number of VLANs is too high "
+				  "for port %d, reducing to %d.\n",
+				  i, 1 << dev->caps.log_num_vlans);
+		}
+	}
+
+	mlx4_set_port_mask(dev);
+
+	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+		(1 << dev->caps.log_num_macs) *
+		(1 << dev->caps.log_num_vlans) *
+		(1 << dev->caps.log_num_prios) *
+		dev->caps.num_ports;
+	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
 	return 0;
 }
 
+/*
+ * Change the port configuration of the device.
+ * Every user of this function must hold the port mutex.
+ */
+static int mlx4_change_port_types(struct mlx4_dev *dev,
+				  enum mlx4_port_type *port_types)
+{
+	int err = 0;
+	int change = 0;
+	int port;
+
+	for (port = 0; port <  dev->caps.num_ports; port++) {
+		if (port_types[port] != dev->caps.port_type[port + 1]) {
+			change = 1;
+			dev->caps.port_type[port + 1] = port_types[port];
+		}
+	}
+	if (change) {
+		mlx4_unregister_device(dev);
+		for (port = 1; port <= dev->caps.num_ports; port++) {
+			mlx4_CLOSE_PORT(dev, port);
+			err = mlx4_SET_PORT(dev, port);
+			if (err) {
+				mlx4_err(dev, "Failed to set port %d, "
+					      "aborting\n", port);
+				goto out;
+			}
+		}
+		mlx4_set_port_mask(dev);
+		err = mlx4_register_device(dev);
+	}
+
+out:
+	return err;
+}
+
+static ssize_t show_port_type(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+						   port_attr);
+	struct mlx4_dev *mdev = info->dev;
+
+	return sprintf(buf, "%s\n",
+		       mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
+		       "ib" : "eth");
+}
+
+static ssize_t set_port_type(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+						   port_attr);
+	struct mlx4_dev *mdev = info->dev;
+	struct mlx4_priv *priv = mlx4_priv(mdev);
+	enum mlx4_port_type types[MLX4_MAX_PORTS];
+	int i;
+	int err = 0;
+
+	if (!strcmp(buf, "ib\n"))
+		info->tmp_type = MLX4_PORT_TYPE_IB;
+	else if (!strcmp(buf, "eth\n"))
+		info->tmp_type = MLX4_PORT_TYPE_ETH;
+	else {
+		mlx4_err(mdev, "%s is not a supported port type\n", buf);
+		return -EINVAL;
+	}
+
+	mutex_lock(&priv->port_mutex);
+	for (i = 0; i < mdev->caps.num_ports; i++)
+		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+					mdev->caps.port_type[i+1];
+
+	err = mlx4_check_port_params(mdev, types);
+	if (err)
+		goto out;
+
+	for (i = 1; i <= mdev->caps.num_ports; i++)
+		priv->port[i].tmp_type = 0;
+
+	err = mlx4_change_port_types(mdev, types);
+
+out:
+	mutex_unlock(&priv->port_mutex);
+	return err ? err : count;
+}
+
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -211,7 +393,8 @@
 				  ((u64) (MLX4_CMPT_TYPE_QP *
 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
 				  cmpt_entry_sz, dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err)
 		goto err;
 
@@ -336,7 +519,8 @@
 				  init_hca->qpc_base,
 				  dev_cap->qpc_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
 		goto err_unmap_dmpt;
@@ -346,7 +530,8 @@
 				  init_hca->auxc_base,
 				  dev_cap->aux_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
 		goto err_unmap_qp;
@@ -356,7 +541,8 @@
 				  init_hca->altc_base,
 				  dev_cap->altc_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
 		goto err_unmap_auxc;
@@ -366,7 +552,8 @@
 				  init_hca->rdmarc_base,
 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
 		goto err_unmap_altc;
@@ -565,6 +752,7 @@
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
+	int port;
 
 	err = mlx4_init_uar_table(dev);
 	if (err) {
@@ -663,8 +851,20 @@
 		goto err_qp_table_free;
 	}
 
+	for (port = 1; port <= dev->caps.num_ports; port++) {
+		err = mlx4_SET_PORT(dev, port);
+		if (err) {
+			mlx4_err(dev, "Failed to set port %d, aborting\n",
+				port);
+			goto err_mcg_table_free;
+		}
+	}
+
 	return 0;
 
+err_mcg_table_free:
+	mlx4_cleanup_mcg_table(dev);
+
 err_qp_table_free:
 	mlx4_cleanup_qp_table(dev);
 
@@ -728,11 +928,45 @@
 		priv->eq_table.eq[i].irq = dev->pdev->irq;
 }
 
+static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	int err = 0;
+
+	info->dev = dev;
+	info->port = port;
+	mlx4_init_mac_table(dev, &info->mac_table);
+	mlx4_init_vlan_table(dev, &info->vlan_table);
+
+	sprintf(info->dev_name, "mlx4_port%d", port);
+	info->port_attr.attr.name = info->dev_name;
+	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+	info->port_attr.show      = show_port_type;
+	info->port_attr.store     = set_port_type;
+
+	err = device_create_file(&dev->pdev->dev, &info->port_attr);
+	if (err) {
+		mlx4_err(dev, "Failed to create file for port %d\n", port);
+		info->port = -1;
+	}
+
+	return err;
+}
+
+static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+{
+	if (info->port < 0)
+		return;
+
+	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+}
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct mlx4_priv *priv;
 	struct mlx4_dev *dev;
 	int err;
+	int port;
 
 	printk(KERN_INFO PFX "Initializing %s\n",
 	       pci_name(pdev));
@@ -807,6 +1041,8 @@
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
 
+	mutex_init(&priv->port_mutex);
+
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	mutex_init(&priv->pgdir_mutex);
 
@@ -842,15 +1078,24 @@
 	if (err)
 		goto err_close;
 
+	for (port = 1; port <= dev->caps.num_ports; port++) {
+		err = mlx4_init_port_info(dev, port);
+		if (err)
+			goto err_port;
+	}
+
 	err = mlx4_register_device(dev);
 	if (err)
-		goto err_cleanup;
+		goto err_port;
 
 	pci_set_drvdata(pdev, dev);
 
 	return 0;
 
-err_cleanup:
+err_port:
+	for (port = 1; port <= dev->caps.num_ports; port++)
+		mlx4_cleanup_port_info(&priv->port[port]);
+
 	mlx4_cleanup_mcg_table(dev);
 	mlx4_cleanup_qp_table(dev);
 	mlx4_cleanup_srq_table(dev);
@@ -907,8 +1152,10 @@
 	if (dev) {
 		mlx4_unregister_device(dev);
 
-		for (p = 1; p <= dev->caps.num_ports; ++p)
+		for (p = 1; p <= dev->caps.num_ports; p++) {
+			mlx4_cleanup_port_info(&priv->port[p]);
 			mlx4_CLOSE_PORT(dev, p);
+		}
 
 		mlx4_cleanup_mcg_table(dev);
 		mlx4_cleanup_qp_table(dev);
@@ -948,6 +1195,8 @@
 	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
 	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
 	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
+	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
 	{ 0, }
 };
 
@@ -960,10 +1209,28 @@
 	.remove		= __devexit_p(mlx4_remove_one)
 };
 
+static int __init mlx4_verify_params(void)
+{
+	if ((log_num_mac < 0) || (log_num_mac > 7)) {
+		printk(KERN_WARNING "mlx4_core: bad log_num_mac: %d\n", log_num_mac);
+		return -1;
+	}
+
+	if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
+		printk(KERN_WARNING "mlx4_core: bad log_num_vlan: %d\n", log_num_vlan);
+		return -1;
+	}
+
+	return 0;
+}
+
 static int __init mlx4_init(void)
 {
 	int ret;
 
+	if (mlx4_verify_params())
+		return -EINVAL;
+
 	ret = mlx4_catas_init();
 	if (ret)
 		return ret;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88c..592c01a 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
-	err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
-			       dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
+	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+			       dev->caps.num_amgms - 1, 0, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5337e3a..fa431fa 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -111,6 +111,7 @@
 	u32			last;
 	u32			top;
 	u32			max;
+	u32                     reserved_top;
 	u32			mask;
 	spinlock_t		lock;
 	unsigned long	       *table;
@@ -251,6 +252,38 @@
 	struct list_head	list;
 };
 
+#define MLX4_MAX_MAC_NUM	128
+#define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)
+
+struct mlx4_mac_table {
+	__be64			entries[MLX4_MAX_MAC_NUM];
+	int			refs[MLX4_MAX_MAC_NUM];
+	struct mutex		mutex;
+	int			total;
+	int			max;
+};
+
+#define MLX4_MAX_VLAN_NUM	128
+#define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)
+
+struct mlx4_vlan_table {
+	__be32			entries[MLX4_MAX_VLAN_NUM];
+	int			refs[MLX4_MAX_VLAN_NUM];
+	struct mutex		mutex;
+	int			total;
+	int			max;
+};
+
+struct mlx4_port_info {
+	struct mlx4_dev	       *dev;
+	int			port;
+	char			dev_name[16];
+	struct device_attribute port_attr;
+	enum mlx4_port_type	tmp_type;
+	struct mlx4_mac_table	mac_table;
+	struct mlx4_vlan_table	vlan_table;
+};
+
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -279,6 +312,8 @@
 
 	struct mlx4_uar		driver_uar;
 	void __iomem	       *kar;
+	struct mlx4_port_info	port[MLX4_MAX_PORTS + 1];
+	struct mutex		port_mutex;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +323,10 @@
 
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 reserved_top);
 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
 
 int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +384,9 @@
 
 void mlx4_handle_catas_err(struct mlx4_dev *dev);
 
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+
 #endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 0000000..11fb17c
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_H_
+#define _MLX4_EN_H_
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/inet_lro.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/srq.h>
+#include <linux/mlx4/doorbell.h>
+
+#include "en_port.h"
+
+#define DRV_NAME	"mlx4_en"
+#define DRV_VERSION	"1.4.0"
+#define DRV_RELDATE	"Sep 2008"
+
+
+#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
+
+#define mlx4_dbg(mlevel, priv, format, arg...)	\
+	if (NETIF_MSG_##mlevel & priv->msg_enable) \
+	printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
+		(&priv->mdev->pdev->dev)->bus_id , ## arg)
+
+#define mlx4_err(mdev, format, arg...) \
+	printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
+		(&mdev->pdev->dev)->bus_id , ## arg)
+#define mlx4_info(mdev, format, arg...) \
+	printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
+		(&mdev->pdev->dev)->bus_id , ## arg)
+#define mlx4_warn(mdev, format, arg...) \
+	printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
+		(&mdev->pdev->dev)->bus_id , ## arg)
+
+/*
+ * Device constants
+ */
+
+
+#define MLX4_EN_PAGE_SHIFT	12
+#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
+#define MAX_TX_RINGS		16
+#define MAX_RX_RINGS		16
+#define MAX_RSS_MAP_SIZE	64
+#define RSS_FACTOR		2
+#define TXBB_SIZE		64
+#define HEADROOM		(2048 / TXBB_SIZE + 1)
+#define MAX_LSO_HDR_SIZE	92
+#define STAMP_STRIDE		64
+#define STAMP_DWORDS		(STAMP_STRIDE / 4)
+#define STAMP_SHIFT		31
+#define STAMP_VAL		0x7fffffff
+#define STATS_DELAY		(HZ / 4)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
+#define MAX_DESC_SIZE		512
+#define MAX_DESC_TXBBS		(MAX_DESC_SIZE / TXBB_SIZE)
+
+/*
+ * OS related constants and tunables
+ */
+
+#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
+
+#define MLX4_EN_ALLOC_ORDER	2
+#define MLX4_EN_ALLOC_SIZE	(PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+
+#define MLX4_EN_MAX_LRO_DESCRIPTORS	32
+
+/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
+ * and 4K allocations) */
+enum {
+	FRAG_SZ0 = 512 - NET_IP_ALIGN,
+	FRAG_SZ1 = 1024,
+	FRAG_SZ2 = 4096,
+	FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
+};
+#define MLX4_EN_MAX_RX_FRAGS	4
+
+/* Minimum ring size for our page-allocation scheme to work */
+#define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
+#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
+
+#define MLX4_EN_TX_RING_NUM		9
+#define MLX4_EN_DEF_TX_RING_SIZE	1024
+#define MLX4_EN_DEF_RX_RING_SIZE  	1024
+
+/* Target number of bytes to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET	0x20000
+#define MLX4_EN_RX_COAL_TIME	0x10
+
+#define MLX4_EN_TX_COAL_PKTS	5
+#define MLX4_EN_TX_COAL_TIME	0x80
+
+#define MLX4_EN_RX_RATE_LOW		400000
+#define MLX4_EN_RX_COAL_TIME_LOW	0
+#define MLX4_EN_RX_RATE_HIGH		450000
+#define MLX4_EN_RX_COAL_TIME_HIGH	128
+#define MLX4_EN_RX_SIZE_THRESH		1024
+#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
+#define MLX4_EN_SAMPLE_INTERVAL		0
+
+#define MLX4_EN_AUTO_CONF	0xffff
+
+#define MLX4_EN_DEF_RX_PAUSE	1
+#define MLX4_EN_DEF_TX_PAUSE	1
+
+/* Interval between successive polls in the Tx routine when polling is used
+   instead of interrupts (in per-core Tx rings) - should be a power of 2 */
+#define MLX4_EN_TX_POLL_MODER	16
+#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)
+
+#define ETH_LLC_SNAP_SIZE	8
+
+#define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
+#define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
+
+#define MLX4_EN_MIN_MTU		46
+#define ETH_BCAST		0xffffffffffffULL
+
+#ifdef MLX4_EN_PERF_STAT
+/* Number of samples to 'average' */
+#define AVG_SIZE			128
+#define AVG_FACTOR			1024
+#define NUM_PERF_STATS			NUM_PERF_COUNTERS
+
+#define INC_PERF_COUNTER(cnt)		(++(cnt))
+#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
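+/* Running average over roughly AVG_SIZE samples; samples are scaled by
+ * AVG_FACTOR so the average keeps precision in integer arithmetic. */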
+#define AVG_PERF_COUNTER(cnt, sample) \
+	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
+#define GET_PERF_COUNTER(cnt)		(cnt)
+#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)
+
+#else
+
+#define NUM_PERF_STATS			0
+#define INC_PERF_COUNTER(cnt)		do {} while (0)
+#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
+#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
+#define GET_PERF_COUNTER(cnt)		(0)
+#define GET_AVG_PERF_COUNTER(cnt)	(0)
+#endif /* MLX4_EN_PERF_STAT */
+
+/*
+ * Configurables
+ */
+
+enum cq_type {
+	RX = 0,
+	TX = 1,
+};
+
+
+/*
+ * Useful macros
+ */
+#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
+#define XNOR(x, y)		(!(x) == !(y))
+#define ILLEGAL_MAC(addr)	((addr) == 0xffffffffffffULL || (addr) == 0x0)
+
+
+struct mlx4_en_tx_info {
+	struct sk_buff *skb;
+	u32 nr_txbb;
+	u8 linear;
+	u8 data_offset;
+};
+
+
+#define MLX4_EN_BIT_DESC_OWN	0x80000000
+#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
+#define MLX4_EN_MEMTYPE_PAD	0x100
+#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)
+
+
+struct mlx4_en_tx_desc {
+	struct mlx4_wqe_ctrl_seg ctrl;
+	union {
+		struct mlx4_wqe_data_seg data; /* at least one data segment */
+		struct mlx4_wqe_lso_seg lso;
+		struct mlx4_wqe_inline_seg inl;
+	};
+};
+
+#define MLX4_EN_USE_SRQ		0x01000000
+
+struct mlx4_en_rx_alloc {
+	struct page *page;
+	u16 offset;
+};
+
+struct mlx4_en_tx_ring {
+	struct mlx4_hwq_resources wqres;
+	u32 size; /* number of TXBBs */
+	u32 size_mask;
+	u16 stride;
+	u16 cqn;	/* index of port CQ associated with this ring */
+	u32 prod;
+	u32 cons;
+	u32 buf_size;
+	u32 doorbell_qpn;
+	void *buf;
+	u16 poll_cnt;
+	int blocked;
+	struct mlx4_en_tx_info *tx_info;
+	u8 *bounce_buf;
+	u32 last_nr_txbb;
+	struct mlx4_qp qp;
+	struct mlx4_qp_context context;
+	int qpn;
+	enum mlx4_qp_state qp_state;
+	struct mlx4_srq dummy;
+	unsigned long bytes;
+	unsigned long packets;
+	spinlock_t comp_lock;
+};
+
+struct mlx4_en_rx_desc {
+	struct mlx4_wqe_srq_next_seg next;
+	/* actual number of entries depends on rx ring stride */
+	struct mlx4_wqe_data_seg data[0];
+};
+
+struct mlx4_en_rx_ring {
+	struct mlx4_srq srq;
+	struct mlx4_hwq_resources wqres;
+	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+	struct net_lro_mgr lro;
+	u32 size;	/* number of Rx descs */
+	u32 actual_size;
+	u32 size_mask;
+	u16 stride;
+	u16 log_stride;
+	u16 cqn;	/* index of port CQ associated with this ring */
+	u32 prod;
+	u32 cons;
+	u32 buf_size;
+	int need_refill;
+	int full;
+	void *buf;
+	void *rx_info;
+	unsigned long bytes;
+	unsigned long packets;
+};
+
+
+static inline int mlx4_en_can_lro(__be16 status)
+{
+	return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4	|
+				     MLX4_CQE_STATUS_IPV4F	|
+				     MLX4_CQE_STATUS_IPV6	|
+				     MLX4_CQE_STATUS_IPV4OPT	|
+				     MLX4_CQE_STATUS_TCP	|
+				     MLX4_CQE_STATUS_UDP	|
+				     MLX4_CQE_STATUS_IPOK)) ==
+		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+			    MLX4_CQE_STATUS_IPOK |
+			    MLX4_CQE_STATUS_TCP);
+}
+
+struct mlx4_en_cq {
+	struct mlx4_cq          mcq;
+	struct mlx4_hwq_resources wqres;
+	int                     ring;
+	spinlock_t              lock;
+	struct net_device      *dev;
+	struct napi_struct	napi;
+	/* Per-core Tx cq processing support */
+	struct timer_list timer;
+	int size;
+	int buf_size;
+	unsigned vector;
+	enum cq_type is_tx;
+	u16 moder_time;
+	u16 moder_cnt;
+	int armed;
+	struct mlx4_cqe *buf;
+#define MLX4_EN_OPCODE_ERROR	0x1e
+};
+
+struct mlx4_en_port_profile {
+	u32 flags;
+	u32 tx_ring_num;
+	u32 rx_ring_num;
+	u32 tx_ring_size;
+	u32 rx_ring_size;
+};
+
+struct mlx4_en_profile {
+	int rss_xor;
+	int num_lro;
+	u8 rss_mask;
+	u32 active_ports;
+	u32 small_pkt_int;
+	int rx_moder_cnt;
+	int rx_moder_time;
+	int auto_moder;
+	u8 rx_pause;
+	u8 rx_ppp;
+	u8 tx_pause;
+	u8 tx_ppp;
+	u8 no_reset;
+	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_en_dev {
+	struct mlx4_dev         *dev;
+	struct pci_dev		*pdev;
+	struct mutex		state_lock;
+	struct net_device       *pndev[MLX4_MAX_PORTS + 1];
+	u32                     port_cnt;
+	bool			device_up;
+	struct mlx4_en_profile  profile;
+	u32			LSO_support;
+	struct workqueue_struct *workqueue;
+	struct device           *dma_device;
+	void __iomem            *uar_map;
+	struct mlx4_uar         priv_uar;
+	struct mlx4_mr		mr;
+	u32                     priv_pdn;
+	spinlock_t              uar_lock;
+};
+
+
+struct mlx4_en_rss_map {
+	int size;
+	int base_qpn;
+	u16 map[MAX_RSS_MAP_SIZE];
+	struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
+	enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
+	struct mlx4_qp indir_qp;
+	enum mlx4_qp_state indir_state;
+};
+
+struct mlx4_en_rss_context {
+	__be32 base_qpn;
+	__be32 default_qpn;
+	u16 reserved;
+	u8 hash_fn;
+	u8 flags;
+	__be32 rss_key[10];
+};
+
+struct mlx4_en_pkt_stats {
+	unsigned long broadcast;
+	unsigned long rx_prio[8];
+	unsigned long tx_prio[8];
+#define NUM_PKT_STATS		17
+};
+
+struct mlx4_en_port_stats {
+	unsigned long lro_aggregated;
+	unsigned long lro_flushed;
+	unsigned long lro_no_desc;
+	unsigned long tso_packets;
+	unsigned long queue_stopped;
+	unsigned long wake_queue;
+	unsigned long tx_timeout;
+	unsigned long rx_alloc_failed;
+	unsigned long rx_chksum_good;
+	unsigned long rx_chksum_none;
+	unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS		11
+};
+
+struct mlx4_en_perf_stats {
+	u32 tx_poll;
+	u64 tx_pktsz_avg;
+	u32 inflight_avg;
+	u16 tx_coal_avg;
+	u16 rx_coal_avg;
+	u32 napi_quota;
+#define NUM_PERF_COUNTERS		6
+};
+
+struct mlx4_en_frag_info {
+	u16 frag_size;
+	u16 frag_prefix_size;
+	u16 frag_stride;
+	u16 frag_align;
+	u16 last_offset;
+
+};
+
+struct mlx4_en_priv {
+	struct mlx4_en_dev *mdev;
+	struct mlx4_en_port_profile *prof;
+	struct net_device *dev;
+	struct vlan_group *vlgrp;
+	struct net_device_stats stats;
+	struct net_device_stats ret_stats;
+	spinlock_t stats_lock;
+
+	unsigned long last_moder_packets;
+	unsigned long last_moder_tx_packets;
+	unsigned long last_moder_bytes;
+	unsigned long last_moder_jiffies;
+	int last_moder_time;
+	u16 rx_usecs;
+	u16 rx_frames;
+	u16 tx_usecs;
+	u16 tx_frames;
+	u32 pkt_rate_low;
+	u16 rx_usecs_low;
+	u32 pkt_rate_high;
+	u16 rx_usecs_high;
+	u16 sample_interval;
+	u16 adaptive_rx_coal;
+	u32 msg_enable;
+
+	struct mlx4_hwq_resources res;
+	int link_state;
+	int last_link_state;
+	bool port_up;
+	int port;
+	int registered;
+	int allocated;
+	int stride;
+	int rx_csum;
+	u64 mac;
+	int mac_index;
+	unsigned max_mtu;
+	int base_qpn;
+
+	struct mlx4_en_rss_map rss_map;
+	u16 tx_prio_map[8];
+	u32 flags;
+#define MLX4_EN_FLAG_PROMISC	0x1
+	u32 tx_ring_num;
+	u32 rx_ring_num;
+	u32 rx_skb_size;
+	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
+	u16 num_frags;
+	u16 log_rx_info;
+
+	struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
+	struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+	struct work_struct mcast_task;
+	struct work_struct mac_task;
+	struct delayed_work refill_task;
+	struct work_struct watchdog_task;
+	struct work_struct linkstate_task;
+	struct delayed_work stats_task;
+	struct mlx4_en_perf_stats pstats;
+	struct mlx4_en_pkt_stats pkstats;
+	struct mlx4_en_port_stats port_stats;
+	struct dev_mc_list *mc_list;
+	struct mlx4_en_stat_out_mbox hw_stats;
+};
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev);
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+			struct mlx4_en_port_profile *prof);
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+		      int entries, int ring, enum cq_type mode);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+
+void mlx4_en_poll_tx_cq(unsigned long data);
+void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+			   u32 size, u16 stride);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring *ring,
+			     int cq, int srqn);
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+				struct mlx4_en_tx_ring *ring);
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_rx_ring *ring,
+			   u32 size, u16 stride);
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_rx_ring *ring);
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+				struct mlx4_en_rx_ring *ring);
+int mlx4_en_process_rx_cq(struct net_device *dev,
+			  struct mlx4_en_cq *cq,
+			  int budget);
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+			     int is_tx, int rss, int qpn, int cqn, int srqn,
+			     struct mlx4_qp_context *context);
+int mlx4_en_map_buffer(struct mlx4_buf *buf);
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
+
+void mlx4_en_calc_rx_buf(struct net_device *dev);
+void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+				 struct mlx4_en_rss_map *rss_map,
+				 int num_entries, int num_rings);
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
+void mlx4_en_rx_refill(struct work_struct *work);
+void mlx4_en_rx_irq(struct mlx4_cq *mcq);
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+			   u8 promisc);
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+
+/*
+ * Globals
+ */
+extern const struct ethtool_ops mlx4_en_ethtool_ops;
+#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b4..0caf74c 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@
 	int err;
 
 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
-			       ~0, dev->caps.reserved_mrws);
+			       ~0, dev->caps.reserved_mrws, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa61689..26d1a7a 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-				(1 << 24) - 1, dev->caps.reserved_pds);
+				(1 << 24) - 1, dev->caps.reserved_pds, 0);
 }
 
 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@
 
 	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
 				dev->caps.num_uars, dev->caps.num_uars - 1,
-				max(128, dev->caps.reserved_uars));
+				max(128, dev->caps.reserved_uars), 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
new file mode 100644
index 0000000..e2fdab4
--- /dev/null
+++ b/drivers/net/mlx4/port.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MLX4_MAC_VALID		(1ull << 63)
+#define MLX4_MAC_MASK		0xffffffffffffULL
+
+#define MLX4_VLAN_VALID		(1u << 31)
+#define MLX4_VLAN_MASK		0xfff
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
+{
+	int i;
+
+	mutex_init(&table->mutex);
+	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+		table->entries[i] = 0;
+		table->refs[i]	 = 0;
+	}
+	table->max   = 1 << dev->caps.log_num_macs;
+	table->total = 0;
+}
+
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
+{
+	int i;
+
+	mutex_init(&table->mutex);
+	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+		table->entries[i] = 0;
+		table->refs[i]	 = 0;
+	}
+	table->max   = 1 << dev->caps.log_num_vlans;
+	table->total = 0;
+}
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+				   __be64 *entries)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+{
+	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+	int i, err = 0;
+	int free = -1;
+
+	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+	mutex_lock(&table->mutex);
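+	/* Scan the table: remember the first free slot, and if the MAC is
+	 * already present just take another reference on it. */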
+	for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
+		if (free < 0 && !table->refs[i]) {
+			free = i;
+			continue;
+		}
+
+		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+			/* MAC already registered, increase reference count */
+			*index = i;
+			++table->refs[i];
+			goto out;
+		}
+	}
+	mlx4_dbg(dev, "Free MAC index is %d\n", free);
+
+	if (table->total == table->max) {
+		/* No free mac entries */
+		err = -ENOSPC;
+		goto out;
+	}
+
+	/* Register new MAC */
+	table->refs[free] = 1;
+	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+	err = mlx4_set_port_mac_table(dev, port, table->entries);
+	if (unlikely(err)) {
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
+		table->refs[free] = 0;
+		table->entries[free] = 0;
+		goto out;
+	}
+
+	*index = free;
+	++table->total;
+out:
+	mutex_unlock(&table->mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+{
+	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+
+	mutex_lock(&table->mutex);
+	if (!table->refs[index]) {
+		mlx4_warn(dev, "No MAC entry for index %d\n", index);
+		goto out;
+	}
+	if (--table->refs[index]) {
+		mlx4_warn(dev, "Have more references for index %d, "
+			  "no need to modify MAC table\n", index);
+		goto out;
+	}
+	table->entries[index] = 0;
+	mlx4_set_port_mac_table(dev, port, table->entries);
+	--table->total;
+out:
+	mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+
+static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
+				    __be32 *entries)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
+	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return err;
+}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+	int i, err = 0;
+	int free = -1;
+
+	mutex_lock(&table->mutex);
+	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
+		if (free < 0 && (table->refs[i] == 0)) {
+			free = i;
+			continue;
+		}
+
+		if (table->refs[i] &&
+		    (vlan == (MLX4_VLAN_MASK &
+			      be32_to_cpu(table->entries[i])))) {
+			/* VLAN already registered, increase reference count */
+			*index = i;
+			++table->refs[i];
+			goto out;
+		}
+	}
+
+	if (table->total == table->max) {
+		/* No free vlan entries */
+		err = -ENOSPC;
+		goto out;
+	}
+
+	/* Register new VLAN */
+	table->refs[free] = 1;
+	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+	err = mlx4_set_port_vlan_table(dev, port, table->entries);
+	if (unlikely(err)) {
+		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
+		table->refs[free] = 0;
+		table->entries[free] = 0;
+		goto out;
+	}
+
+	*index = free;
+	++table->total;
+out:
+	mutex_unlock(&table->mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_vlan);
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+
+	if (index < MLX4_VLAN_REGULAR) {
+		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
+		return;
+	}
+
+	mutex_lock(&table->mutex);
+	if (!table->refs[index]) {
+		mlx4_warn(dev, "No vlan entry for index %d\n", index);
+		goto out;
+	}
+	if (--table->refs[index]) {
+		mlx4_dbg(dev, "Have more references for index %d, "
+			 "no need to modify vlan table\n", index);
+		goto out;
+	}
+	table->entries[index] = 0;
+	mlx4_set_port_vlan_table(dev, port, table->entries);
+	--table->total;
+out:
+	mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+	u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memset(mailbox->buf, 0, 256);
+	if (is_eth) {
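+		/* Raw offsets into the SET_PORT mailbox for Ethernet ports;
+		 * the exact field meanings follow the device firmware layout
+		 * (the 1 << 15 values appear to act as per-field enable bits). */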
+		((u8 *) mailbox->buf)[3] = 6;
+		((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
+		((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
+	}
+	err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
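+
+/*
+ * Minimal usage sketch for the per-port VLAN table API above (hypothetical
+ * caller; "dev", "port" and "vid" come from the caller):
+ *
+ *	int vidx, err;
+ *
+ *	err = mlx4_register_vlan(dev, port, vid, &vidx);
+ *	if (err)
+ *		return err;		(-ENOSPC when the table is full)
+ *	...program vidx into the hardware context that needs it...
+ *	mlx4_unregister_vlan(dev, port, vidx);
+ *
+ * Registration takes a reference on the table entry; the entry is only
+ * cleared from the hardware table when the last reference is dropped.
+ */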
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index c49a860..1c565ef 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,19 +147,42 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	int qpn;
+
+	qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+	if (qpn == -1)
+		return -ENOMEM;
+
+	*base = qpn;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	if (base_qpn < dev->caps.sqp_start + 8)
+		return;
+
+	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
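+
+/*
+ * Minimal usage sketch for the range API (hypothetical caller; "dev" and
+ * "qp" are assumed to exist in the caller):
+ *
+ *	int base, err;
+ *
+ *	err = mlx4_qp_reserve_range(dev, 8, 8, &base);	8 QPNs, aligned to 8
+ *	if (err)
+ *		return err;
+ *	err = mlx4_qp_alloc(dev, base, &qp);
+ *	if (err)
+ *		mlx4_qp_release_range(dev, base, 8);
+ *
+ * The range is returned with mlx4_qp_release_range() once the QPs are freed.
+ */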
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
 	int err;
 
-	if (sqpn)
-		qp->qpn = sqpn;
-	else {
-		qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
-		if (qp->qpn == -1)
-			return -ENOMEM;
-	}
+	if (!qpn)
+		return -EINVAL;
+
+	qp->qpn = qpn;
 
 	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
 	if (err)
@@ -208,9 +231,6 @@
 	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
 
 err_out:
-	if (!sqpn)
-		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -239,9 +259,6 @@
 	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
 	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
 	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
-	if (qp->qpn >= dev->caps.sqp_start + 8)
-		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
@@ -255,6 +272,7 @@
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
 	int err;
+	int reserved_from_top = 0;
 
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@
 	 * block of special QPs must be aligned to a multiple of 8, so
 	 * round up.
 	 */
-	dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
+	dev->caps.sqp_start =
+		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
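+	/*
+	 * Sort the non-firmware reserved QP regions by size (largest first)
+	 * and carve them out of the top of the QP number space; the total,
+	 * reserved_from_top, is passed to mlx4_bitmap_init() below so these
+	 * QPNs are never handed out.
+	 */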
+	{
+		int sort[MLX4_NUM_QP_REGION];
+		int i, j, tmp;
+		int last_base = dev->caps.num_qps;
+
+		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+			sort[i] = i;
+
+		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+			for (j = 2; j < i; ++j) {
+				if (dev->caps.reserved_qps_cnt[sort[j]] >
+				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+					tmp             = sort[j];
+					sort[j]         = sort[j - 1];
+					sort[j - 1]     = tmp;
+				}
+			}
+		}
+
+		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+			dev->caps.reserved_qps_base[sort[i]] = last_base;
+			reserved_from_top +=
+				dev->caps.reserved_qps_cnt[sort[i]];
+		}
+
+	}
+
 	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-			       (1 << 24) - 1, dev->caps.sqp_start + 8);
+			       (1 << 23) - 1, dev->caps.sqp_start + 8,
+			       reserved_from_top);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6d..fe9f218 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@
 	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
 
 	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
-			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
+			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index b39d1cc..a24bb68 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1205,11 +1205,12 @@
 		devno = 0;
 
 	ndev->dma = -1;
-	ndev->irq = platform_get_irq(pdev, 0);
-	if (ndev->irq < 0) {
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
 		ret = -ENODEV;
 		goto out_release;
 	}
+	ndev->irq = ret;
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22c17bb..466a89e 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -456,7 +456,7 @@
 {
 	struct catc *catc = netdev_priv(netdev);
 
-	warn("Transmit timed out.");
+	dev_warn(&netdev->dev, "Transmit timed out.\n");
 	usb_unlink_urb(catc->tx_urb);
 }
 
@@ -847,7 +847,8 @@
 			dbg("64k Memory\n");
 			break;
 		default:
-			warn("Couldn't detect memory size, assuming 32k");
+			dev_warn(&intf->dev,
+				 "Couldn't detect memory size, assuming 32k\n");
 		case 0x87654321:
 			catc_set_reg(catc, TxBufCount, 4);
 			catc_set_reg(catc, RxBufCount, 16);
@@ -953,7 +954,8 @@
 {
 	int result = usb_register(&catc_driver);
 	if (result == 0)
-		info(DRIVER_VERSION " " DRIVER_DESC);
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+		       DRIVER_DESC "\n");
 	return result;
 }
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d6829db..fdbf3be 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -832,7 +832,7 @@
 
 	if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC)))
 	{
-		warn("kaweth failed tx_urb %d", res);
+		dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res);
 skip:
 		kaweth->stats.tx_errors++;
 
@@ -924,7 +924,7 @@
 {
 	struct kaweth_device *kaweth = netdev_priv(net);
 
-	warn("%s: Tx timed out. Resetting.", net->name);
+	dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
 	kaweth->stats.tx_errors++;
 	net->trans_start = jiffies;
 
@@ -1016,10 +1016,10 @@
 	 */
 
 	if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) {
-		info("Firmware present in device.");
+		dev_info(&intf->dev, "Firmware present in device.\n");
 	} else {
 		/* Download the firmware */
-		info("Downloading firmware...");
+		dev_info(&intf->dev, "Downloading firmware...\n");
 		kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
 		if ((result = kaweth_download_firmware(kaweth,
 						      "kaweth/new_code.bin",
@@ -1061,7 +1061,7 @@
 		}
 
 		/* Device will now disappear for a moment...  */
-		info("Firmware loaded.  I'll be back...");
+		dev_info(&intf->dev, "Firmware loaded.  I'll be back...\n");
 err_fw:
 		free_page((unsigned long)kaweth->firmware_buf);
 		free_netdev(netdev);
@@ -1075,10 +1075,10 @@
 		goto err_free_netdev;
 	}
 
-	info("Statistics collection: %x", kaweth->configuration.statistics_mask);
-	info("Multicast filter limit: %x", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
-	info("MTU: %d", le16_to_cpu(kaweth->configuration.segment_size));
-	info("Read MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+	dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
+	dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
+	dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
+	dev_info(&intf->dev, "Read MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
 		 (int)kaweth->configuration.hw_addr[0],
 		 (int)kaweth->configuration.hw_addr[1],
 		 (int)kaweth->configuration.hw_addr[2],
@@ -1174,7 +1174,8 @@
 		goto err_intfdata;
 	}
 
-	info("kaweth interface created at %s", kaweth->net->name);
+	dev_info(&intf->dev, "kaweth interface created at %s\n",
+		 kaweth->net->name);
 
 	dbg("Kaweth probe returning.");
 
@@ -1205,11 +1206,11 @@
 	struct kaweth_device *kaweth = usb_get_intfdata(intf);
 	struct net_device *netdev;
 
-	info("Unregistering");
+	dev_info(&intf->dev, "Unregistering\n");
 
 	usb_set_intfdata(intf, NULL);
 	if (!kaweth) {
-		warn("unregistering non-existant device");
+		dev_warn(&intf->dev, "unregistering non-existant device\n");
 		return;
 	}
 	netdev = kaweth->net;
@@ -1269,7 +1270,7 @@
 
 	if (!wait_event_timeout(awd.wqh, awd.done, timeout)) {
                 // timeout
-                warn("usb_control/bulk_msg: timeout");
+                dev_warn(&urb->dev->dev, "usb_control/bulk_msg: timeout\n");
                 usb_kill_urb(urb);  // remove urb safely
                 status = -ETIMEDOUT;
         }
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 38b90e7..7914867 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -168,7 +168,7 @@
 			netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus) && printk_ratelimit())
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-					__FUNCTION__, ret);
+					__func__, ret);
 		goto out;
 	}
 
@@ -192,7 +192,7 @@
 	if (!buffer) {
 		if (netif_msg_drv(pegasus))
 			dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
-					__FUNCTION__);
+					__func__);
 		return -ENOMEM;
 	}
 	memcpy(buffer, data, size);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index df56a51..6133401 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -221,7 +221,7 @@
 	case -ENOENT:
 		break;
 	default:
-		warn("ctrl urb status %d", urb->status);
+		dev_warn(&urb->dev->dev, "ctrl urb status %d\n", urb->status);
 	}
 	dev = urb->context;
 	clear_bit(RX_REG_SET, &dev->flags);
@@ -441,10 +441,10 @@
 	case -ENOENT:
 		return;	/* the urb is in unlink state */
 	case -ETIME:
-		warn("may be reset is needed?..");
+		dev_warn(&urb->dev->dev, "may be reset is needed?..\n");
 		goto goon;
 	default:
-		warn("Rx status %d", urb->status);
+		dev_warn(&urb->dev->dev, "Rx status %d\n", urb->status);
 		goto goon;
 	}
 
@@ -538,7 +538,8 @@
 	if (!netif_device_present(dev->netdev))
 		return;
 	if (urb->status)
-		info("%s: Tx status %d", dev->netdev->name, urb->status);
+		dev_info(&urb->dev->dev, "%s: Tx status %d\n",
+			 dev->netdev->name, urb->status);
 	dev->netdev->trans_start = jiffies;
 	netif_wake_queue(dev->netdev);
 }
@@ -561,7 +562,8 @@
 		return;
 	/* -EPIPE:  should clear the halt */
 	default:
-		info("%s: intr status %d", dev->netdev->name, urb->status);
+		dev_info(&urb->dev->dev, "%s: intr status %d\n",
+			 dev->netdev->name, urb->status);
 		goto resubmit;
 	}
 
@@ -665,7 +667,7 @@
 	u8 cr, tcr, rcr, msr;
 
 	if (!rtl8150_reset(dev)) {
-		warn("%s - device reset failed", __FUNCTION__);
+		dev_warn(&dev->udev->dev, "device reset failed\n");
 	}
 	/* RCR bit7=1 attach Rx info at the end;  =0 HW CRC (which is broken) */
 	rcr = 0x9e;
@@ -699,7 +701,7 @@
 static void rtl8150_tx_timeout(struct net_device *netdev)
 {
 	rtl8150_t *dev = netdev_priv(netdev);
-	warn("%s: Tx timeout.", netdev->name);
+	dev_warn(&netdev->dev, "Tx timeout.\n");
 	usb_unlink_urb(dev->tx_urb);
 	dev->stats.tx_errors++;
 }
@@ -710,12 +712,12 @@
 	netif_stop_queue(netdev);
 	if (netdev->flags & IFF_PROMISC) {
 		dev->rx_creg |= cpu_to_le16(0x0001);
-		info("%s: promiscuous mode", netdev->name);
+		dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
 	} else if (netdev->mc_count ||
 		   (netdev->flags & IFF_ALLMULTI)) {
 		dev->rx_creg &= cpu_to_le16(0xfffe);
 		dev->rx_creg |= cpu_to_le16(0x0002);
-		info("%s: allmulti set", netdev->name);
+		dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
 	} else {
 		/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
 		dev->rx_creg &= cpu_to_le16(0x00fc);
@@ -740,7 +742,7 @@
 		if (res == -ENODEV)
 			netif_device_detach(dev->netdev);
 		else {
-			warn("failed tx_urb %d\n", res);
+			dev_warn(&netdev->dev, "failed tx_urb %d\n", res);
 			dev->stats.tx_errors++;
 			netif_start_queue(netdev);
 		}
@@ -783,7 +785,7 @@
 	if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) {
 		if (res == -ENODEV)
 			netif_device_detach(dev->netdev);
-		warn("%s: rx_urb submit failed: %d", __FUNCTION__, res);
+		dev_warn(&netdev->dev, "rx_urb submit failed: %d\n", res);
 		return res;
 	}
 	usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3),
@@ -792,7 +794,7 @@
 	if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) {
 		if (res == -ENODEV)
 			netif_device_detach(dev->netdev);
-		warn("%s: intr_urb submit failed: %d", __FUNCTION__, res);
+		dev_warn(&netdev->dev, "intr_urb submit failed: %d\n", res);
 		usb_kill_urb(dev->rx_urb);
 		return res;
 	}
@@ -947,7 +949,7 @@
 		goto out2;
 	}
 
-	info("%s: rtl8150 is detected", netdev->name);
+	dev_info(&intf->dev, "%s: rtl8150 is detected\n", netdev->name);
 
 	return 0;
 
@@ -984,7 +986,8 @@
 
 static int __init usb_rtl8150_init(void)
 {
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return usb_register(&rtl8150_driver);
 }
 
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index f972fef..ee51b6a 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -318,7 +318,7 @@
 				continue;
 		}
 
-		if( pci_irq_line <= 0  ||  pci_irq_line >= NR_IRQS )
+		if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
 			printk( KERN_WARNING "  WARNING: The PCI BIOS assigned "
 				"this PCI card to IRQ %d, which is unlikely "
 				"to work!.\n"
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
new file mode 100644
index 0000000..da42aa0
--- /dev/null
+++ b/drivers/net/xtsonic.c
@@ -0,0 +1,319 @@
+/*
+ * xtsonic.c
+ *
+ * (C) 2001 - 2007 Tensilica Inc.
+ *	Kevin Chea <kchea@yahoo.com>
+ *	Marc Gauthier <marc@linux-xtensa.org>
+ *	Chris Zankel <chris@zankel.net>
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the onboard Sonic ethernet controller on the XT2000.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+
+static char xtsonic_string[] = "xtsonic";
+
+extern unsigned xtboard_nvram_valid(void);
+extern void xtboard_get_ether_addr(unsigned char *buf);
+
+#include "sonic.h"
+
+/*
+ * According to the documentation for the Sonic ethernet controller,
+ * EOBC should be 760 words (1520 bytes) for 32-bit configurations, i.e.
+ * 2 words less than the buffer size. The value of RBSIZE defined in
+ * sonic.h, however, is only 1520.
+ *
+ * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
+ * RBSIZE 1520 bytes)
+ */
+#undef SONIC_RBSIZE
+#define SONIC_RBSIZE	1524
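+/* 1524 bytes = 762 words, i.e. EOBC (760 words) plus the required 2 words. */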
+
+/*
+ * The chip provides 256 byte register space.
+ */
+#define SONIC_MEM_SIZE	0x100
+
+/*
+ * Macros to access SONIC registers
+ */
+#define SONIC_READ(reg) \
+	(0xffff & *((volatile unsigned int *)dev->base_addr+reg))
+
+#define SONIC_WRITE(reg,val) \
+	*((volatile unsigned int *)dev->base_addr+reg) = val
+
+
+/* Use 0 for production, 1 for verification, and >2 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+/*
+ * We cannot use station (ethernet) address prefixes to detect the
+ * sonic controller since these are board manufacturer dependent.
+ * So we check for known Silicon Revision IDs instead.
+ */
+static unsigned short known_revisions[] =
+{
+	0x101,			/* SONIC 83934 */
+	0xffff			/* end of list */
+};
+
+static int xtsonic_open(struct net_device *dev)
+{
+	if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
+		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+		       dev->name, dev->irq);
+		return -EAGAIN;
+	}
+	return sonic_open(dev);
+}
+
+static int xtsonic_close(struct net_device *dev)
+{
+	int err;
+	err = sonic_close(dev);
+	free_irq(dev->irq, dev);
+	return err;
+}
+
+static int __init sonic_probe1(struct net_device *dev)
+{
+	static unsigned version_printed = 0;
+	unsigned int silicon_revision;
+	struct sonic_local *lp = netdev_priv(dev);
+	unsigned int base_addr = dev->base_addr;
+	int i;
+	int err = 0;
+
+	if (!request_mem_region(base_addr, 0x100, xtsonic_string))
+		return -EBUSY;
+
+	/*
+	 * Get the Silicon Revision ID. If it is one of the known IDs,
+	 * assume that we have found a SONIC ethernet controller at
+	 * the expected location.
+	 */
+	silicon_revision = SONIC_READ(SONIC_SR);
+	if (sonic_debug > 1)
+		printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
+
+	i = 0;
+	while ((known_revisions[i] != 0xffff) &&
+			(known_revisions[i] != silicon_revision))
+		i++;
+
+	if (known_revisions[i] == 0xffff) {
+		printk("SONIC ethernet controller not found (0x%4x)\n",
+				silicon_revision);
+		return -ENODEV;
+	}
+
+	if (sonic_debug  &&  version_printed++ == 0)
+		printk(version);
+
+	/*
+	 * Put the SONIC into software reset, then retrieve the ethernet address.
+	 * Note: we are assuming that the boot loader has initialized the CAM.
+	 */
+	SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+	SONIC_WRITE(SONIC_DCR,
+		    SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
+	SONIC_WRITE(SONIC_CEP,0);
+	SONIC_WRITE(SONIC_IMR,0);
+
+	SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+	SONIC_WRITE(SONIC_CEP,0);
+
+	for (i=0; i<3; i++) {
+		unsigned int val = SONIC_READ(SONIC_CAP0-i);
+		dev->dev_addr[i*2] = val;
+		dev->dev_addr[i*2+1] = val >> 8;
+	}
+
+	/* Initialize the device structure. */
+
+	lp->dma_bitmode = SONIC_BITMODE32;
+
+	/*
+	 *  Allocate the local private descriptor areas in uncached space.
+	 *  The entire structure must be located within the same 64 kB segment.
+	 *  The area is obtained with dma_alloc_coherent() below and freed
+	 *  again in xtsonic_device_remove().
+	 */
+	lp->descriptors =
+		dma_alloc_coherent(lp->device,
+			SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+			&lp->descriptors_laddr, GFP_KERNEL);
+
+	if (lp->descriptors == NULL) {
+		printk(KERN_ERR "%s: couldn't alloc DMA memory for "
+				" descriptors.\n", lp->device->bus_id);
+		goto out;
+	}
+
+	lp->cda = lp->descriptors;
+	lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+			     * SONIC_BUS_SCALE(lp->dma_bitmode));
+	lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+			     * SONIC_BUS_SCALE(lp->dma_bitmode));
+	lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+			     * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+	/* get the virtual dma address */
+
+	lp->cda_laddr = lp->descriptors_laddr;
+	lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+				         * SONIC_BUS_SCALE(lp->dma_bitmode));
+	lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+					 * SONIC_BUS_SCALE(lp->dma_bitmode));
+	lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+					 * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+	dev->open = xtsonic_open;
+	dev->stop = xtsonic_close;
+	dev->hard_start_xmit	= sonic_send_packet;
+	dev->get_stats		= sonic_get_stats;
+	dev->set_multicast_list	= &sonic_multicast_list;
+	dev->tx_timeout		= sonic_tx_timeout;
+	dev->watchdog_timeo	= TX_TIMEOUT;
+
+	/*
+	 * clear tally counter
+	 */
+	SONIC_WRITE(SONIC_CRCT,0xffff);
+	SONIC_WRITE(SONIC_FAET,0xffff);
+	SONIC_WRITE(SONIC_MPT,0xffff);
+
+	return 0;
+out:
+	release_region(dev->base_addr, SONIC_MEM_SIZE);
+	return err;
+}
+
+
+/*
+ * Probe for a SONIC ethernet controller on an XT2000 board.
+ * Actually probing is superfluous but we're paranoid.
+ */
+
+int __init xtsonic_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct sonic_local *lp;
+	struct resource *resmem, *resirq;
+	int err = 0;
+
+	DECLARE_MAC_BUF(mac);
+
+	if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
+		return -ENODEV;
+
+	if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
+		return -ENODEV;
+
+	if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
+		return -ENOMEM;
+
+	lp = netdev_priv(dev);
+	lp->device = &pdev->dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	netdev_boot_setup_check(dev);
+
+	dev->base_addr = resmem->start;
+	dev->irq = resirq->start;
+
+	if ((err = sonic_probe1(dev)))
+		goto out;
+	if ((err = register_netdev(dev)))
+		goto out1;
+
+	printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name,
+	       dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq);
+
+	return 0;
+
+out1:
+	release_region(dev->base_addr, SONIC_MEM_SIZE);
+out:
+	free_netdev(dev);
+
+	return err;
+}
+
+MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
+
+#include "sonic.c"
+
+static int __devexit xtsonic_device_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct sonic_local *lp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	dma_free_coherent(lp->device,
+			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+			  lp->descriptors, lp->descriptors_laddr);
+	release_region(dev->base_addr, SONIC_MEM_SIZE);
+	free_netdev(dev);
+
+	return 0;
+}
+
+static struct platform_driver xtsonic_driver = {
+	.probe = xtsonic_probe,
+	.remove = __devexit_p(xtsonic_device_remove),
+	.driver = {
+		.name = xtsonic_string,
+	},
+};
+
+static int __init xtsonic_init(void)
+{
+	return platform_driver_register(&xtsonic_driver);
+}
+
+static void __exit xtsonic_cleanup(void)
+{
+	platform_driver_unregister(&xtsonic_driver);
+}
+
+module_init(xtsonic_init);
+module_exit(xtsonic_cleanup);
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 6a98dc8..24bbef7 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -41,7 +41,7 @@
 
 		info.addr = *addr;
 
-		request_module(info.type);
+		request_module("%s", info.type);
 
 		result = i2c_new_device(adap, &info);
 		if (result == NULL) {
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index b01eec0..bed0ed6 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -61,6 +61,8 @@
 			spi->mode |= SPI_CPHA;
 		if (of_find_property(nc, "spi-cpol", NULL))
 			spi->mode |= SPI_CPOL;
+		if (of_find_property(nc, "spi-cs-high", NULL))
+			spi->mode |= SPI_CS_HIGH;
 
 		/* Device speed */
 		prop = of_get_property(nc, "spi-max-frequency", &len);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ed98227..b55cd23 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -41,7 +41,6 @@
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
-
 /* Take ownership of the task struct and place it on the
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
@@ -341,7 +340,7 @@
  * Add IBS fetch and op entries to event buffer
  */
 static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
-	int in_kernel, struct mm_struct *mm)
+			  struct mm_struct *mm)
 {
 	unsigned long rip;
 	int i, count;
@@ -565,9 +564,11 @@
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
-	unsigned int i;
 	sync_buffer_state state = sb_buffer_start;
+#ifndef CONFIG_OPROFILE_IBS
+	unsigned int i;
 	unsigned long available;
+#endif
 
 	mutex_lock(&buffer_mutex);
 
@@ -575,9 +576,13 @@
 
 	/* Remember, only we can modify tail_pos */
 
+#ifndef CONFIG_OPROFILE_IBS
 	available = get_slots(cpu_buf);
 
 	for (i = 0; i < available; ++i) {
+#else
+	while (get_slots(cpu_buf)) {
+#endif
 		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
 
 		if (is_code(s->eip)) {
@@ -593,12 +598,10 @@
 #ifdef CONFIG_OPROFILE_IBS
 			} else if (s->event == IBS_FETCH_BEGIN) {
 				state = sb_bt_start;
-				add_ibs_begin(cpu_buf,
-					IBS_FETCH_CODE, in_kernel, mm);
+				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
 			} else if (s->event == IBS_OP_BEGIN) {
 				state = sb_bt_start;
-				add_ibs_begin(cpu_buf,
-					IBS_OP_CODE, in_kernel, mm);
+				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
 #endif
 			} else {
 				struct mm_struct *oldmm = mm;
@@ -628,3 +631,27 @@
 
 	mutex_unlock(&buffer_mutex);
 }
+
+/* This function can be used to add a buffer worth of data directly to
+ * the kernel event buffer. The source is assumed to be a circular buffer:
+ * entries are taken starting at index start and ending at index stop,
+ * wrapping around at index max.
+ */
+void oprofile_put_buff(unsigned long *buf, unsigned int start,
+		       unsigned int stop, unsigned int max)
+{
+	int i;
+
+	i = start;
+
+	mutex_lock(&buffer_mutex);
+	while (i != stop) {
+		add_event_entry(buf[i++]);
+
+		if (i >= max)
+			i = 0;
+	}
+
+	mutex_unlock(&buffer_mutex);
+}
+
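+/*
+ * Example (hypothetical caller; "buf", "head", "tail" and "BUF_ENTRIES" are
+ * made-up names): to flush the entries [head, tail) of a circular buffer of
+ * BUF_ENTRIES unsigned longs into the event buffer:
+ *
+ *	oprofile_put_buff(buf, head, tail, BUF_ENTRIES);
+ */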
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
index 08866f6..3110732 100644
--- a/drivers/oprofile/buffer_sync.h
+++ b/drivers/oprofile/buffer_sync.h
@@ -9,13 +9,13 @@
 
 #ifndef OPROFILE_BUFFER_SYNC_H
 #define OPROFILE_BUFFER_SYNC_H
- 
+
 /* add the necessary profiling hooks */
 int sync_start(void);
 
 /* remove the hooks */
 void sync_stop(void);
- 
+
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e1bd5a9..01d38e7 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,7 +22,7 @@
 #include <linux/oprofile.h>
 #include <linux/vmalloc.h>
 #include <linux/errno.h>
- 
+
 #include "event_buffer.h"
 #include "cpu_buffer.h"
 #include "buffer_sync.h"
@@ -38,27 +38,40 @@
 void free_cpu_buffers(void)
 {
 	int i;
- 
-	for_each_online_cpu(i) {
+
+	for_each_possible_cpu(i) {
 		vfree(per_cpu(cpu_buffer, i).buffer);
 		per_cpu(cpu_buffer, i).buffer = NULL;
 	}
 }
 
+unsigned long oprofile_get_cpu_buffer_size(void)
+{
+	return fs_cpu_buffer_size;
+}
+
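+/*
+ * Account a dropped sample against the current CPU's buffer statistics.
+ */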
+void oprofile_cpu_buffer_inc_smpl_lost(void)
+{
+	struct oprofile_cpu_buffer *cpu_buf
+		= &__get_cpu_var(cpu_buffer);
+
+	cpu_buf->sample_lost_overflow++;
+}
+
 int alloc_cpu_buffers(void)
 {
 	int i;
- 
+
 	unsigned long buffer_size = fs_cpu_buffer_size;
- 
-	for_each_online_cpu(i) {
+
+	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
- 
+
 		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
 			cpu_to_node(i));
 		if (!b->buffer)
 			goto fail;
- 
+
 		b->last_task = NULL;
 		b->last_is_kernel = -1;
 		b->tracing = 0;
@@ -112,7 +125,7 @@
 }
 
 /* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
 {
 	/* reset these to invalid values; the next sample
 	 * collected will populate the buffer with proper
@@ -123,7 +136,7 @@
 }
 
 /* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
+static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
 {
 	unsigned long head = b->head_pos;
 	unsigned long tail = b->tail_pos;
@@ -134,7 +147,7 @@
 	return tail + (b->buffer_size - head) - 1;
 }
 
-static void increment_head(struct oprofile_cpu_buffer * b)
+static void increment_head(struct oprofile_cpu_buffer *b)
 {
 	unsigned long new_head = b->head_pos + 1;
 
@@ -149,17 +162,17 @@
 }
 
 static inline void
-add_sample(struct oprofile_cpu_buffer * cpu_buf,
-           unsigned long pc, unsigned long event)
+add_sample(struct oprofile_cpu_buffer *cpu_buf,
+	   unsigned long pc, unsigned long event)
 {
-	struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
+	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
 	entry->eip = pc;
 	entry->event = event;
 	increment_head(cpu_buf);
 }
 
 static inline void
-add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
+add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
 {
 	add_sample(buffer, ESCAPE_CODE, value);
 }
@@ -173,10 +186,10 @@
  * pc. We tag this in the buffer by generating kernel enter/exit
  * events whenever is_kernel changes
  */
-static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		      int is_kernel, unsigned long event)
 {
-	struct task_struct * task;
+	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
@@ -205,7 +218,7 @@
 		cpu_buf->last_task = task;
 		add_code(cpu_buf, (unsigned long)task);
 	}
- 
+
 	add_sample(cpu_buf, pc, event);
 	return 1;
 }
@@ -222,7 +235,7 @@
 	return 1;
 }
 
-static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
+static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
@@ -257,21 +270,23 @@
 
 #ifdef CONFIG_OPROFILE_IBS
 
-#define MAX_IBS_SAMPLE_SIZE	14
-static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
-	unsigned long pc, int is_kernel, unsigned  int *ibs, int ibs_code)
+#define MAX_IBS_SAMPLE_SIZE 14
+
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
+			     unsigned int *const ibs_sample, int ibs_code)
 {
+	int is_kernel = !user_mode(regs);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
 	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
+		/* we can't backtrace since we lost the source of this event */
 		cpu_buf->sample_lost_overflow++;
-		return 0;
+		return;
 	}
 
-	is_kernel = !!is_kernel;
-
 	/* notice a switch from user->kernel or vice versa */
 	if (cpu_buf->last_is_kernel != is_kernel) {
 		cpu_buf->last_is_kernel = is_kernel;
@@ -281,7 +296,6 @@
 	/* notice a task switch */
 	if (!is_kernel) {
 		task = current;
-
 		if (cpu_buf->last_task != task) {
 			cpu_buf->last_task = task;
 			add_code(cpu_buf, (unsigned long)task);
@@ -289,36 +303,17 @@
 	}
 
 	add_code(cpu_buf, ibs_code);
-	add_sample(cpu_buf, ibs[0], ibs[1]);
-	add_sample(cpu_buf, ibs[2], ibs[3]);
-	add_sample(cpu_buf, ibs[4], ibs[5]);
+	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
 
 	if (ibs_code == IBS_OP_BEGIN) {
-	add_sample(cpu_buf, ibs[6], ibs[7]);
-	add_sample(cpu_buf, ibs[8], ibs[9]);
-	add_sample(cpu_buf, ibs[10], ibs[11]);
+		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
 	}
 
-	return 1;
-}
-
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-				unsigned int * const ibs_sample, u8 code)
-{
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
-
-	struct oprofile_cpu_buffer *cpu_buf =
-			 &per_cpu(cpu_buffer, smp_processor_id());
-
-	if (!backtrace_depth) {
-		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
-		return;
-	}
-
-	/* if log_sample() fails we can't backtrace since we lost the source
-	* of this event */
-	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
+	if (backtrace_depth)
 		oprofile_ops.backtrace(regs, backtrace_depth);
 }
 
@@ -363,11 +358,16 @@
  */
 static void wq_sync_buffer(struct work_struct *work)
 {
-	struct oprofile_cpu_buffer * b =
+	struct oprofile_cpu_buffer *b =
 		container_of(work, struct oprofile_cpu_buffer, work.work);
 	if (b->cpu != smp_processor_id()) {
 		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
 		       smp_processor_id(), b->cpu);
+
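+		/*
+		 * The buffer's CPU has gone offline: cancel any pending
+		 * delayed work for it and skip the sync.
+		 */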
+		if (!cpu_online(b->cpu)) {
+			cancel_delayed_work(&b->work);
+			return;
+		}
 	}
 	sync_buffer(b->cpu);
 
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 9c44d00..d3cc262 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -15,9 +15,9 @@
 #include <linux/workqueue.h>
 #include <linux/cache.h>
 #include <linux/sched.h>
- 
+
 struct task_struct;
- 
+
 int alloc_cpu_buffers(void);
 void free_cpu_buffers(void);
 
@@ -31,15 +31,15 @@
 	unsigned long eip;
 	unsigned long event;
 };
- 
+
 struct oprofile_cpu_buffer {
 	volatile unsigned long head_pos;
 	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
-	struct task_struct * last_task;
+	struct task_struct *last_task;
 	int last_is_kernel;
 	int tracing;
-	struct op_sample * buffer;
+	struct op_sample *buffer;
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
@@ -50,7 +50,7 @@
 
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
 
 /* transient events for the CPU buffer -> event buffer */
 #define CPU_IS_KERNEL 1
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 8d692a5..d962ba0 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -19,16 +19,16 @@
 #include <linux/dcookies.h>
 #include <linux/fs.h>
 #include <asm/uaccess.h>
- 
+
 #include "oprof.h"
 #include "event_buffer.h"
 #include "oprofile_stats.h"
 
 DEFINE_MUTEX(buffer_mutex);
- 
+
 static unsigned long buffer_opened;
 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-static unsigned long * event_buffer;
+static unsigned long *event_buffer;
 static unsigned long buffer_size;
 static unsigned long buffer_watershed;
 static size_t buffer_pos;
@@ -66,7 +66,7 @@
 	mutex_unlock(&buffer_mutex);
 }
 
- 
+
 int alloc_event_buffer(void)
 {
 	int err = -ENOMEM;
@@ -76,13 +76,13 @@
 	buffer_size = fs_buffer_size;
 	buffer_watershed = fs_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
- 
+
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
- 
+
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out; 
+		goto out;
 
 	err = 0;
 out:
@@ -97,8 +97,8 @@
 	event_buffer = NULL;
 }
 
- 
-static int event_buffer_open(struct inode * inode, struct file * file)
+
+static int event_buffer_open(struct inode *inode, struct file *file)
 {
 	int err = -EPERM;
 
@@ -116,14 +116,14 @@
 	file->private_data = dcookie_register();
 	if (!file->private_data)
 		goto out;
-		 
+
 	if ((err = oprofile_setup()))
 		goto fail;
 
 	/* NB: the actual start happens from userspace
 	 * echo 1 >/dev/oprofile/enable
 	 */
- 
+
 	return 0;
 
 fail:
@@ -134,7 +134,7 @@
 }
 
 
-static int event_buffer_release(struct inode * inode, struct file * file)
+static int event_buffer_release(struct inode *inode, struct file *file)
 {
 	oprofile_stop();
 	oprofile_shutdown();
@@ -146,8 +146,8 @@
 }
 
 
-static ssize_t event_buffer_read(struct file * file, char __user * buf,
-				 size_t count, loff_t * offset)
+static ssize_t event_buffer_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *offset)
 {
 	int retval = -EINVAL;
 	size_t const max = buffer_size * sizeof(unsigned long);
@@ -172,18 +172,18 @@
 	retval = -EFAULT;
 
 	count = buffer_pos * sizeof(unsigned long);
- 
+
 	if (copy_to_user(buf, event_buffer, count))
 		goto out;
 
 	retval = count;
 	buffer_pos = 0;
- 
+
 out:
 	mutex_unlock(&buffer_mutex);
 	return retval;
 }
- 
+
 const struct file_operations event_buffer_fops = {
 	.open		= event_buffer_open,
 	.release	= event_buffer_release,
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 5076ed1..4e70749 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -10,13 +10,20 @@
 #ifndef EVENT_BUFFER_H
 #define EVENT_BUFFER_H
 
-#include <linux/types.h> 
+#include <linux/types.h>
 #include <asm/mutex.h>
- 
+
 int alloc_event_buffer(void);
 
 void free_event_buffer(void);
- 
+
+/**
+ * add_event_entry - add data to the event buffer
+ * @data:	free-form data; typically file offsets, dcookies, context
+ *		information, and ESCAPE codes.
+ */
+void add_event_entry(unsigned long data);
+
 /* wake up the process sleeping on the event file */
 void wake_up_buffer_waiter(void);
 
@@ -24,10 +31,10 @@
 #define NO_COOKIE 0UL
 
 extern const struct file_operations event_buffer_fops;
- 
+
 /* mutex between sync_cpu_buffers() and the
  * file reading code.
  */
 extern struct mutex buffer_mutex;
- 
+
 #endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 2c64517..cd37590 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -19,7 +19,7 @@
 #include "cpu_buffer.h"
 #include "buffer_sync.h"
 #include "oprofile_stats.h"
- 
+
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
@@ -36,7 +36,7 @@
 int oprofile_setup(void)
 {
 	int err;
- 
+
 	mutex_lock(&start_mutex);
 
 	if ((err = alloc_cpu_buffers()))
@@ -44,10 +44,10 @@
 
 	if ((err = alloc_event_buffer()))
 		goto out1;
- 
+
 	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
 		goto out2;
- 
+
 	/* Note even though this starts part of the
 	 * profiling overhead, it's necessary to prevent
 	 * us missing task deaths and eventually oopsing
@@ -74,7 +74,7 @@
 	is_setup = 1;
 	mutex_unlock(&start_mutex);
 	return 0;
- 
+
 out3:
 	if (oprofile_ops.shutdown)
 		oprofile_ops.shutdown();
@@ -92,17 +92,17 @@
 int oprofile_start(void)
 {
 	int err = -EINVAL;
- 
+
 	mutex_lock(&start_mutex);
- 
+
 	if (!is_setup)
 		goto out;
 
-	err = 0; 
- 
+	err = 0;
+
 	if (oprofile_started)
 		goto out;
- 
+
 	oprofile_reset_stats();
 
 	if ((err = oprofile_ops.start()))
@@ -114,7 +114,7 @@
 	return err;
 }
 
- 
+
 /* echo 0>/dev/oprofile/enable */
 void oprofile_stop(void)
 {
@@ -204,13 +204,13 @@
 	oprofile_arch_exit();
 }
 
- 
+
 module_init(oprofile_init);
 module_exit(oprofile_exit);
 
 module_param_named(timer, timer, int, 0644);
 MODULE_PARM_DESC(timer, "force use of timer interrupt");
- 
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Levon <levon@movementarian.org>");
 MODULE_DESCRIPTION("OProfile system profiler");
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 1832365..5df0c21 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -11,7 +11,7 @@
 #define OPROF_H
 
 int oprofile_setup(void);
-void oprofile_shutdown(void); 
+void oprofile_shutdown(void);
 
 int oprofilefs_register(void);
 void oprofilefs_unregister(void);
@@ -20,20 +20,20 @@
 void oprofile_stop(void);
 
 struct oprofile_operations;
- 
+
 extern unsigned long fs_buffer_size;
 extern unsigned long fs_cpu_buffer_size;
 extern unsigned long fs_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
 extern unsigned long backtrace_depth;
- 
+
 struct super_block;
 struct dentry;
 
-void oprofile_create_files(struct super_block * sb, struct dentry * root);
-void oprofile_timer_init(struct oprofile_operations * ops);
+void oprofile_create_files(struct super_block *sb, struct dentry *root);
+void oprofile_timer_init(struct oprofile_operations *ops);
 
 int oprofile_set_backtrace(unsigned long depth);
- 
+
 #endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index ef953ba..cc106d5 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -13,18 +13,18 @@
 #include "event_buffer.h"
 #include "oprofile_stats.h"
 #include "oprof.h"
- 
+
 unsigned long fs_buffer_size = 131072;
 unsigned long fs_cpu_buffer_size = 8192;
 unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
 
-static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
 }
 
 
-static ssize_t depth_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
 	unsigned long val;
 	int retval;
@@ -49,8 +49,8 @@
 	.write		= depth_write
 };
 
- 
-static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+
+static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
 }
@@ -61,24 +61,24 @@
 };
 
 
-static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
 }
- 
- 
+
+
 static const struct file_operations cpu_type_fops = {
 	.read		= cpu_type_read,
 };
- 
- 
-static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+
+
+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
 }
 
 
-static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
 	unsigned long val;
 	int retval;
@@ -89,7 +89,7 @@
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
 	if (retval)
 		return retval;
- 
+
 	if (val)
 		retval = oprofile_start();
 	else
@@ -100,14 +100,14 @@
 	return count;
 }
 
- 
+
 static const struct file_operations enable_fops = {
 	.read		= enable_read,
 	.write		= enable_write,
 };
 
 
-static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
 	wake_up_buffer_waiter();
 	return count;
@@ -117,8 +117,8 @@
 static const struct file_operations dump_fops = {
 	.write		= dump_write,
 };
- 
-void oprofile_create_files(struct super_block * sb, struct dentry * root)
+
+void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -126,7 +126,7 @@
 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
 	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
-	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 
+	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
 	oprofile_create_stats_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index f99b28e..e1f6ce0 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -11,17 +11,17 @@
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/threads.h>
- 
+
 #include "oprofile_stats.h"
 #include "cpu_buffer.h"
- 
+
 struct oprofile_stat_struct oprofile_stats;
- 
+
 void oprofile_reset_stats(void)
 {
-	struct oprofile_cpu_buffer * cpu_buf; 
+	struct oprofile_cpu_buffer *cpu_buf;
 	int i;
- 
+
 	for_each_possible_cpu(i) {
 		cpu_buf = &per_cpu(cpu_buffer, i);
 		cpu_buf->sample_received = 0;
@@ -29,18 +29,18 @@
 		cpu_buf->backtrace_aborted = 0;
 		cpu_buf->sample_invalid_eip = 0;
 	}
- 
+
 	atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
 	atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
 	atomic_set(&oprofile_stats.event_lost_overflow, 0);
 }
 
 
-void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
 {
-	struct oprofile_cpu_buffer * cpu_buf;
-	struct dentry * cpudir;
-	struct dentry * dir;
+	struct oprofile_cpu_buffer *cpu_buf;
+	struct dentry *cpudir;
+	struct dentry *dir;
 	char buf[10];
 	int i;
 
@@ -52,7 +52,7 @@
 		cpu_buf = &per_cpu(cpu_buffer, i);
 		snprintf(buf, 10, "cpu%d", i);
 		cpudir = oprofilefs_mkdir(sb, dir, buf);
- 
+
 		/* Strictly speaking access to these ulongs is racy,
 		 * but we can't simply lock them, and they are
 		 * informational only.
@@ -66,7 +66,7 @@
 		oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
 			&cpu_buf->sample_invalid_eip);
 	}
- 
+
 	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
 		&oprofile_stats.sample_lost_no_mm);
 	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 6d755a6..3da0d08 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -11,7 +11,7 @@
 #define OPROFILE_STATS_H
 
 #include <asm/atomic.h>
- 
+
 struct oprofile_stat_struct {
 	atomic_t sample_lost_no_mm;
 	atomic_t sample_lost_no_mapping;
@@ -20,14 +20,14 @@
 };
 
 extern struct oprofile_stat_struct oprofile_stats;
- 
+
 /* reset all stats to zero */
 void oprofile_reset_stats(void);
- 
+
 struct super_block;
 struct dentry;
- 
+
 /* create the stats/ dir */
-void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
 
 #endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 8543cb2..ddc4c59 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -23,9 +23,9 @@
 
 DEFINE_SPINLOCK(oprofilefs_lock);
 
-static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode)
+static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
 {
-	struct inode * inode = new_inode(sb);
+	struct inode *inode = new_inode(sb);
 
 	if (inode) {
 		inode->i_mode = mode;
@@ -44,7 +44,7 @@
 };
 
 
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
 {
 	return simple_read_from_buffer(buf, count, offset, str, strlen(str));
 }
@@ -52,7 +52,7 @@
 
 #define TMPBUFSIZE 50
 
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
 {
 	char tmpbuf[TMPBUFSIZE];
 	size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
@@ -62,7 +62,7 @@
 }
 
 
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count)
+int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
 	char tmpbuf[TMPBUFSIZE];
 	unsigned long flags;
@@ -85,16 +85,16 @@
 }
 
 
-static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long * val = file->private_data;
+	unsigned long *val = file->private_data;
 	return oprofilefs_ulong_to_user(*val, buf, count, offset);
 }
 
 
-static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long * value = file->private_data;
+	unsigned long *value = file->private_data;
 	int retval;
 
 	if (*offset)
@@ -108,7 +108,7 @@
 }
 
 
-static int default_open(struct inode * inode, struct file * filp)
+static int default_open(struct inode *inode, struct file *filp)
 {
 	if (inode->i_private)
 		filp->private_data = inode->i_private;
@@ -129,12 +129,12 @@
 };
 
 
-static struct dentry * __oprofilefs_create_file(struct super_block * sb,
-	struct dentry * root, char const * name, const struct file_operations * fops,
+static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+	struct dentry *root, char const *name, const struct file_operations *fops,
 	int perm)
 {
-	struct dentry * dentry;
-	struct inode * inode;
+	struct dentry *dentry;
+	struct inode *inode;
 
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
@@ -150,10 +150,10 @@
 }
 
 
-int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
-	char const * name, unsigned long * val)
+int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+	char const *name, unsigned long *val)
 {
-	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+	struct dentry *d = __oprofilefs_create_file(sb, root, name,
 						     &ulong_fops, 0644);
 	if (!d)
 		return -EFAULT;
@@ -163,10 +163,10 @@
 }
 
 
-int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
-	char const * name, unsigned long * val)
+int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+	char const *name, unsigned long *val)
 {
-	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+	struct dentry *d = __oprofilefs_create_file(sb, root, name,
 						     &ulong_ro_fops, 0444);
 	if (!d)
 		return -EFAULT;
@@ -176,23 +176,23 @@
 }
 
 
-static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	atomic_t * val = file->private_data;
+	atomic_t *val = file->private_data;
 	return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
 }
- 
+
 
 static const struct file_operations atomic_ro_fops = {
 	.read		= atomic_read_file,
 	.open		= default_open,
 };
- 
 
-int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
-	char const * name, atomic_t * val)
+
+int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+	char const *name, atomic_t *val)
 {
-	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+	struct dentry *d = __oprofilefs_create_file(sb, root, name,
 						     &atomic_ro_fops, 0444);
 	if (!d)
 		return -EFAULT;
@@ -201,9 +201,9 @@
 	return 0;
 }
 
- 
-int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
-	char const * name, const struct file_operations * fops)
+
+int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+	char const *name, const struct file_operations *fops)
 {
 	if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
 		return -EFAULT;
@@ -211,8 +211,8 @@
 }
 
 
-int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
-	char const * name, const struct file_operations * fops, int perm)
+int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+	char const *name, const struct file_operations *fops, int perm)
 {
 	if (!__oprofilefs_create_file(sb, root, name, fops, perm))
 		return -EFAULT;
@@ -220,11 +220,11 @@
 }
 
 
-struct dentry * oprofilefs_mkdir(struct super_block * sb,
-	struct dentry * root, char const * name)
+struct dentry *oprofilefs_mkdir(struct super_block *sb,
+	struct dentry *root, char const *name)
 {
-	struct dentry * dentry;
-	struct inode * inode;
+	struct dentry *dentry;
+	struct inode *inode;
 
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
@@ -241,10 +241,10 @@
 }
 
 
-static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent)
+static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
-	struct inode * root_inode;
-	struct dentry * root_dentry;
+	struct inode *root_inode;
+	struct dentry *root_dentry;
 
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 710a45f..333f915 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -19,7 +19,7 @@
 
 static int timer_notify(struct pt_regs *regs)
 {
- 	oprofile_add_sample(regs, 0);
+	oprofile_add_sample(regs, 0);
 	return 0;
 }
 
@@ -35,7 +35,7 @@
 }
 
 
-void __init oprofile_timer_init(struct oprofile_operations * ops)
+void __init oprofile_timer_init(struct oprofile_operations *ops)
 {
 	ops->create_files = NULL;
 	ops->setup = NULL;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index b30e38f..dcc1e99 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -66,15 +66,8 @@
 #undef DEBUG_CCIO_RUN_SG
 
 #ifdef CONFIG_PROC_FS
-/*
- * CCIO_SEARCH_TIME can help measure how fast the bitmap search is.
- * impacts performance though - ditch it if you don't use it.
- */
-#define CCIO_SEARCH_TIME
-#undef CCIO_MAP_STATS
-#else
-#undef CCIO_SEARCH_TIME
-#undef CCIO_MAP_STATS
+/* Statistics collection depends on procfs support, but costs CPU cycles. */
+#undef CCIO_COLLECT_STATS
 #endif
 
 #include <linux/proc_fs.h>
@@ -239,12 +232,10 @@
 	u32 res_size;		    	/* size of resource map in bytes */
 	spinlock_t res_lock;
 
-#ifdef CCIO_SEARCH_TIME
+#ifdef CCIO_COLLECT_STATS
 #define CCIO_SEARCH_SAMPLE 0x100
 	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
 	unsigned long avg_idx;		  /* current index into avg_search */
-#endif
-#ifdef CCIO_MAP_STATS
 	unsigned long used_pages;
 	unsigned long msingle_calls;
 	unsigned long msingle_pages;
@@ -351,7 +342,7 @@
 	unsigned int pages_needed = size >> IOVP_SHIFT;
 	unsigned int res_idx;
 	unsigned long boundary_size;
-#ifdef CCIO_SEARCH_TIME
+#ifdef CCIO_COLLECT_STATS
 	unsigned long cr_start = mfctl(16);
 #endif
 	
@@ -406,7 +397,7 @@
 	DBG_RES("%s() res_idx %d res_hint: %d\n",
 		__func__, res_idx, ioc->res_hint);
 
-#ifdef CCIO_SEARCH_TIME
+#ifdef CCIO_COLLECT_STATS
 	{
 		unsigned long cr_end = mfctl(16);
 		unsigned long tmp = cr_end - cr_start;
@@ -416,7 +407,7 @@
 	ioc->avg_search[ioc->avg_idx++] = cr_start;
 	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
 #endif
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 	ioc->used_pages += pages_needed;
 #endif
 	/* 
@@ -452,7 +443,7 @@
 	DBG_RES("%s():  res_idx: %d pages_mapped %d\n", 
 		__func__, res_idx, pages_mapped);
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 	ioc->used_pages -= pages_mapped;
 #endif
 
@@ -764,7 +755,7 @@
 	size = ALIGN(size + offset, IOVP_SIZE);
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 	ioc->msingle_calls++;
 	ioc->msingle_pages += size >> IOVP_SHIFT;
 #endif
@@ -828,7 +819,7 @@
 
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 	ioc->usingle_calls++;
 	ioc->usingle_pages += size >> IOVP_SHIFT;
 #endif
@@ -894,7 +885,7 @@
 */
 #define PIDE_FLAG 0x80000000UL
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 #define IOMMU_MAP_STATS
 #endif
 #include "iommu-helpers.h"
@@ -938,7 +929,7 @@
 	
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 	ioc->msg_calls++;
 #endif
 
@@ -997,13 +988,13 @@
 	DBG_RUN_SG("%s() START %d entries,  %08lx,%x\n",
 		__func__, nents, sg_virt_addr(sglist), sglist->length);
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 	ioc->usg_calls++;
 #endif
 
 	while(sg_dma_len(sglist) && nents--) {
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
 #endif
 		ccio_unmap_single(dev, sg_dma_address(sglist),
@@ -1048,7 +1039,7 @@
 		len += seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
 			       total_pages * 8, total_pages);
 
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 		len += seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
 				  total_pages - ioc->used_pages, ioc->used_pages,
 				  (int)(ioc->used_pages * 100 / total_pages));
@@ -1057,7 +1048,7 @@
 		len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n", 
 				  ioc->res_size, total_pages);
 
-#ifdef CCIO_SEARCH_TIME
+#ifdef CCIO_COLLECT_STATS
 		min = max = ioc->avg_search[0];
 		for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
 			avg += ioc->avg_search[j];
@@ -1070,7 +1061,7 @@
 		len += seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
 				  min, avg, max);
 #endif
-#ifdef CCIO_MAP_STATS
+#ifdef CCIO_COLLECT_STATS
 		len += seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
 				  ioc->msingle_calls, ioc->msingle_pages,
 				  (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
@@ -1088,7 +1079,7 @@
 		len += seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
 				  ioc->usg_calls, ioc->usg_pages,
 				  (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
-#endif	/* CCIO_MAP_STATS */
+#endif	/* CCIO_COLLECT_STATS */
 
 		ioc = ioc->next;
 	}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index fd56128..3bc54b3 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -298,7 +298,8 @@
 
 static void dino_disable_irq(unsigned int irq)
 {
-	struct dino_device *dino_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct dino_device *dino_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 
 	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
@@ -310,7 +311,8 @@
 
 static void dino_enable_irq(unsigned int irq)
 {
-	struct dino_device *dino_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct dino_device *dino_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 	u32 tmp;
 
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 771cef5..7891db5 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -346,10 +346,10 @@
 	}
 	
 	/* Reserve IRQ2 */
-	irq_desc[2].action = &irq2_action;
+	irq_to_desc(2)->action = &irq2_action;
 	
 	for (i = 0; i < 16; i++) {
-		irq_desc[i].chip = &eisa_interrupt_type;
+		irq_to_desc(i)->chip = &eisa_interrupt_type;
 	}
 	
 	EISA_bus = 1;
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index f7d088b..e76db9e 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -108,7 +108,8 @@
 
 static void gsc_asic_disable_irq(unsigned int irq)
 {
-	struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct gsc_asic *irq_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
 	u32 imr;
 
@@ -123,7 +124,8 @@
 
 static void gsc_asic_enable_irq(unsigned int irq)
 {
-	struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct gsc_asic *irq_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
 	u32 imr;
 
@@ -159,12 +161,14 @@
 int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
 {
 	static int irq = GSC_IRQ_BASE;
+	struct irq_desc *desc;
 
 	if (irq > GSC_IRQ_MAX)
 		return NO_IRQ;
 
-	irq_desc[irq].chip = type;
-	irq_desc[irq].chip_data = data;
+	desc = irq_to_desc(irq);
+	desc->chip = type;
+	desc->chip_data = data;
 	return irq++;
 }
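
The dino, eisa and gsc hunks all apply the same conversion: indexing the global irq_desc[] array directly is replaced by the irq_to_desc() accessor before touching chip or chip_data. A minimal sketch of the pattern, assuming only <linux/irq.h> and a hypothetical per-device type:

#include <linux/irq.h>

struct example_dev;

/* before: struct example_dev *d = irq_desc[irq].chip_data; */
static struct example_dev *example_get_chip_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* with sparse IRQs the descriptor may not exist */
	return desc ? desc->chip_data : NULL;
}
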
 
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 6fb3f79..7beffca 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -619,7 +619,9 @@
 
 static struct vector_info *iosapic_get_vector(unsigned int irq)
 {
-	return irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return desc->chip_data;
 }
 
 static void iosapic_disable_irq(unsigned int irq)
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 1e8d2d1..1e93c83 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -363,7 +363,9 @@
 #endif
 
 	for (i = 0; i < 16; i++) {
-		irq_desc[i].chip = &superio_interrupt_type;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->chip = &superio_interrupt_type;
 	}
 
 	/*
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 8a846ad..96f3bdf 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2791,6 +2791,7 @@
 	oxsemi_952,
 	oxsemi_954,
 	oxsemi_840,
+	oxsemi_pcie_pport,
 	aks_0100,
 	mobility_pp,
 	netmos_9705,
@@ -2868,6 +2869,7 @@
 	/* oxsemi_952 */		{ 1, { { 0, 1 }, } },
 	/* oxsemi_954 */		{ 1, { { 0, -1 }, } },
 	/* oxsemi_840 */		{ 1, { { 0, 1 }, } },
+	/* oxsemi_pcie_pport */		{ 1, { { 0, 1 }, } },
 	/* aks_0100 */                  { 1, { { 0, -1 }, } },
 	/* mobility_pp */		{ 1, { { 0, 1 }, } },
 	/* netmos_9705 */               { 1, { { 0, -1 }, } }, /* untested */
@@ -2928,7 +2930,6 @@
 	{ 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
 	{ 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 },
 	{ 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a },
-	{ 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
 	{ PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp },
 	{ PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP,
@@ -2946,8 +2947,25 @@
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
 	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
 	{ PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
+	{ 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
 	/* NetMos communication controllers */
 	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
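
The new oxsemi_pcie_pport entries follow the driver's existing scheme: an enum value indexes the per-card port layout table, and each pci_device_id row maps a vendor/device pair onto that index through driver_data. A hypothetical, self-contained illustration of the scheme (vendor/device numbers and names are placeholders, not taken from the driver):

#include <linux/module.h>
#include <linux/pci.h>

enum { example_card_a, example_card_b };	/* index into a layout table */

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x0001), .driver_data = example_card_a },
	{ PCI_DEVICE(0x1234, 0x0002), .driver_data = example_card_b },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);
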
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 529d9d7..999cc40 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -151,6 +151,13 @@
 			if (retval)
 				dev_err(&dev->dev, "Error creating cpuaffinity"
 					" file, continuing...\n");
+
+			retval = device_create_file(&child_bus->dev,
+						&dev_attr_cpulistaffinity);
+			if (retval)
+				dev_err(&dev->dev,
+					"Error creating cpulistaffinity"
+					" file, continuing...\n");
 		}
 	}
 }
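
The bus.c hunk registers a second sysfs attribute (cpulistaffinity) next to cpuaffinity and only warns when creation fails. A minimal sketch of that create-and-warn pattern for a hypothetical read-only attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

static void example_add_attr(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_example))
		dev_err(dev, "Error creating example file, continuing...\n");
}
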
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index e842e75..691b3ad 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -188,12 +188,11 @@
 	return 0;
 }
 
-static int __init
-dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
 	static int include_all;
-	int ret;
+	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
@@ -212,7 +211,7 @@
 		include_all = 1;
 	}
 
-	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
+	if (ret) {
 		list_del(&dmaru->list);
 		kfree(dmaru);
 	}
@@ -277,18 +276,37 @@
 		drhd = (struct acpi_dmar_hardware_unit *)header;
 		printk (KERN_INFO PREFIX
 			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-			drhd->flags, drhd->address);
+			drhd->flags, (unsigned long long)drhd->address);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
 		rmrr = (struct acpi_dmar_reserved_memory *)header;
 
 		printk (KERN_INFO PREFIX
 			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
-			rmrr->base_address, rmrr->end_address);
+			(unsigned long long)rmrr->base_address,
+			(unsigned long long)rmrr->end_address);
 		break;
 	}
 }
 
+/**
+ * dmar_table_detect - checks to see if the platform supports DMAR devices
+ */
+static int __init dmar_table_detect(void)
+{
+	acpi_status status = AE_OK;
+
+	/* if we could find DMAR table, then there are DMAR devices */
+	status = acpi_get_table(ACPI_SIG_DMAR, 0,
+				(struct acpi_table_header **)&dmar_tbl);
+
+	if (ACPI_SUCCESS(status) && !dmar_tbl) {
+		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+		status = AE_NOT_FOUND;
+	}
+
+	return (ACPI_SUCCESS(status) ? 1 : 0);
+}
 
 /**
  * parse_dmar_table - parses the DMA reporting table
@@ -300,11 +318,17 @@
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
 
+	/*
+	 * Do it again, earlier dmar_tbl mapping could be mapped with
+	 * fixed map.
+	 */
+	dmar_table_detect();
+
 	dmar = (struct acpi_table_dmar *)dmar_tbl;
 	if (!dmar)
 		return -ENODEV;
 
-	if (dmar->width < PAGE_SHIFT_4K - 1) {
+	if (dmar->width < PAGE_SHIFT - 1) {
 		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
 		return -EINVAL;
 	}
@@ -373,10 +397,10 @@
 
 int __init dmar_dev_scope_init(void)
 {
-	struct dmar_drhd_unit *drhd;
+	struct dmar_drhd_unit *drhd, *drhd_n;
 	int ret = -ENODEV;
 
-	for_each_drhd_unit(drhd) {
+	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
 		ret = dmar_parse_dev(drhd);
 		if (ret)
 			return ret;
@@ -384,8 +408,8 @@
 
 #ifdef CONFIG_DMAR
 	{
-		struct dmar_rmrr_unit *rmrr;
-		for_each_rmrr_units(rmrr) {
+		struct dmar_rmrr_unit *rmrr, *rmrr_n;
+		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
 			ret = rmrr_parse_dev(rmrr);
 			if (ret)
 				return ret;
@@ -430,33 +454,14 @@
 	return 0;
 }
 
-/**
- * early_dmar_detect - checks to see if the platform supports DMAR devices
- */
-int __init early_dmar_detect(void)
-{
-	acpi_status status = AE_OK;
-
-	/* if we could find DMAR table, then there are DMAR devices */
-	status = acpi_get_table(ACPI_SIG_DMAR, 0,
-				(struct acpi_table_header **)&dmar_tbl);
-
-	if (ACPI_SUCCESS(status) && !dmar_tbl) {
-		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
-		status = AE_NOT_FOUND;
-	}
-
-	return (ACPI_SUCCESS(status) ? 1 : 0);
-}
-
 void __init detect_intel_iommu(void)
 {
 	int ret;
 
-	ret = early_dmar_detect();
+	ret = dmar_table_detect();
 
-#ifdef CONFIG_DMAR
 	{
+#ifdef CONFIG_INTR_REMAP
 		struct acpi_table_dmar *dmar;
 		/*
 		 * for now we will disable dma-remapping when interrupt
@@ -465,28 +470,18 @@
 		 * is added, we will not need this any more.
 		 */
 		dmar = (struct acpi_table_dmar *) dmar_tbl;
-		if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
 			printk(KERN_INFO
 			       "Queued invalidation will be enabled to support "
 			       "x2apic and Intr-remapping.\n");
-			printk(KERN_INFO
-			       "Disabling IOMMU detection, because of missing "
-			       "queued invalidation support for IOTLB "
-			       "invalidation\n");
-			printk(KERN_INFO
-			       "Use \"nox2apic\", if you want to use Intel "
-			       " IOMMU for DMA-remapping and don't care about "
-			       " x2apic support\n");
-
-			dmar_disabled = 1;
-			return;
-		}
-
+#endif
+#ifdef CONFIG_DMAR
 		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
 		    !dmar_disabled)
 			iommu_detected = 1;
-	}
 #endif
+	}
+	dmar_tbl = NULL;
 }
 
 
@@ -503,7 +498,7 @@
 
 	iommu->seq_id = iommu_allocated++;
 
-	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
 	if (!iommu->reg) {
 		printk(KERN_ERR "IOMMU: can't map the region\n");
 		goto error;
@@ -514,8 +509,8 @@
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
 		cap_max_fault_reg_offset(iommu->cap));
-	map_size = PAGE_ALIGN_4K(map_size);
-	if (map_size > PAGE_SIZE_4K) {
+	map_size = VTD_PAGE_ALIGN(map_size);
+	if (map_size > VTD_PAGE_SIZE) {
 		iounmap(iommu->reg);
 		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
 		if (!iommu->reg) {
@@ -526,8 +521,10 @@
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-		iommu->cap, iommu->ecap);
+		(unsigned long long)drhd->reg_base_addr,
+		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		(unsigned long long)iommu->cap,
+		(unsigned long long)iommu->ecap);
 
 	spin_lock_init(&iommu->register_lock);
 
@@ -580,11 +577,11 @@
 
 	hw = qi->desc;
 
-	spin_lock(&qi->q_lock);
+	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock(&qi->q_lock);
+		spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
 	index = qi->free_head;
@@ -605,15 +602,22 @@
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
+		/*
+		 * We will leave interrupts disabled, to prevent interrupt
+		 * context from queueing another cmd while a cmd is already
+		 * submitted and waiting for completion on this cpu. This avoids
+		 * a deadlock where the interrupt context can wait indefinitely
+		 * for free slots in the queue.
+		 */
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
@@ -622,7 +626,7 @@
 	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
-	spin_unlock(&qi->q_lock);
+	spin_unlock_irqrestore(&qi->q_lock, flags);
 }
 
 /*
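
The locking change above makes qi->q_lock interrupt-safe: the submitting path now uses the irqsave variant and keeps interrupts off while it waits, so an interrupt handler on the same CPU can no longer deadlock against a half-submitted command. A generic sketch of that rule (not the driver's code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* process context: must disable local interrupts around the lock */
static void example_submit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... queue a descriptor, update shared indices ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* interrupt context: interrupts are already disabled here */
static void example_irq_path(void)
{
	spin_lock(&example_lock);
	/* ... reap completed descriptors ... */
	spin_unlock(&example_lock);
}
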
@@ -638,6 +642,62 @@
 	qi_submit_sync(&desc, iommu);
 }
 
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		     u64 type, int non_present_entry_flush)
+{
+
+	struct qi_desc desc;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+			| QI_CC_GRAN(type) | QI_CC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+		   unsigned int size_order, u64 type,
+		   int non_present_entry_flush)
+{
+	u8 dw = 0, dr = 0;
+
+	struct qi_desc desc;
+	int ih = 0;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	if (cap_write_drain(iommu->cap))
+		dw = 1;
+
+	if (cap_read_drain(iommu->cap))
+		dr = 1;
+
+	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+		| QI_IOTLB_AM(size_order);
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
 /*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
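
Both new helpers return 1 when a non-present-entry flush can be skipped (no caching mode) and 0 once a descriptor has been queued and completed. A hypothetical caller sketch, assuming the declarations introduced above are in scope; the granularity encoding is passed in by the caller rather than guessed here:

#include <linux/kernel.h>

/* gran is one of the driver's IOTLB granularity encodings */
static void example_flush_domain(struct intel_iommu *iommu, u16 did, u64 gran)
{
	if (qi_flush_iotlb(iommu, did, 0, 0, gran, 0))
		pr_debug("IOTLB flush skipped: no caching mode\n");
}
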
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 5a58b07..f9e244d 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -50,9 +50,6 @@
 #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
 #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
 
-/* name size which is used for entries in pcihpfs */
-#define SLOT_NAME_SIZE	20		/* {_SUN} */
-
 struct acpiphp_bridge;
 struct acpiphp_slot;
 
@@ -63,9 +60,13 @@
 	struct hotplug_slot	*hotplug_slot;
 	struct acpiphp_slot	*acpi_slot;
 	struct hotplug_slot_info info;
-	char name[SLOT_NAME_SIZE];
 };
 
+static inline const char *slot_name(struct slot *slot)
+{
+	return hotplug_slot_name(slot->hotplug_slot);
+}
+
 /*
  * struct acpiphp_bridge - PCI bridge information
  *
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 0e496e8..95b536a 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -44,6 +44,9 @@
 
 #define MY_NAME	"acpiphp"
 
+/* name size which is used for entries in pcihpfs */
+#define SLOT_NAME_SIZE  21              /* {_SUN} */
+
 static int debug;
 int acpiphp_debug;
 
@@ -84,7 +87,6 @@
 	.get_adapter_status	= get_adapter_status,
 };
 
-
 /**
  * acpiphp_register_attention - set attention LED callback
  * @info: must be completely filled with LED callbacks
@@ -136,7 +138,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	/* enable the specified slot */
 	return acpiphp_enable_slot(slot->acpi_slot);
@@ -154,7 +156,7 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	/* disable the specified slot */
 	retval = acpiphp_disable_slot(slot->acpi_slot);
@@ -177,7 +179,7 @@
  {
 	int retval = -ENODEV;
 
- 	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
  
 	if (attention_info && try_module_get(attention_info->owner)) {
 		retval = attention_info->set_attn(hotplug_slot, status);
@@ -200,7 +202,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = acpiphp_get_power_status(slot->acpi_slot);
 
@@ -222,7 +224,7 @@
 {
 	int retval = -EINVAL;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
 
 	if (attention_info && try_module_get(attention_info->owner)) {
 		retval = attention_info->get_attn(hotplug_slot, value);
@@ -245,7 +247,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = acpiphp_get_latch_status(slot->acpi_slot);
 
@@ -265,7 +267,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = acpiphp_get_adapter_status(slot->acpi_slot);
 
@@ -299,7 +301,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	kfree(slot->hotplug_slot);
 	kfree(slot);
@@ -310,6 +312,7 @@
 {
 	struct slot *slot;
 	int retval = -ENOMEM;
+	char name[SLOT_NAME_SIZE];
 
 	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 	if (!slot)
@@ -321,8 +324,6 @@
 
 	slot->hotplug_slot->info = &slot->info;
 
-	slot->hotplug_slot->name = slot->name;
-
 	slot->hotplug_slot->private = slot;
 	slot->hotplug_slot->release = &release_slot;
 	slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
@@ -336,11 +337,12 @@
 	slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
 
 	acpiphp_slot->slot = slot;
-	snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
+	snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
 
 	retval = pci_hp_register(slot->hotplug_slot,
 					acpiphp_slot->bridge->pci_bus,
-					acpiphp_slot->device);
+					acpiphp_slot->device,
+					name);
 	if (retval == -EBUSY)
 		goto error_hpslot;
 	if (retval) {
@@ -348,7 +350,7 @@
 		goto error_hpslot;
  	}
 
-	info("Slot [%s] registered\n", slot->hotplug_slot->name);
+	info("Slot [%s] registered\n", slot_name(slot));
 
 	return 0;
 error_hpslot:
@@ -365,7 +367,7 @@
 	struct slot *slot = acpiphp_slot->slot;
 	int retval = 0;
 
-	info ("Slot [%s] unregistered\n", slot->hotplug_slot->name);
+	info("Slot [%s] unregistered\n", slot_name(slot));
 
 	retval = pci_hp_deregister(slot->hotplug_slot);
 	if (retval)
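
acpiphp now formats the slot name into an on-stack buffer and passes it to the extended pci_hp_register() instead of keeping a name field in struct slot. A condensed sketch of the new call, assuming the kernel hotplug headers; the wrapper and buffer size below are illustrative:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#define EXAMPLE_SLOT_NAME_SIZE	21

static int example_register_slot(struct hotplug_slot *hs, struct pci_bus *bus,
				 int device, unsigned int sun)
{
	char name[EXAMPLE_SLOT_NAME_SIZE];

	/* the hotplug core copies the name; no per-slot buffer is kept */
	snprintf(name, sizeof(name), "%u", sun);
	return pci_hp_register(hs, bus, device, name);
}
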
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index d9769b3..9fff878 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -30,6 +30,7 @@
 
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci_hotplug.h>
 
 /* PICMG 2.1 R2.0 HS CSR bits: */
 #define HS_CSR_INS	0x0080
@@ -69,6 +70,11 @@
 	struct cpci_hp_controller_ops *ops;
 };
 
+static inline const char *slot_name(struct slot *slot)
+{
+	return hotplug_slot_name(slot->hotplug_slot);
+}
+
 extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
 extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
 extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 9359479..de94f4f 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -108,7 +108,7 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval = 0;
 
-	dbg("%s - physical_slot = %s", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s", __func__, slot_name(slot));
 
 	if (controller->ops->set_power)
 		retval = controller->ops->set_power(slot, 1);
@@ -121,25 +121,23 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval = 0;
 
-	dbg("%s - physical_slot = %s", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s", __func__, slot_name(slot));
 
 	down_write(&list_rwsem);
 
 	/* Unconfigure device */
-	dbg("%s - unconfiguring slot %s",
-	    __func__, slot->hotplug_slot->name);
+	dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
 	if ((retval = cpci_unconfigure_slot(slot))) {
 		err("%s - could not unconfigure slot %s",
-		    __func__, slot->hotplug_slot->name);
+		    __func__, slot_name(slot));
 		goto disable_error;
 	}
-	dbg("%s - finished unconfiguring slot %s",
-	    __func__, slot->hotplug_slot->name);
+	dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot));
 
 	/* Clear EXT (by setting it) */
 	if (cpci_clear_ext(slot)) {
 		err("%s - could not clear EXT for slot %s",
-		    __func__, slot->hotplug_slot->name);
+		    __func__, slot_name(slot));
 		retval = -ENODEV;
 		goto disable_error;
 	}
@@ -214,7 +212,6 @@
 	struct slot *slot = hotplug_slot->private;
 
 	kfree(slot->hotplug_slot->info);
-	kfree(slot->hotplug_slot->name);
 	kfree(slot->hotplug_slot);
 	if (slot->dev)
 		pci_dev_put(slot->dev);
@@ -222,12 +219,6 @@
 }
 
 #define SLOT_NAME_SIZE	6
-static void
-make_slot_name(struct slot *slot)
-{
-	snprintf(slot->hotplug_slot->name,
-		 SLOT_NAME_SIZE, "%02x:%02x", slot->bus->number, slot->number);
-}
 
 int
 cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
@@ -235,7 +226,7 @@
 	struct slot *slot;
 	struct hotplug_slot *hotplug_slot;
 	struct hotplug_slot_info *info;
-	char *name;
+	char name[SLOT_NAME_SIZE];
 	int status = -ENOMEM;
 	int i;
 
@@ -262,34 +253,31 @@
 			goto error_hpslot;
 		hotplug_slot->info = info;
 
-		name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
-		if (!name)
-			goto error_info;
-		hotplug_slot->name = name;
-
 		slot->bus = bus;
 		slot->number = i;
 		slot->devfn = PCI_DEVFN(i, 0);
 
+		snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
+
 		hotplug_slot->private = slot;
 		hotplug_slot->release = &release_slot;
-		make_slot_name(slot);
 		hotplug_slot->ops = &cpci_hotplug_slot_ops;
 
 		/*
 		 * Initialize the slot info structure with some known
 		 * good values.
 		 */
-		dbg("initializing slot %s", slot->hotplug_slot->name);
+		dbg("initializing slot %s", name);
 		info->power_status = cpci_get_power_status(slot);
 		info->attention_status = cpci_get_attention_status(slot);
 
-		dbg("registering slot %s", slot->hotplug_slot->name);
-		status = pci_hp_register(slot->hotplug_slot, bus, i);
+		dbg("registering slot %s", name);
+		status = pci_hp_register(slot->hotplug_slot, bus, i, name);
 		if (status) {
 			err("pci_hp_register failed with error %d", status);
-			goto error_name;
+			goto error_info;
 		}
+		dbg("slot registered with name: %s", slot_name(slot));
 
 		/* Add slot to our internal list */
 		down_write(&list_rwsem);
@@ -298,8 +286,6 @@
 		up_write(&list_rwsem);
 	}
 	return 0;
-error_name:
-	kfree(name);
 error_info:
 	kfree(info);
 error_hpslot:
@@ -327,7 +313,7 @@
 			list_del(&slot->slot_list);
 			slots--;
 
-			dbg("deregistering slot %s", slot->hotplug_slot->name);
+			dbg("deregistering slot %s", slot_name(slot));
 			status = pci_hp_deregister(slot->hotplug_slot);
 			if (status) {
 				err("pci_hp_deregister failed with error %d",
@@ -379,11 +365,10 @@
 		return -1;
 	}
 	list_for_each_entry(slot, &slot_list, slot_list) {
-		dbg("%s - looking at slot %s",
-		    __func__, slot->hotplug_slot->name);
+		dbg("%s - looking at slot %s", __func__, slot_name(slot));
 		if (clear_ins && cpci_check_and_clear_ins(slot))
 			dbg("%s - cleared INS for slot %s",
-			    __func__, slot->hotplug_slot->name);
+			    __func__, slot_name(slot));
 		dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
 		if (dev) {
 			if (update_adapter_status(slot->hotplug_slot, 1))
@@ -414,8 +399,7 @@
 	}
 	extracted = inserted = 0;
 	list_for_each_entry(slot, &slot_list, slot_list) {
-		dbg("%s - looking at slot %s",
-		    __func__, slot->hotplug_slot->name);
+		dbg("%s - looking at slot %s", __func__, slot_name(slot));
 		if (cpci_check_and_clear_ins(slot)) {
 			/*
 			 * Some broken hardware (e.g. PLX 9054AB) asserts
@@ -423,35 +407,34 @@
 			 */
 			if (slot->dev) {
 				warn("slot %s already inserted",
-				     slot->hotplug_slot->name);
+				     slot_name(slot));
 				inserted++;
 				continue;
 			}
 
 			/* Process insertion */
-			dbg("%s - slot %s inserted",
-			    __func__, slot->hotplug_slot->name);
+			dbg("%s - slot %s inserted", __func__, slot_name(slot));
 
 			/* GSM, debug */
 			hs_csr = cpci_get_hs_csr(slot);
 			dbg("%s - slot %s HS_CSR (1) = %04x",
-			    __func__, slot->hotplug_slot->name, hs_csr);
+			    __func__, slot_name(slot), hs_csr);
 
 			/* Configure device */
 			dbg("%s - configuring slot %s",
-			    __func__, slot->hotplug_slot->name);
+			    __func__, slot_name(slot));
 			if (cpci_configure_slot(slot)) {
 				err("%s - could not configure slot %s",
-				    __func__, slot->hotplug_slot->name);
+				    __func__, slot_name(slot));
 				continue;
 			}
 			dbg("%s - finished configuring slot %s",
-			    __func__, slot->hotplug_slot->name);
+			    __func__, slot_name(slot));
 
 			/* GSM, debug */
 			hs_csr = cpci_get_hs_csr(slot);
 			dbg("%s - slot %s HS_CSR (2) = %04x",
-			    __func__, slot->hotplug_slot->name, hs_csr);
+			    __func__, slot_name(slot), hs_csr);
 
 			if (update_latch_status(slot->hotplug_slot, 1))
 				warn("failure to update latch file");
@@ -464,18 +447,18 @@
 			/* GSM, debug */
 			hs_csr = cpci_get_hs_csr(slot);
 			dbg("%s - slot %s HS_CSR (3) = %04x",
-			    __func__, slot->hotplug_slot->name, hs_csr);
+			    __func__, slot_name(slot), hs_csr);
 
 			inserted++;
 		} else if (cpci_check_ext(slot)) {
 			/* Process extraction request */
 			dbg("%s - slot %s extracted",
-			    __func__, slot->hotplug_slot->name);
+			    __func__, slot_name(slot));
 
 			/* GSM, debug */
 			hs_csr = cpci_get_hs_csr(slot);
 			dbg("%s - slot %s HS_CSR = %04x",
-			    __func__, slot->hotplug_slot->name, hs_csr);
+			    __func__, slot_name(slot), hs_csr);
 
 			if (!slot->extracting) {
 				if (update_latch_status(slot->hotplug_slot, 0)) {
@@ -493,7 +476,7 @@
 				 * bother trying to tell the driver or not?
 				 */
 				err("card in slot %s was improperly removed",
-				    slot->hotplug_slot->name);
+				    slot_name(slot));
 				if (update_adapter_status(slot->hotplug_slot, 0))
 					warn("failure to update adapter file");
 				slot->extracting = 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index df82b95..829c327 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -209,7 +209,7 @@
 					      hs_cap + 2,
 					      hs_csr)) {
 			err("Could not set LOO for slot %s",
-			    slot->hotplug_slot->name);
+			    hotplug_slot_name(slot->hotplug_slot));
 			return -ENODEV;
 		}
 	}
@@ -238,7 +238,7 @@
 					      hs_cap + 2,
 					      hs_csr)) {
 			err("Could not clear LOO for slot %s",
-			    slot->hotplug_slot->name);
+			    hotplug_slot_name(slot->hotplug_slot));
 			return -ENODEV;
 		}
 	}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index b1decfa..afaf8f6 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -449,6 +449,11 @@
 
 /* inline functions */
 
+static inline char *slot_name(struct slot *slot)
+{
+	return hotplug_slot_name(slot->hotplug_slot);
+}
+
 /*
  * return_resource
  *
@@ -696,14 +701,6 @@
 	return presence_save;
 }
 
-#define SLOT_NAME_SIZE 10
-
-static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot)
-{
-	snprintf(buffer, buffer_size, "%d", slot->number);
-}
-
-
 static inline int wait_for_ctrl_irq(struct controller *ctrl)
 {
         DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 54defec..724d42c 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -315,14 +315,15 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	kfree(slot->hotplug_slot->info);
-	kfree(slot->hotplug_slot->name);
 	kfree(slot->hotplug_slot);
 	kfree(slot);
 }
 
+#define SLOT_NAME_SIZE 10
+
 static int ctrl_slot_setup(struct controller *ctrl,
 			void __iomem *smbios_start,
 			void __iomem *smbios_table)
@@ -335,6 +336,7 @@
 	u8 slot_number;
 	u8 ctrl_slot;
 	u32 tempdword;
+	char name[SLOT_NAME_SIZE];
 	void __iomem *slot_entry= NULL;
 	int result = -ENOMEM;
 
@@ -363,16 +365,12 @@
 		if (!hotplug_slot->info)
 			goto error_hpslot;
 		hotplug_slot_info = hotplug_slot->info;
-		hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
-
-		if (!hotplug_slot->name)
-			goto error_info;
 
 		slot->ctrl = ctrl;
 		slot->bus = ctrl->bus;
 		slot->device = slot_device;
 		slot->number = slot_number;
-		dbg("slot->number = %d\n", slot->number);
+		dbg("slot->number = %u\n", slot->number);
 
 		slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
 					slot_entry);
@@ -418,9 +416,9 @@
 		/* register this slot with the hotplug pci core */
 		hotplug_slot->release = &release_slot;
 		hotplug_slot->private = slot;
-		make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot);
+		snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
 		hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-		
+
 		hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
 		hotplug_slot_info->attention_status =
 			cpq_get_attention_status(ctrl, slot);
@@ -436,10 +434,11 @@
 				slot_number);
 		result = pci_hp_register(hotplug_slot,
 					 ctrl->pci_dev->subordinate,
-					 slot->device);
+					 slot->device,
+					 name);
 		if (result) {
 			err("pci_hp_register failed with error %d\n", result);
-			goto error_name;
+			goto error_info;
 		}
 		
 		slot->next = ctrl->slot;
@@ -451,8 +450,6 @@
 	}
 
 	return 0;
-error_name:
-	kfree(hotplug_slot->name);
 error_info:
 	kfree(hotplug_slot_info);
 error_hpslot:
@@ -638,7 +635,7 @@
 	u8 device;
 	u8 function;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
 		return -ENODEV;
@@ -665,7 +662,7 @@
 	u8 device;
 	u8 function;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
 		return -ENODEV;
@@ -697,7 +694,7 @@
 	u8 device;
 	u8 function;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
 		return -ENODEV;
@@ -720,7 +717,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	return cpqhp_hardware_test(ctrl, value);	
 }
@@ -731,7 +728,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = get_slot_enabled(ctrl, slot);
 	return 0;
@@ -742,7 +739,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 	
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = cpq_get_attention_status(ctrl, slot);
 	return 0;
@@ -753,7 +750,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = cpq_get_latch_status(ctrl, slot);
 
@@ -765,7 +762,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = get_presence_status(ctrl, slot);
 
@@ -777,7 +774,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = ctrl->speed_capability;
 
@@ -789,7 +786,7 @@
 	struct slot *slot = hotplug_slot->private;
 	struct controller *ctrl = slot->ctrl;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = ctrl->speed;
 
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ef041ca..a60a252 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1139,7 +1139,7 @@
 	for(slot = ctrl->slot; slot; slot = slot->next) {
 		if (slot->device == (hp_slot + ctrl->slot_device_offset)) 
 			continue;
-		if (!slot->hotplug_slot && !slot->hotplug_slot->info) 
+		if (!slot->hotplug_slot || !slot->hotplug_slot->info)
 			continue;
 		if (slot->hotplug_slot->info->adapter_status == 0) 
 			continue;
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 146ca9c..3a2637a 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,10 +66,10 @@
 	struct pci_dev *dev;
 	struct work_struct remove_work;
 	unsigned long removed;
-	char name[8];
 };
 
 static int debug;
+static int dup_slots;
 static LIST_HEAD(slot_list);
 static struct workqueue_struct *dummyphp_wq;
 
@@ -96,10 +96,13 @@
 	kfree(dslot);
 }
 
+#define SLOT_NAME_SIZE	8
+
 static int add_slot(struct pci_dev *dev)
 {
 	struct dummy_slot *dslot;
 	struct hotplug_slot *slot;
+	char name[SLOT_NAME_SIZE];
 	int retval = -ENOMEM;
 	static int count = 1;
 
@@ -119,19 +122,22 @@
 	if (!dslot)
 		goto error_info;
 
-	slot->name = dslot->name;
-	snprintf(slot->name, sizeof(dslot->name), "fake%d", count++);
-	dbg("slot->name = %s\n", slot->name);
+	if (dup_slots)
+		snprintf(name, SLOT_NAME_SIZE, "fake");
+	else
+		snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
+	dbg("slot->name = %s\n", name);
 	slot->ops = &dummy_hotplug_slot_ops;
 	slot->release = &dummy_release;
 	slot->private = dslot;
 
-	retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn));
+	retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
 	if (retval) {
 		err("pci_hp_register failed with error %d\n", retval);
 		goto error_dslot;
 	}
 
+	dbg("slot->name = %s\n", hotplug_slot_name(slot));
 	dslot->slot = slot;
 	dslot->dev = pci_dev_get(dev);
 	list_add (&dslot->node, &slot_list);
@@ -167,10 +173,11 @@
 {
 	int retval;
 
-	dbg("removing slot %s\n", dslot->slot->name);
+	dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
 	retval = pci_hp_deregister(dslot->slot);
 	if (retval)
-		err("Problem unregistering a slot %s\n", dslot->slot->name);
+		err("Problem unregistering a slot %s\n",
+			hotplug_slot_name(dslot->slot));
 }
 
 /* called from the single-threaded workqueue handler to remove a slot */
@@ -308,7 +315,7 @@
 		return -ENODEV;
 	dslot = slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
 
 	for (func = 7; func >= 0; func--) {
 		dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
@@ -373,4 +380,5 @@
 MODULE_LICENSE("GPL");
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-
+module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 612d963..a8d391a 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -707,17 +707,16 @@
 	u8 device;
 	u8 number;
 	u8 real_physical_slot_num;
-	char name[100];
 	u32 capabilities;
 	u8 supported_speed;
 	u8 supported_bus_mode;
+	u8 flag;		/* this is for disable slot and polling */
+	u8 ctlr_index;
 	struct hotplug_slot *hotplug_slot;
 	struct controller *ctrl;
 	struct pci_func *func;
 	u8 irq[4];
-	u8 flag;		/* this is for disable slot and polling */
 	int bit_mode;		/* 0 = 32, 1 = 64 */
-	u8 ctlr_index;
 	struct bus_info *bus_on;
 	struct list_head ibm_slot_list;
 	u8 status;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 7d27631..c1abac8 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,10 +123,8 @@
 static void __init print_bus_info (void)
 {
 	struct bus_info *ptr;
-	struct list_head *ptr1;
 	
-	list_for_each (ptr1, &bus_info_head) {
-		ptr = list_entry (ptr1, struct bus_info, bus_info_list);
+	list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
 		debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
 		debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
 		debug ("%s - slot_count = %x\n", __func__, ptr->slot_count);
@@ -146,10 +144,8 @@
 static void print_lo_info (void)
 {
 	struct rio_detail *ptr;
-	struct list_head *ptr1;
 	debug ("print_lo_info ----\n");	
-	list_for_each (ptr1, &rio_lo_head) {
-		ptr = list_entry (ptr1, struct rio_detail, rio_detail_list);
+	list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
 		debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
 		debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
 		debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
@@ -163,10 +159,8 @@
 static void print_vg_info (void)
 {
 	struct rio_detail *ptr;
-	struct list_head *ptr1;
 	debug ("%s ---\n", __func__);
-	list_for_each (ptr1, &rio_vg_head) {
-		ptr = list_entry (ptr1, struct rio_detail, rio_detail_list);
+	list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) {
 		debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
 		debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
 		debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
@@ -180,10 +174,8 @@
 static void __init print_ebda_pci_rsrc (void)
 {
 	struct ebda_pci_rsrc *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &ibmphp_ebda_pci_rsrc_head) {
-		ptr = list_entry (ptr1, struct ebda_pci_rsrc, ebda_pci_rsrc_list);
+	list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
 		debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", 
 			__func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
 	}
@@ -192,10 +184,8 @@
 static void __init print_ibm_slot (void)
 {
 	struct slot *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &ibmphp_slot_head) {
-		ptr = list_entry (ptr1, struct slot, ibm_slot_list);
+	list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) {
 		debug ("%s - slot_number: %x\n", __func__, ptr->number);
 	}
 }
@@ -203,10 +193,8 @@
 static void __init print_opt_vg (void)
 {
 	struct opt_rio *ptr;
-	struct list_head *ptr1;
 	debug ("%s ---\n", __func__);
-	list_for_each (ptr1, &opt_vg_head) {
-		ptr = list_entry (ptr1, struct opt_rio, opt_rio_list);
+	list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
 		debug ("%s - rio_type %x\n", __func__, ptr->rio_type);
 		debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num);
 		debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num);
@@ -217,13 +205,9 @@
 static void __init print_ebda_hpc (void)
 {
 	struct controller *hpc_ptr;
-	struct list_head *ptr1;
 	u16 index;
 
-	list_for_each (ptr1, &ebda_hpc_head) {
-
-		hpc_ptr = list_entry (ptr1, struct controller, ebda_hpc_list); 
-
+	list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) {
 		for (index = 0; index < hpc_ptr->slot_count; index++) {
 			debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num);
 			debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num);
@@ -460,9 +444,7 @@
 static struct opt_rio *search_opt_vg (u8 chassis_num)
 {
 	struct opt_rio *ptr;
-	struct list_head *ptr1;
-	list_for_each (ptr1, &opt_vg_head) {
-		ptr = list_entry (ptr1, struct opt_rio, opt_rio_list);
+	list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
 		if (ptr->chassis_num == chassis_num)
 			return ptr;
 	}		
@@ -473,10 +455,8 @@
 {
 	struct opt_rio *opt_rio_ptr = NULL;
 	struct rio_detail *rio_detail_ptr = NULL;
-	struct list_head *list_head_ptr = NULL;
 	
-	list_for_each (list_head_ptr, &rio_vg_head) {
-		rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list);
+	list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
 		opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
 		if (!opt_rio_ptr) {
 			opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL);
@@ -497,14 +477,12 @@
 }	
 
 /*
- * reorgnizing linked list of expansion box	 
+ * reorganizing linked list of expansion box
  */
 static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
 {
 	struct opt_rio_lo *ptr;
-	struct list_head *ptr1;
-	list_for_each (ptr1, &opt_lo_head) {
-		ptr = list_entry (ptr1, struct opt_rio_lo, opt_rio_lo_list);
+	list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
 		if (ptr->chassis_num == chassis_num)
 			return ptr;
 	}		
@@ -515,10 +493,8 @@
 {
 	struct opt_rio_lo *opt_rio_lo_ptr = NULL;
 	struct rio_detail *rio_detail_ptr = NULL;
-	struct list_head *list_head_ptr = NULL;
 	
-	list_for_each (list_head_ptr, &rio_lo_head) {
-		rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list);
+	list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
 		opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
 		if (!opt_rio_lo_ptr) {
 			opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL);
@@ -550,20 +526,17 @@
 {
 	struct opt_rio *opt_vg_ptr = NULL;
 	struct opt_rio_lo *opt_lo_ptr = NULL;
-	struct list_head *ptr = NULL;
 	int rc = 0;
 
 	if (!var) {
-		list_for_each (ptr, &opt_vg_head) {
-			opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list);
+		list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
 			if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { 
 				rc = -ENODEV;
 				break;
 			}
 		}
 	} else {
-		list_for_each (ptr, &opt_lo_head) {
-			opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list);
+		list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
 			if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) {
 				rc = -ENODEV;
 				break;
@@ -576,10 +549,8 @@
 static struct opt_rio_lo * find_rxe_num (u8 slot_num)
 {
 	struct opt_rio_lo *opt_lo_ptr;
-	struct list_head *ptr;
 
-	list_for_each (ptr, &opt_lo_head) {
-		opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list);
+	list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
 		//check to see if this slot_num belongs to expansion box
 		if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) 
 			return opt_lo_ptr;
@@ -590,10 +561,8 @@
 static struct opt_rio * find_chassis_num (u8 slot_num)
 {
 	struct opt_rio *opt_vg_ptr;
-	struct list_head *ptr;
 
-	list_for_each (ptr, &opt_vg_head) {
-		opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list);
+	list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
 		//check to see if this slot_num belongs to chassis 
 		if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) 
 			return opt_vg_ptr;
@@ -607,11 +576,9 @@
 static u8 calculate_first_slot (u8 slot_num)
 {
 	u8 first_slot = 1;
-	struct list_head * list;
 	struct slot * slot_cur;
 	
-	list_for_each (list, &ibmphp_slot_head) {
-		slot_cur = list_entry (list, struct slot, ibm_slot_list);
+	list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
 		if (slot_cur->ctrl) {
 			if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) 
 				first_slot = slot_cur->ctrl->ending_slot_num;
@@ -620,11 +587,14 @@
 	return first_slot + 1;
 
 }
+
+#define SLOT_NAME_SIZE 30
+
 static char *create_file_name (struct slot * slot_cur)
 {
 	struct opt_rio *opt_vg_ptr = NULL;
 	struct opt_rio_lo *opt_lo_ptr = NULL;
-	static char str[30];
+	static char str[SLOT_NAME_SIZE];
 	int which = 0; /* rxe = 1, chassis = 0 */
 	u8 number = 1; /* either chassis or rxe # */
 	u8 first_slot = 1;
@@ -736,7 +706,6 @@
 
 	slot = hotplug_slot->private;
 	kfree(slot->hotplug_slot->info);
-	kfree(slot->hotplug_slot->name);
 	kfree(slot->hotplug_slot);
 	slot->ctrl = NULL;
 	slot->bus_on = NULL;
@@ -767,7 +736,7 @@
 	struct bus_info *bus_info_ptr1, *bus_info_ptr2;
 	int rc;
 	struct slot *tmp_slot;
-	struct list_head *list;
+	char name[SLOT_NAME_SIZE];
 
 	addr = hpc_list_ptr->phys_addr;
 	for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
@@ -931,12 +900,6 @@
 				goto error_no_hp_info;
 			}
 
-			hp_slot_ptr->name = kmalloc(30, GFP_KERNEL);
-			if (!hp_slot_ptr->name) {
-				rc = -ENOMEM;
-				goto error_no_hp_name;
-			}
-
 			tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
 			if (!tmp_slot) {
 				rc = -ENOMEM;
@@ -997,12 +960,10 @@
 
 	}			/* each hpc  */
 
-	list_for_each (list, &ibmphp_slot_head) {
-		tmp_slot = list_entry (list, struct slot, ibm_slot_list);
-
-		snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
+	list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
+		snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
 		pci_hp_register(tmp_slot->hotplug_slot,
-			pci_find_bus(0, tmp_slot->bus), tmp_slot->device);
+			pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
 	}
 
 	print_ebda_hpc ();
@@ -1012,8 +973,6 @@
 error:
 	kfree (hp_slot_ptr->private);
 error_no_slot:
-	kfree (hp_slot_ptr->name);
-error_no_hp_name:
 	kfree (hp_slot_ptr->info);
 error_no_hp_info:
 	kfree (hp_slot_ptr);
@@ -1101,10 +1060,8 @@
 struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
 {
 	struct slot *slot;
-	struct list_head *list;
 
-	list_for_each (list, &ibmphp_slot_head) {
-		slot = list_entry (list, struct slot, ibm_slot_list);
+	list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) {
 		if (slot->number == physical_num)
 			return slot;
 	}
@@ -1120,10 +1077,8 @@
 struct bus_info *ibmphp_find_same_bus_num (u32 num)
 {
 	struct bus_info *ptr;
-	struct list_head  *ptr1;
 
-	list_for_each (ptr1, &bus_info_head) {
-		ptr = list_entry (ptr1, struct bus_info, bus_info_list); 
+	list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
 		if (ptr->busno == num) 
 			 return ptr;
 	}
@@ -1136,10 +1091,8 @@
 int ibmphp_get_bus_index (u8 num)
 {
 	struct bus_info *ptr;
-	struct list_head  *ptr1;
 
-	list_for_each (ptr1, &bus_info_head) {
-		ptr = list_entry (ptr1, struct bus_info, bus_info_list);
+	list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
 		if (ptr->busno == num)  
 			return ptr->index;
 	}
@@ -1212,11 +1165,9 @@
 int ibmphp_register_pci (void)
 {
 	struct controller *ctrl;
-	struct list_head *tmp;
 	int rc = 0;
 
-	list_for_each (tmp, &ebda_hpc_head) {
-		ctrl = list_entry (tmp, struct controller, ebda_hpc_list);
+	list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
 		if (ctrl->ctlr_type == 1) {
 			rc = pci_register_driver(&ibmphp_driver);
 			break;
@@ -1227,12 +1178,10 @@
 static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
 {
 	struct controller *ctrl;
-	struct list_head *tmp;
 
 	debug ("inside ibmphp_probe\n");
 	
-	list_for_each (tmp, &ebda_hpc_head) {
-		ctrl = list_entry (tmp, struct controller, ebda_hpc_list);
+	list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
 		if (ctrl->ctlr_type == 1) {
 			if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
 				ctrl->ctrl_dev = dev;
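
Every walker in ibmphp_ebda.c is converted from the open-coded list_for_each() plus list_entry() pair to the typed list_for_each_entry() iterator, dropping the extra struct list_head cursor. A self-contained sketch of the conversion:

#include <linux/list.h>

struct example_node {
	int value;
	struct list_head link;
};

static LIST_HEAD(example_head);

static struct example_node *example_find(int value)
{
	struct example_node *p;

	/*
	 * before:
	 *	struct list_head *cur;
	 *	list_for_each(cur, &example_head) {
	 *		p = list_entry(cur, struct example_node, link);
	 *		...
	 */
	list_for_each_entry(p, &example_head, link) {
		if (p->value == value)
			return p;
	}
	return NULL;
}
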
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5f85b1b..535fce0f 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -37,6 +37,7 @@
 #include <linux/init.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
 #include <asm/uaccess.h>
@@ -61,7 +62,7 @@
 //////////////////////////////////////////////////////////////////
 
 static LIST_HEAD(pci_hotplug_slot_list);
-static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock);
+static DEFINE_MUTEX(pci_hp_mutex);
 
 /* these strings match up with the values in pci_bus_speed */
 static char *pci_bus_speed_strings[] = {
@@ -102,13 +103,13 @@
 {									\
 	struct hotplug_slot_ops *ops = slot->ops;			\
 	int retval = 0;							\
-	if (try_module_get(ops->owner)) {				\
-		if (ops->get_##name)					\
-			retval = ops->get_##name(slot, value);		\
-		else							\
-			*value = slot->info->name;			\
-		module_put(ops->owner);					\
-	}								\
+	if (!try_module_get(ops->owner))				\
+		return -ENODEV;						\
+	if (ops->get_##name)						\
+		retval = ops->get_##name(slot, value);			\
+	else								\
+		*value = slot->info->name;				\
+	module_put(ops->owner);						\
 	return retval;							\
 }
 
@@ -530,16 +531,12 @@
 	struct hotplug_slot *slot;
 	struct list_head *tmp;
 
-	spin_lock(&pci_hotplug_slot_list_lock);
 	list_for_each (tmp, &pci_hotplug_slot_list) {
 		slot = list_entry (tmp, struct hotplug_slot, slot_list);
-		if (strcmp(slot->name, name) == 0)
-			goto out;
+		if (strcmp(hotplug_slot_name(slot), name) == 0)
+			return slot;
 	}
-	slot = NULL;
-out:
-	spin_unlock(&pci_hotplug_slot_list_lock);
-	return slot;
+	return NULL;
 }
 
 /**
@@ -547,13 +544,15 @@
  * @bus: bus this slot is on
  * @slot: pointer to the &struct hotplug_slot to register
  * @slot_nr: slot number
+ * @name: name registered with kobject core
  *
  * Registers a hotplug slot with the pci hotplug subsystem, which will allow
  * userspace interaction to the slot.
  *
  * Returns 0 if successful, anything else for an error.
  */
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
+int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
+			const char *name)
 {
 	int result;
 	struct pci_slot *pci_slot;
@@ -568,48 +567,29 @@
 		return -EINVAL;
 	}
 
-	/* Check if we have already registered a slot with the same name. */
-	if (get_slot_from_name(slot->name))
-		return -EEXIST;
+	mutex_lock(&pci_hp_mutex);
 
 	/*
 	 * No problems if we call this interface from both ACPI_PCI_SLOT
 	 * driver and call it here again. If we've already created the
 	 * pci_slot, the interface will simply bump the refcount.
 	 */
-	pci_slot = pci_create_slot(bus, slot_nr, slot->name);
-	if (IS_ERR(pci_slot))
-		return PTR_ERR(pci_slot);
-
-	if (pci_slot->hotplug) {
-		dbg("%s: already claimed\n", __func__);
-		pci_destroy_slot(pci_slot);
-		return -EBUSY;
+	pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+	if (IS_ERR(pci_slot)) {
+		result = PTR_ERR(pci_slot);
+		goto out;
 	}
 
 	slot->pci_slot = pci_slot;
 	pci_slot->hotplug = slot;
 
-	/*
-	 * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
-	 */
-	if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
-		result = kobject_rename(&pci_slot->kobj, slot->name);
-		if (result) {
-			pci_destroy_slot(pci_slot);
-			return result;
-		}
-	}
-
-	spin_lock(&pci_hotplug_slot_list_lock);
 	list_add(&slot->slot_list, &pci_hotplug_slot_list);
-	spin_unlock(&pci_hotplug_slot_list_lock);
 
 	result = fs_add_slot(pci_slot);
 	kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
-	dbg("Added slot %s to the list\n", slot->name);
-
-
+	dbg("Added slot %s to the list\n", name);
+out:
+	mutex_unlock(&pci_hp_mutex);
 	return result;
 }
 
@@ -630,21 +610,23 @@
 	if (!hotplug)
 		return -ENODEV;
 
-	temp = get_slot_from_name(hotplug->name);
-	if (temp != hotplug)
+	mutex_lock(&pci_hp_mutex);
+	temp = get_slot_from_name(hotplug_slot_name(hotplug));
+	if (temp != hotplug) {
+		mutex_unlock(&pci_hp_mutex);
 		return -ENODEV;
+	}
 
-	spin_lock(&pci_hotplug_slot_list_lock);
 	list_del(&hotplug->slot_list);
-	spin_unlock(&pci_hotplug_slot_list_lock);
 
 	slot = hotplug->pci_slot;
 	fs_remove_slot(slot);
-	dbg("Removed slot %s from the list\n", hotplug->name);
+	dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
 
 	hotplug->release(hotplug);
 	slot->hotplug = NULL;
 	pci_destroy_slot(slot);
+	mutex_unlock(&pci_hp_mutex);
 
 	return 0;
 }
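
pci_hotplug_core.c now serializes registration and removal with a mutex and relies on pci_create_slot(), which receives the slot name and the hotplug pointer, to handle naming. A condensed sketch of the new register path under those assumptions (placeholder identifiers; error handling and sysfs setup elided):

#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static DEFINE_MUTEX(example_hp_mutex);
static LIST_HEAD(example_slot_list);

static int example_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
			       int slot_nr, const char *name)
{
	struct pci_slot *pci_slot;
	int result = 0;

	mutex_lock(&example_hp_mutex);
	pci_slot = pci_create_slot(bus, slot_nr, name, slot);
	if (IS_ERR(pci_slot)) {
		result = PTR_ERR(pci_slot);
		goto out;
	}
	slot->pci_slot = pci_slot;
	pci_slot->hotplug = slot;
	list_add(&slot->slot_list, &example_slot_list);
out:
	mutex_unlock(&example_hp_mutex);
	return result;
}
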
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 9e6cec6..a4817a841 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -57,19 +57,30 @@
 #define warn(format, arg...)						\
 	printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
 
+#define ctrl_dbg(ctrl, format, arg...)					\
+	do {								\
+		if (pciehp_debug)					\
+			dev_printk(, &ctrl->pcie->device,		\
+					format, ## arg);		\
+	} while (0)
+#define ctrl_err(ctrl, format, arg...)					\
+	dev_err(&ctrl->pcie->device, format, ## arg)
+#define ctrl_info(ctrl, format, arg...)					\
+	dev_info(&ctrl->pcie->device, format, ## arg)
+#define ctrl_warn(ctrl, format, arg...)					\
+	dev_warn(&ctrl->pcie->device, format, ## arg)
+
 #define SLOT_NAME_SIZE 10
 struct slot {
 	u8 bus;
 	u8 device;
-	u32 number;
 	u8 state;
-	struct timer_list task_event;
 	u8 hp_slot;
+	u32 number;
 	struct controller *ctrl;
 	struct hpc_ops *hpc_ops;
 	struct hotplug_slot *hotplug_slot;
 	struct list_head	slot_list;
-	char name[SLOT_NAME_SIZE];
 	unsigned long last_emi_toggle;
 	struct delayed_work work;	/* work for button event */
 	struct mutex lock;
@@ -87,6 +98,7 @@
 	int num_slots;			/* Number of slots on ctlr */
 	int slot_num_inc;		/* 1 or -1 */
 	struct pci_dev *pci_dev;
+	struct pcie_device *pcie;	/* PCI Express port service */
 	struct list_head slot_list;
 	struct hpc_ops *hpc_ops;
 	wait_queue_head_t queue;	/* sleep & wake process */
@@ -98,6 +110,7 @@
 	struct timer_list poll_timer;
 	int cmd_busy;
 	unsigned int no_cmd_complete:1;
+	unsigned int link_active_reporting:1;
 };
 
 #define INT_BUTTON_IGNORE		0
@@ -161,6 +174,11 @@
 int pciehp_disable_slot(struct slot *p_slot);
 int pcie_enable_notification(struct controller *ctrl);
 
+static inline const char *slot_name(struct slot *slot)
+{
+	return hotplug_slot_name(slot->hotplug_slot);
+}
+
 static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
 {
 	struct slot *slot;
@@ -170,7 +188,7 @@
 			return slot;
 	}
 
-	err("%s: slot (device=0x%x) not found\n", __func__, device);
+	ctrl_err(ctrl, "%s: slot (device=0x%x) not found\n", __func__, device);
 	return NULL;
 }
 
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 4fd5355..62be1b5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -144,9 +144,10 @@
  * sysfs interface which allows the user to toggle the Electro Mechanical
  * Interlock.  Valid values are either 0 or 1.  0 == unlock, 1 == lock
  */
-static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf,
-		size_t count)
+static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
+		const char *buf, size_t count)
 {
+	struct slot *slot = hotplug_slot->private;
 	unsigned long llock;
 	u8 lock;
 	int retval = 0;
@@ -157,10 +158,11 @@
 	switch (lock) {
 		case 0:
 		case 1:
-			retval = set_lock_status(slot, lock);
+			retval = set_lock_status(hotplug_slot, lock);
 			break;
 		default:
-			err ("%d is an invalid lock value\n", lock);
+			ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
+				 lock);
 			retval = -EINVAL;
 	}
 	if (retval)
@@ -180,7 +182,10 @@
  */
 static void release_slot(struct hotplug_slot *hotplug_slot)
 {
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	struct slot *slot = hotplug_slot->private;
+
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot_name(hotplug_slot));
 
 	kfree(hotplug_slot->info);
 	kfree(hotplug_slot);
@@ -191,7 +196,7 @@
 	struct slot *slot;
 	struct hotplug_slot *hotplug_slot;
 	struct hotplug_slot_info *info;
-	int len, dup = 1;
+	char name[SLOT_NAME_SIZE];
 	int retval = -ENOMEM;
 
 	list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -205,46 +210,36 @@
 
 		/* register this slot with the hotplug pci core */
 		hotplug_slot->info = info;
-		hotplug_slot->name = slot->name;
 		hotplug_slot->private = slot;
 		hotplug_slot->release = &release_slot;
 		hotplug_slot->ops = &pciehp_hotplug_slot_ops;
+		slot->hotplug_slot = hotplug_slot;
+		snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+
+		ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x "
+			 "slot_device_offset=%x\n", slot->bus, slot->device,
+			 slot->hp_slot, slot->number, ctrl->slot_device_offset);
+		retval = pci_hp_register(hotplug_slot,
+					 ctrl->pci_dev->subordinate,
+					 slot->device,
+					 name);
+		if (retval) {
+			ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
+				 retval);
+			goto error_info;
+		}
 		get_power_status(hotplug_slot, &info->power_status);
 		get_attention_status(hotplug_slot, &info->attention_status);
 		get_latch_status(hotplug_slot, &info->latch_status);
 		get_adapter_status(hotplug_slot, &info->adapter_status);
-		slot->hotplug_slot = hotplug_slot;
-
-		dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
-		    "slot_device_offset=%x\n", slot->bus, slot->device,
-		    slot->hp_slot, slot->number, ctrl->slot_device_offset);
-duplicate_name:
-		retval = pci_hp_register(hotplug_slot,
-					 ctrl->pci_dev->subordinate,
-					 slot->device);
-		if (retval) {
-			/*
-			 * If slot N already exists, we'll try to create
-			 * slot N-1, N-2 ... N-M, until we overflow.
-			 */
-			if (retval == -EEXIST) {
-				len = snprintf(slot->name, SLOT_NAME_SIZE,
-					       "%d-%d", slot->number, dup++);
-				if (len < SLOT_NAME_SIZE)
-					goto duplicate_name;
-				else
-					err("duplicate slot name overflow\n");
-			}
-			err("pci_hp_register failed with error %d\n", retval);
-			goto error_info;
-		}
 		/* create additional sysfs entries */
 		if (EMI(ctrl)) {
 			retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
 				&hotplug_slot_attr_lock.attr);
 			if (retval) {
 				pci_hp_deregister(hotplug_slot);
-				err("cannot create additional sysfs entries\n");
+				ctrl_err(ctrl, "cannot create additional sysfs "
+					 "entries\n");
 				goto error_info;
 			}
 		}
@@ -278,7 +273,8 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		  __func__, slot_name(slot));
 
 	hotplug_slot->info->attention_status = status;
 
@@ -293,7 +289,8 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, slot_name(slot));
 
 	return pciehp_sysfs_enable_slot(slot);
 }
@@ -303,7 +300,8 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		  __func__, slot_name(slot));
 
 	return pciehp_sysfs_disable_slot(slot);
 }
@@ -313,7 +311,8 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		  __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_power_status(slot, value);
 	if (retval < 0)
@@ -327,7 +326,8 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		  __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_attention_status(slot, value);
 	if (retval < 0)
@@ -341,7 +341,8 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_latch_status(slot, value);
 	if (retval < 0)
@@ -355,7 +356,8 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_adapter_status(slot, value);
 	if (retval < 0)
@@ -370,7 +372,8 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_max_bus_speed(slot, value);
 	if (retval < 0)
@@ -384,7 +387,8 @@
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
 	if (retval < 0)
@@ -402,14 +406,15 @@
 	struct pci_dev *pdev = dev->port;
 
 	if (pciehp_force)
-		dbg("Bypassing BIOS check for pciehp use on %s\n",
-		    pci_name(pdev));
+		dev_info(&dev->device,
+			 "Bypassing BIOS check for pciehp use on %s\n",
+			 pci_name(pdev));
 	else if (pciehp_get_hp_hw_control_from_firmware(pdev))
 		goto err_out_none;
 
 	ctrl = pcie_init(dev);
 	if (!ctrl) {
-		dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
+		dev_err(&dev->device, "controller initialization failed\n");
 		goto err_out_none;
 	}
 	set_service_data(dev, ctrl);
@@ -418,11 +423,10 @@
 	rc = init_slots(ctrl);
 	if (rc) {
 		if (rc == -EBUSY)
-			warn("%s: slot already registered by another "
-				"hotplug driver\n", PCIE_MODULE_NAME);
+			ctrl_warn(ctrl, "slot already registered by another "
+				  "hotplug driver\n");
 		else
-			err("%s: slot initialization failed\n",
-				PCIE_MODULE_NAME);
+			ctrl_err(ctrl, "slot initialization failed\n");
 		goto err_out_release_ctlr;
 	}
 
@@ -461,13 +465,13 @@
 #ifdef CONFIG_PM
 static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
 {
-	printk("%s ENTRY\n", __func__);
+	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	return 0;
 }
 
 static int pciehp_resume (struct pcie_device *dev)
 {
-	printk("%s ENTRY\n", __func__);
+	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	if (pciehp_force) {
 		struct controller *ctrl = get_service_data(dev);
 		struct slot *t_slot;
@@ -497,10 +501,9 @@
 	.driver_data =	0,
 	}, { /* end: all zeroes */ }
 };
-static const char device_name[] = "hpdriver";
 
 static struct pcie_port_service_driver hpdriver_portdrv = {
-	.name		= (char *)device_name,
+	.name		= PCIE_MODULE_NAME,
 	.id_table	= &port_pci_ids[0],
 
 	.probe		= pciehp_probe,
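In the init_slots() hunk above, the slot name is now formatted into a stack buffer and handed to pci_hp_register() as an explicit fourth argument, registration happens before the initial status queries, and the driver-side duplicate-name retry loop ("%d-%d") is dropped now that the name is passed in (and copied) at registration time. A condensed sketch of the new flow, with error handling abbreviated and identifiers as in the hunk:

	char name[SLOT_NAME_SIZE];

	snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
	retval = pci_hp_register(hotplug_slot, ctrl->pci_dev->subordinate,
				 slot->device, name);
	if (retval)
		ctrl_err(ctrl, "pci_hp_register failed with error %d\n", retval);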
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 96a5d55..d6c5eb2 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -58,14 +58,15 @@
 u8 pciehp_handle_attention_button(struct slot *p_slot)
 {
 	u32 event_type;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Attention Button Change */
-	dbg("pciehp:  Attention button interrupt received.\n");
+	ctrl_dbg(ctrl, "Attention button interrupt received.\n");
 
 	/*
 	 *  Button pressed - See if need to TAKE ACTION!!!
 	 */
-	info("Button pressed on Slot(%s)\n", p_slot->name);
+	ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
 	event_type = INT_BUTTON_PRESS;
 
 	queue_interrupt_event(p_slot, event_type);
@@ -77,22 +78,23 @@
 {
 	u8 getstatus;
 	u32 event_type;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Switch Change */
-	dbg("pciehp:  Switch interrupt received.\n");
+	ctrl_dbg(ctrl, "Switch interrupt received.\n");
 
 	p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 	if (getstatus) {
 		/*
 		 * Switch opened
 		 */
-		info("Latch open on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_SWITCH_OPEN;
 	} else {
 		/*
 		 *  Switch closed
 		 */
-		info("Latch close on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_SWITCH_CLOSE;
 	}
 
@@ -105,9 +107,10 @@
 {
 	u32 event_type;
 	u8 presence_save;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Presence Change */
-	dbg("pciehp:  Presence/Notify input change.\n");
+	ctrl_dbg(ctrl, "Presence/Notify input change.\n");
 
 	/* Switch is open, assume a presence change
 	 * Save the presence state
@@ -117,13 +120,14 @@
 		/*
 		 * Card Present
 		 */
-		info("Card present on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_PRESENCE_ON;
 	} else {
 		/*
 		 * Not Present
 		 */
-		info("Card not present on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Card not present on Slot(%s)\n",
+			  slot_name(p_slot));
 		event_type = INT_PRESENCE_OFF;
 	}
 
@@ -135,23 +139,25 @@
 u8 pciehp_handle_power_fault(struct slot *p_slot)
 {
 	u32 event_type;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* power fault */
-	dbg("pciehp:  Power fault interrupt received.\n");
+	ctrl_dbg(ctrl, "Power fault interrupt received.\n");
 
 	if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
 		/*
 		 * power fault Cleared
 		 */
-		info("Power fault cleared on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
+			  slot_name(p_slot));
 		event_type = INT_POWER_FAULT_CLEAR;
 	} else {
 		/*
 		 *   power fault
 		 */
-		info("Power fault on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_POWER_FAULT;
-		info("power fault bit %x set\n", 0);
+		ctrl_info(ctrl, "power fault bit %x set\n", 0);
 	}
 
 	queue_interrupt_event(p_slot, event_type);
@@ -168,8 +174,9 @@
 	/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
 	if (POWER_CTRL(ctrl)) {
 		if (pslot->hpc_ops->power_off_slot(pslot)) {
-			err("%s: Issue of Slot Power Off command failed\n",
-			    __func__);
+			ctrl_err(ctrl,
+				 "%s: Issue of Slot Power Off command failed\n",
+				 __func__);
 			return;
 		}
 	}
@@ -186,8 +193,8 @@
 
 	if (ATTN_LED(ctrl)) {
 		if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
-			err("%s: Issue of Set Attention Led command failed\n",
-			    __func__);
+			ctrl_err(ctrl, "%s: Issue of Set Attention "
+				 "Led command failed\n", __func__);
 			return;
 		}
 	}
@@ -205,9 +212,9 @@
 	int retval = 0;
 	struct controller *ctrl = p_slot->ctrl;
 
-	dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n",
-			__func__, p_slot->device,
-			ctrl->slot_device_offset, p_slot->hp_slot);
+	ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d ,%d\n",
+		 __func__, p_slot->device, ctrl->slot_device_offset,
+		 p_slot->hp_slot);
 
 	if (POWER_CTRL(ctrl)) {
 		/* Power on slot */
@@ -219,28 +226,25 @@
 	if (PWR_LED(ctrl))
 		p_slot->hpc_ops->green_led_blink(p_slot);
 
-	/* Wait for ~1 second */
-	msleep(1000);
-
 	/* Check link training status */
 	retval = p_slot->hpc_ops->check_lnk_status(ctrl);
 	if (retval) {
-		err("%s: Failed to check link status\n", __func__);
+		ctrl_err(ctrl, "%s: Failed to check link status\n", __func__);
 		set_slot_off(ctrl, p_slot);
 		return retval;
 	}
 
 	/* Check for a power fault */
 	if (p_slot->hpc_ops->query_power_fault(p_slot)) {
-		dbg("%s: power fault detected\n", __func__);
+		ctrl_dbg(ctrl, "%s: power fault detected\n", __func__);
 		retval = POWER_FAILURE;
 		goto err_exit;
 	}
 
 	retval = pciehp_configure_device(p_slot);
 	if (retval) {
-		err("Cannot add device 0x%x:%x\n", p_slot->bus,
-		    p_slot->device);
+		ctrl_err(ctrl, "Cannot add device 0x%x:%x\n",
+			 p_slot->bus, p_slot->device);
 		goto err_exit;
 	}
 
@@ -272,14 +276,14 @@
 	if (retval)
 		return retval;
 
-	dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot);
+	ctrl_dbg(ctrl, "In %s, hp_slot = %d\n", __func__, p_slot->hp_slot);
 
 	if (POWER_CTRL(ctrl)) {
 		/* power off slot */
 		retval = p_slot->hpc_ops->power_off_slot(p_slot);
 		if (retval) {
-			err("%s: Issue of Slot Disable command failed\n",
-			    __func__);
+			ctrl_err(ctrl, "%s: Issue of Slot Disable command "
+				 "failed\n", __func__);
 			return retval;
 		}
 	}
@@ -320,8 +324,8 @@
 	switch (p_slot->state) {
 	case POWEROFF_STATE:
 		mutex_unlock(&p_slot->lock);
-		dbg("%s: disabling bus:device(%x:%x)\n",
-		    __func__, p_slot->bus, p_slot->device);
+		ctrl_dbg(p_slot->ctrl, "%s: disabling bus:device(%x:%x)\n",
+			 __func__, p_slot->bus, p_slot->device);
 		pciehp_disable_slot(p_slot);
 		mutex_lock(&p_slot->lock);
 		p_slot->state = STATIC_STATE;
@@ -349,7 +353,8 @@
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
-		err("%s: Cannot allocate memory\n", __func__);
+		ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+			 __func__);
 		return;
 	}
 	info->p_slot = p_slot;
@@ -403,12 +408,14 @@
 		p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
 		if (getstatus) {
 			p_slot->state = BLINKINGOFF_STATE;
-			info("PCI slot #%s - powering off due to button "
-			     "press.\n", p_slot->name);
+			ctrl_info(ctrl,
+				  "PCI slot #%s - powering off due to button "
+				  "press.\n", slot_name(p_slot));
 		} else {
 			p_slot->state = BLINKINGON_STATE;
-			info("PCI slot #%s - powering on due to button "
-			     "press.\n", p_slot->name);
+			ctrl_info(ctrl,
+				  "PCI slot #%s - powering on due to button "
+				  "press.\n", slot_name(p_slot));
 		}
 		/* blink green LED and turn off amber */
 		if (PWR_LED(ctrl))
@@ -425,8 +432,8 @@
 		 * press the attention again before the 5 sec. limit
 		 * expires to cancel hot-add or hot-remove
 		 */
-		info("Button cancel on Slot(%s)\n", p_slot->name);
-		dbg("%s: button cancel\n", __func__);
+		ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
+		ctrl_dbg(ctrl, "%s: button cancel\n", __func__);
 		cancel_delayed_work(&p_slot->work);
 		if (p_slot->state == BLINKINGOFF_STATE) {
 			if (PWR_LED(ctrl))
@@ -437,8 +444,8 @@
 		}
 		if (ATTN_LED(ctrl))
 			p_slot->hpc_ops->set_attention_status(p_slot, 0);
-		info("PCI slot #%s - action canceled due to button press\n",
-		     p_slot->name);
+		ctrl_info(ctrl, "PCI slot #%s - action canceled "
+			  "due to button press\n", slot_name(p_slot));
 		p_slot->state = STATIC_STATE;
 		break;
 	case POWEROFF_STATE:
@@ -448,11 +455,11 @@
 		 * this means that the previous attention button action
 		 * to hot-add or hot-remove is undergoing
 		 */
-		info("Button ignore on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
 		update_slot_info(p_slot);
 		break;
 	default:
-		warn("Not a valid state\n");
+		ctrl_warn(ctrl, "Not a valid state\n");
 		break;
 	}
 }
@@ -467,7 +474,8 @@
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
-		err("%s: Cannot allocate memory\n", __func__);
+		ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+			 __func__);
 		return;
 	}
 	info->p_slot = p_slot;
@@ -505,7 +513,7 @@
 	case INT_PRESENCE_OFF:
 		if (!HP_SUPR_RM(ctrl))
 			break;
-		dbg("Surprise Removal\n");
+		ctrl_dbg(ctrl, "Surprise Removal\n");
 		update_slot_info(p_slot);
 		handle_surprise_event(p_slot);
 		break;
@@ -522,22 +530,23 @@
 {
 	u8 getstatus = 0;
 	int rc;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Check to see if (latch closed, card present, power off) */
 	mutex_lock(&p_slot->ctrl->crit_sect);
 
 	rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
 	if (rc || !getstatus) {
-		info("%s: no adapter on slot(%s)\n", __func__,
-		     p_slot->name);
+		ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
+			  __func__, slot_name(p_slot));
 		mutex_unlock(&p_slot->ctrl->crit_sect);
 		return -ENODEV;
 	}
 	if (MRL_SENS(p_slot->ctrl)) {
 		rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 		if (rc || getstatus) {
-			info("%s: latch open on slot(%s)\n", __func__,
-			     p_slot->name);
+			ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
+				  __func__, slot_name(p_slot));
 			mutex_unlock(&p_slot->ctrl->crit_sect);
 			return -ENODEV;
 		}
@@ -546,8 +555,8 @@
 	if (POWER_CTRL(p_slot->ctrl)) {
 		rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
 		if (rc || getstatus) {
-			info("%s: already enabled on slot(%s)\n", __func__,
-			     p_slot->name);
+			ctrl_info(ctrl, "%s: already enabled on slot(%s)\n",
+				  __func__, slot_name(p_slot));
 			mutex_unlock(&p_slot->ctrl->crit_sect);
 			return -EINVAL;
 		}
@@ -571,6 +580,7 @@
 {
 	u8 getstatus = 0;
 	int ret = 0;
+	struct controller *ctrl = p_slot->ctrl;
 
 	if (!p_slot->ctrl)
 		return 1;
@@ -581,8 +591,8 @@
 	if (!HP_SUPR_RM(p_slot->ctrl)) {
 		ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
 		if (ret || !getstatus) {
-			info("%s: no adapter on slot(%s)\n", __func__,
-			     p_slot->name);
+			ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
+				  __func__, slot_name(p_slot));
 			mutex_unlock(&p_slot->ctrl->crit_sect);
 			return -ENODEV;
 		}
@@ -591,8 +601,8 @@
 	if (MRL_SENS(p_slot->ctrl)) {
 		ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 		if (ret || getstatus) {
-			info("%s: latch open on slot(%s)\n", __func__,
-			     p_slot->name);
+			ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
+				  __func__, slot_name(p_slot));
 			mutex_unlock(&p_slot->ctrl->crit_sect);
 			return -ENODEV;
 		}
@@ -601,8 +611,8 @@
 	if (POWER_CTRL(p_slot->ctrl)) {
 		ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
 		if (ret || !getstatus) {
-			info("%s: already disabled slot(%s)\n", __func__,
-			     p_slot->name);
+			ctrl_info(ctrl, "%s: already disabled slot(%s)\n",
+				  __func__, slot_name(p_slot));
 			mutex_unlock(&p_slot->ctrl->crit_sect);
 			return -EINVAL;
 		}
@@ -618,6 +628,7 @@
 int pciehp_sysfs_enable_slot(struct slot *p_slot)
 {
 	int retval = -ENODEV;
+	struct controller *ctrl = p_slot->ctrl;
 
 	mutex_lock(&p_slot->lock);
 	switch (p_slot->state) {
@@ -631,15 +642,17 @@
 		p_slot->state = STATIC_STATE;
 		break;
 	case POWERON_STATE:
-		info("Slot %s is already in powering on state\n",
-		     p_slot->name);
+		ctrl_info(ctrl, "Slot %s is already in powering on state\n",
+			  slot_name(p_slot));
 		break;
 	case BLINKINGOFF_STATE:
 	case POWEROFF_STATE:
-		info("Already enabled on slot %s\n", p_slot->name);
+		ctrl_info(ctrl, "Already enabled on slot %s\n",
+			  slot_name(p_slot));
 		break;
 	default:
-		err("Not a valid state on slot %s\n", p_slot->name);
+		ctrl_err(ctrl, "Not a valid state on slot %s\n",
+			 slot_name(p_slot));
 		break;
 	}
 	mutex_unlock(&p_slot->lock);
@@ -650,6 +663,7 @@
 int pciehp_sysfs_disable_slot(struct slot *p_slot)
 {
 	int retval = -ENODEV;
+	struct controller *ctrl = p_slot->ctrl;
 
 	mutex_lock(&p_slot->lock);
 	switch (p_slot->state) {
@@ -663,15 +677,17 @@
 		p_slot->state = STATIC_STATE;
 		break;
 	case POWEROFF_STATE:
-		info("Slot %s is already in powering off state\n",
-		     p_slot->name);
+		ctrl_info(ctrl, "Slot %s is already in powering off state\n",
+			  slot_name(p_slot));
 		break;
 	case BLINKINGON_STATE:
 	case POWERON_STATE:
-		info("Already disabled on slot %s\n", p_slot->name);
+		ctrl_info(ctrl, "Already disabled on slot %s\n",
+			  slot_name(p_slot));
 		break;
 	default:
-		err("Not a valid state on slot %s\n", p_slot->name);
+		ctrl_err(ctrl, "Not a valid state on slot %s\n",
+			 slot_name(p_slot));
 		break;
 	}
 	mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9d934dd..58c72d2 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -125,6 +125,7 @@
 /* Field definitions in Link Capabilities Register */
 #define MAX_LNK_SPEED		0x000F
 #define MAX_LNK_WIDTH		0x03F0
+#define LINK_ACTIVE_REPORTING	0x00100000
 
 /* Link Width Encoding */
 #define LNK_X1		0x01
@@ -141,6 +142,7 @@
 #define LNK_TRN_ERR	0x0400
 #define	LNK_TRN		0x0800
 #define SLOT_CLK_CONF	0x1000
+#define LINK_ACTIVE	0x2000
 
 /* Field definitions in Slot Capabilities Register */
 #define ATTN_BUTTN_PRSN	0x00000001
@@ -223,7 +225,7 @@
 
 static inline int pciehp_request_irq(struct controller *ctrl)
 {
-	int retval, irq = ctrl->pci_dev->irq;
+	int retval, irq = ctrl->pcie->irq;
 
 	/* Install interrupt polling timer. Start with 10 sec delay */
 	if (pciehp_poll_mode) {
@@ -235,7 +237,8 @@
 	/* Installs the interrupt handler */
 	retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
 	if (retval)
-		err("Cannot get irq %d for the hotplug controller\n", irq);
+		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
+			 irq);
 	return retval;
 }
 
@@ -244,7 +247,7 @@
 	if (pciehp_poll_mode)
 		del_timer_sync(&ctrl->poll_timer);
 	else
-		free_irq(ctrl->pci_dev->irq, ctrl);
+		free_irq(ctrl->pcie->irq, ctrl);
 }
 
 static int pcie_poll_cmd(struct controller *ctrl)
@@ -282,7 +285,7 @@
 	else
 		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
 	if (!rc)
-		dbg("Command not completed in 1000 msec\n");
+		ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
 }
 
 /**
@@ -301,7 +304,8 @@
 
 	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
 	if (retval) {
-		err("%s: Cannot read SLOTSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
+			 __func__);
 		goto out;
 	}
 
@@ -312,26 +316,28 @@
 			 * proceed forward to issue the next command according
 			 * to spec. Just print out the error message.
 			 */
-			dbg("%s: CMD_COMPLETED not clear after 1 sec.\n",
-			    __func__);
+			ctrl_dbg(ctrl,
+				 "%s: CMD_COMPLETED not clear after 1 sec.\n",
+				 __func__);
 		} else if (!NO_CMD_CMPL(ctrl)) {
 			/*
 			 * This controller seems to notify of command completed
 			 * event even though it supports none of power
 			 * controller, attention led, power led and EMI.
 			 */
-			dbg("%s: Unexpected CMD_COMPLETED. Need to wait for "
-			    "command completed event.\n", __func__);
+			ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Need to "
+				 "wait for command completed event.\n",
+				 __func__);
 			ctrl->no_cmd_complete = 0;
 		} else {
-			dbg("%s: Unexpected CMD_COMPLETED. Maybe the "
-			    "controller is broken.\n", __func__);
+			ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Maybe "
+				 "the controller is broken.\n", __func__);
 		}
 	}
 
 	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
 	if (retval) {
-		err("%s: Cannot read SLOTCTRL register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		goto out;
 	}
 
@@ -341,7 +347,8 @@
 	smp_mb();
 	retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
 	if (retval)
-		err("%s: Cannot write to SLOTCTRL register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot write to SLOTCTRL register\n",
+			 __func__);
 
 	/*
 	 * Wait for command completion.
@@ -363,21 +370,63 @@
 	return retval;
 }
 
+static inline int check_link_active(struct controller *ctrl)
+{
+	u16 link_status;
+
+	if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
+		return 0;
+	return !!(link_status & LINK_ACTIVE);
+}
+
+static void pcie_wait_link_active(struct controller *ctrl)
+{
+	int timeout = 1000;
+
+	if (check_link_active(ctrl))
+		return;
+	while (timeout > 0) {
+		msleep(10);
+		timeout -= 10;
+		if (check_link_active(ctrl))
+			return;
+	}
+	ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
+}
+
 static int hpc_check_lnk_status(struct controller *ctrl)
 {
 	u16 lnk_status;
 	int retval = 0;
 
+	/*
+	 * Data Link Layer Link Active Reporting must be supported by a
+	 * hot-plug capable downstream port, but an old controller might
+	 * not implement it.  In that case, wait for 1000 ms.
+	 */
+	if (ctrl->link_active_reporting) {
+		/* Wait for Data Link Layer Link Active bit to be set */
+		pcie_wait_link_active(ctrl);
+		/*
+		 * We must wait for 100 ms after the Data Link Layer
+		 * Link Active bit reads 1b before initiating a
+		 * configuration access to the hot added device.
+		 */
+		msleep(100);
+	} else
+		msleep(1000);
+
 	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
 	if (retval) {
-		err("%s: Cannot read LNKSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
+			 __func__);
 		return retval;
 	}
 
-	dbg("%s: lnk_status = %x\n", __func__, lnk_status);
+	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 	if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
 		!(lnk_status & NEG_LINK_WD)) {
-		err("%s : Link Training Error occurs \n", __func__);
+		ctrl_err(ctrl, "%s : Link Training Error occurs \n", __func__);
 		retval = -1;
 		return retval;
 	}
@@ -394,12 +443,12 @@
 
 	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
 	if (retval) {
-		err("%s: Cannot read SLOTCTRL register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		return retval;
 	}
 
-	dbg("%s: SLOTCTRL %x, value read %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
 
 	atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
 
@@ -433,11 +482,11 @@
 
 	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
 	if (retval) {
-		err("%s: Cannot read SLOTCTRL register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		return retval;
 	}
-	dbg("%s: SLOTCTRL %x value read %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
 
 	pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
 
@@ -464,7 +513,8 @@
 
 	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
 	if (retval) {
-		err("%s: Cannot read SLOTSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
+			 __func__);
 		return retval;
 	}
 
@@ -482,7 +532,8 @@
 
 	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
 	if (retval) {
-		err("%s: Cannot read SLOTSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
+			 __func__);
 		return retval;
 	}
 	card_state = (u8)((slot_status & PRSN_STATE) >> 6);
@@ -500,7 +551,7 @@
 
 	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
 	if (retval) {
-		err("%s: Cannot check for power fault\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot check for power fault\n", __func__);
 		return retval;
 	}
 	pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
@@ -516,7 +567,7 @@
 
 	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
 	if (retval) {
-		err("%s : Cannot check EMI status\n", __func__);
+		ctrl_err(ctrl, "%s : Cannot check EMI status\n", __func__);
 		return retval;
 	}
 	*status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
@@ -560,8 +611,8 @@
 			return -1;
 	}
 	rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	dbg("%s: SLOTCTRL %x write cmd %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 
 	return rc;
 }
@@ -575,8 +626,8 @@
 	slot_cmd = 0x0100;
 	cmd_mask = PWR_LED_CTRL;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	dbg("%s: SLOTCTRL %x write cmd %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
 
 static void hpc_set_green_led_off(struct slot *slot)
@@ -588,8 +639,8 @@
 	slot_cmd = 0x0300;
 	cmd_mask = PWR_LED_CTRL;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	dbg("%s: SLOTCTRL %x write cmd %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
 
 static void hpc_set_green_led_blink(struct slot *slot)
@@ -601,8 +652,8 @@
 	slot_cmd = 0x0200;
 	cmd_mask = PWR_LED_CTRL;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	dbg("%s: SLOTCTRL %x write cmd %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
 
 static int hpc_power_on_slot(struct slot * slot)
@@ -613,20 +664,22 @@
 	u16 slot_status;
 	int retval = 0;
 
-	dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
+	ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
 
 	/* Clear sticky power-fault bit from previous power failures */
 	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
 	if (retval) {
-		err("%s: Cannot read SLOTSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
+			 __func__);
 		return retval;
 	}
 	slot_status &= PWR_FAULT_DETECTED;
 	if (slot_status) {
 		retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
 		if (retval) {
-			err("%s: Cannot write to SLOTSTATUS register\n",
-			    __func__);
+			ctrl_err(ctrl,
+				 "%s: Cannot write to SLOTSTATUS register\n",
+				 __func__);
 			return retval;
 		}
 	}
@@ -644,11 +697,12 @@
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 
 	if (retval) {
-		err("%s: Write %x command failed!\n", __func__, slot_cmd);
+		ctrl_err(ctrl, "%s: Write %x command failed!\n",
+			 __func__, slot_cmd);
 		return -1;
 	}
-	dbg("%s: SLOTCTRL %x write cmd %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 
 	return retval;
 }
@@ -694,7 +748,7 @@
 	int retval = 0;
 	int changed;
 
-	dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
+	ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
 
 	/*
 	 * Set Bad DLLP Mask bit in Correctable Error Mask
@@ -722,12 +776,12 @@
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	if (retval) {
-		err("%s: Write command failed!\n", __func__);
+		ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
 		retval = -1;
 		goto out;
 	}
-	dbg("%s: SLOTCTRL %x write cmd %x\n",
-	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
+		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
  out:
 	if (changed)
 		pcie_unmask_bad_dllp(ctrl);
@@ -749,7 +803,8 @@
 	intr_loc = 0;
 	do {
 		if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
-			err("%s: Cannot read SLOTSTATUS\n", __func__);
+			ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
+				 __func__);
 			return IRQ_NONE;
 		}
 
@@ -760,12 +815,13 @@
 		if (!intr_loc)
 			return IRQ_NONE;
 		if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
-			err("%s: Cannot write to SLOTSTATUS\n", __func__);
+			ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
+				 __func__);
 			return IRQ_NONE;
 		}
 	} while (detected);
 
-	dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc);
+	ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
 
 	/* Check Command Complete Interrupt Pending */
 	if (intr_loc & CMD_COMPLETED) {
@@ -807,7 +863,7 @@
 
 	retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
 	if (retval) {
-		err("%s: Cannot read LNKCAP register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
 		return retval;
 	}
 
@@ -821,7 +877,7 @@
 	}
 
 	*value = lnk_speed;
-	dbg("Max link speed = %d\n", lnk_speed);
+	ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed);
 
 	return retval;
 }
@@ -836,7 +892,7 @@
 
 	retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
 	if (retval) {
-		err("%s: Cannot read LNKCAP register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
 		return retval;
 	}
 
@@ -871,7 +927,7 @@
 	}
 
 	*value = lnk_wdth;
-	dbg("Max link width = %d\n", lnk_wdth);
+	ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth);
 
 	return retval;
 }
@@ -885,7 +941,8 @@
 
 	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
 	if (retval) {
-		err("%s: Cannot read LNKSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
+			 __func__);
 		return retval;
 	}
 
@@ -899,7 +956,7 @@
 	}
 
 	*value = lnk_speed;
-	dbg("Current link speed = %d\n", lnk_speed);
+	ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed);
 
 	return retval;
 }
@@ -914,7 +971,8 @@
 
 	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
 	if (retval) {
-		err("%s: Cannot read LNKSTATUS register\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
+			 __func__);
 		return retval;
 	}
 
@@ -949,7 +1007,7 @@
 	}
 
 	*value = lnk_wdth;
-	dbg("Current link width = %d\n", lnk_wdth);
+	ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth);
 
 	return retval;
 }
@@ -998,7 +1056,8 @@
 	       PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
 
 	if (pcie_write_cmd(ctrl, cmd, mask)) {
-		err("%s: Cannot enable software notification\n", __func__);
+		ctrl_err(ctrl, "%s: Cannot enable software notification\n",
+			 __func__);
 		return -1;
 	}
 	return 0;
@@ -1010,7 +1069,8 @@
 	mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
 	       PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
 	if (pcie_write_cmd(ctrl, 0, mask))
-		warn("%s: Cannot disable software notification\n", __func__);
+		ctrl_warn(ctrl, "%s: Cannot disable software notification\n",
+			  __func__);
 }
 
 static int pcie_init_notification(struct controller *ctrl)
@@ -1044,7 +1104,6 @@
 	slot->device = ctrl->slot_device_offset + slot->hp_slot;
 	slot->hpc_ops = ctrl->hpc_ops;
 	slot->number = ctrl->first_slot;
-	snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
 	mutex_init(&slot->lock);
 	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
 	list_add(&slot->slot_list, &ctrl->slot_list);
@@ -1071,58 +1130,71 @@
 	if (!pciehp_debug)
 		return;
 
-	dbg("Hotplug Controller:\n");
-	dbg("  Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq);
-	dbg("  Vendor ID            : 0x%04x\n", pdev->vendor);
-	dbg("  Device ID            : 0x%04x\n", pdev->device);
-	dbg("  Subsystem ID         : 0x%04x\n", pdev->subsystem_device);
-	dbg("  Subsystem Vendor ID  : 0x%04x\n", pdev->subsystem_vendor);
-	dbg("  PCIe Cap offset      : 0x%02x\n", ctrl->cap_base);
+	ctrl_info(ctrl, "Hotplug Controller:\n");
+	ctrl_info(ctrl, "  Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
+		  pci_name(pdev), pdev->irq);
+	ctrl_info(ctrl, "  Vendor ID            : 0x%04x\n", pdev->vendor);
+	ctrl_info(ctrl, "  Device ID            : 0x%04x\n", pdev->device);
+	ctrl_info(ctrl, "  Subsystem ID         : 0x%04x\n",
+		  pdev->subsystem_device);
+	ctrl_info(ctrl, "  Subsystem Vendor ID  : 0x%04x\n",
+		  pdev->subsystem_vendor);
+	ctrl_info(ctrl, "  PCIe Cap offset      : 0x%02x\n", ctrl->cap_base);
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		if (!pci_resource_len(pdev, i))
 			continue;
-		dbg("  PCI resource [%d]     : 0x%llx@0x%llx\n", i,
-		    (unsigned long long)pci_resource_len(pdev, i),
-		    (unsigned long long)pci_resource_start(pdev, i));
+		ctrl_info(ctrl, "  PCI resource [%d]     : 0x%llx@0x%llx\n",
+			  i, (unsigned long long)pci_resource_len(pdev, i),
+			  (unsigned long long)pci_resource_start(pdev, i));
 	}
-	dbg("Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
-	dbg("  Physical Slot Number : %d\n", ctrl->first_slot);
-	dbg("  Attention Button     : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no");
-	dbg("  Power Controller     : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no");
-	dbg("  MRL Sensor           : %3s\n", MRL_SENS(ctrl)   ? "yes" : "no");
-	dbg("  Attention Indicator  : %3s\n", ATTN_LED(ctrl)   ? "yes" : "no");
-	dbg("  Power Indicator      : %3s\n", PWR_LED(ctrl)    ? "yes" : "no");
-	dbg("  Hot-Plug Surprise    : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no");
-	dbg("  EMI Present          : %3s\n", EMI(ctrl)        ? "yes" : "no");
-	dbg("  Command Completed    : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes");
+	ctrl_info(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
+	ctrl_info(ctrl, "  Physical Slot Number : %d\n", ctrl->first_slot);
+	ctrl_info(ctrl, "  Attention Button     : %3s\n",
+		  ATTN_BUTTN(ctrl) ? "yes" : "no");
+	ctrl_info(ctrl, "  Power Controller     : %3s\n",
+		  POWER_CTRL(ctrl) ? "yes" : "no");
+	ctrl_info(ctrl, "  MRL Sensor           : %3s\n",
+		  MRL_SENS(ctrl)   ? "yes" : "no");
+	ctrl_info(ctrl, "  Attention Indicator  : %3s\n",
+		  ATTN_LED(ctrl)   ? "yes" : "no");
+	ctrl_info(ctrl, "  Power Indicator      : %3s\n",
+		  PWR_LED(ctrl)    ? "yes" : "no");
+	ctrl_info(ctrl, "  Hot-Plug Surprise    : %3s\n",
+		  HP_SUPR_RM(ctrl) ? "yes" : "no");
+	ctrl_info(ctrl, "  EMI Present          : %3s\n",
+		  EMI(ctrl)        ? "yes" : "no");
+	ctrl_info(ctrl, "  Command Completed    : %3s\n",
+		  NO_CMD_CMPL(ctrl) ? "no" : "yes");
 	pciehp_readw(ctrl, SLOTSTATUS, &reg16);
-	dbg("Slot Status            : 0x%04x\n", reg16);
+	ctrl_info(ctrl, "Slot Status            : 0x%04x\n", reg16);
 	pciehp_readw(ctrl, SLOTCTRL, &reg16);
-	dbg("Slot Control           : 0x%04x\n", reg16);
+	ctrl_info(ctrl, "Slot Control           : 0x%04x\n", reg16);
 }
 
 struct controller *pcie_init(struct pcie_device *dev)
 {
 	struct controller *ctrl;
-	u32 slot_cap;
+	u32 slot_cap, link_cap;
 	struct pci_dev *pdev = dev->port;
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl) {
-		err("%s : out of memory\n", __func__);
+		dev_err(&dev->device, "%s : out of memory\n", __func__);
 		goto abort;
 	}
 	INIT_LIST_HEAD(&ctrl->slot_list);
 
+	ctrl->pcie = dev;
 	ctrl->pci_dev = pdev;
 	ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (!ctrl->cap_base) {
-		err("%s: Cannot find PCI Express capability\n", __func__);
-		goto abort;
+		ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n",
+			 __func__);
+		goto abort_ctrl;
 	}
 	if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
-		err("%s: Cannot read SLOTCAP register\n", __func__);
-		goto abort;
+		ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__);
+		goto abort_ctrl;
 	}
 
 	ctrl->slot_cap = slot_cap;
@@ -1144,6 +1216,16 @@
 	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
 	    ctrl->no_cmd_complete = 1;
 
+	/* Check if Data Link Layer Link Active Reporting is implemented */
+	if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
+		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
+		goto abort_ctrl;
+	}
+	if (link_cap & LINK_ACTIVE_REPORTING) {
+		ctrl_dbg(ctrl, "Link Active Reporting supported\n");
+		ctrl->link_active_reporting = 1;
+	}
+
 	/* Clear all remaining event bits in Slot Status register */
 	if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
 		goto abort_ctrl;
@@ -1161,9 +1243,9 @@
 			goto abort_ctrl;
 	}
 
-	info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
-	     pdev->vendor, pdev->device,
-	     pdev->subsystem_vendor, pdev->subsystem_device);
+	ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
+		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
+		  pdev->subsystem_device);
 
 	if (pcie_init_slot(ctrl))
 		goto abort_ctrl;
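Together with the pciehp_ctrl.c hunk that removes the unconditional msleep(1000) from board_added(), the pciehp_hpc.c changes above move the post-power-on wait into hpc_check_lnk_status(): ports that advertise Data Link Layer Link Active Reporting are polled for the Link Active bit and then given a further 100 ms before the first configuration access, while older ports keep the fixed 1000 ms delay. For reference, the two register bits involved (values as defined in the hunk; the helper below is a hypothetical restatement of check_link_active()):

	/* Link Capabilities, bit 20: Data Link Layer Link Active Reporting Capable */
	#define LINK_ACTIVE_REPORTING	0x00100000
	/* Link Status, bit 13: Data Link Layer Link Active */
	#define LINK_ACTIVE		0x2000

	static int example_link_is_up(struct controller *ctrl)
	{
		u16 lnk_status;

		if (pciehp_readw(ctrl, LNKSTATUS, &lnk_status))
			return 0;
		return !!(lnk_status & LINK_ACTIVE);
	}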
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 6040dcc..ffd1114 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -198,18 +198,20 @@
 	struct pci_dev *dev;
 	struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
 	int num, fn;
+	struct controller *ctrl = p_slot->ctrl;
 
 	dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
 	if (dev) {
-		err("Device %s already exists at %x:%x, cannot hot-add\n",
-				pci_name(dev), p_slot->bus, p_slot->device);
+		ctrl_err(ctrl,
+			 "Device %s already exists at %x:%x, cannot hot-add\n",
+			 pci_name(dev), p_slot->bus, p_slot->device);
 		pci_dev_put(dev);
 		return -EINVAL;
 	}
 
 	num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
 	if (num == 0) {
-		err("No new device found\n");
+		ctrl_err(ctrl, "No new device found\n");
 		return -ENODEV;
 	}
 
@@ -218,8 +220,8 @@
 		if (!dev)
 			continue;
 		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
-			err("Cannot hot-add display device %s\n",
-					pci_name(dev));
+			ctrl_err(ctrl, "Cannot hot-add display device %s\n",
+				 pci_name(dev));
 			pci_dev_put(dev);
 			continue;
 		}
@@ -244,9 +246,10 @@
 	u8 presence = 0;
 	struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
 	u16 command;
+	struct controller *ctrl = p_slot->ctrl;
 
-	dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus,
-				p_slot->device);
+	ctrl_dbg(ctrl, "%s: bus/dev = %x/%x\n", __func__,
+		 p_slot->bus, p_slot->device);
 	ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence);
 	if (ret)
 		presence = 0;
@@ -257,16 +260,17 @@
 		if (!temp)
 			continue;
 		if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
-			err("Cannot remove display device %s\n",
-					pci_name(temp));
+			ctrl_err(ctrl, "Cannot remove display device %s\n",
+				 pci_name(temp));
 			pci_dev_put(temp);
 			continue;
 		}
 		if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
 			pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
 			if (bctl & PCI_BRIDGE_CTL_VGA) {
-				err("Cannot remove display device %s\n",
-				    pci_name(temp));
+				ctrl_err(ctrl,
+					 "Cannot remove display device %s\n",
+					 pci_name(temp));
 				pci_dev_put(temp);
 				continue;
 			}
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 7d5921b..419919a 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -46,10 +46,10 @@
 #define PRESENT         1	/* Card in slot */
 
 #define MY_NAME "rpaphp"
-extern int debug;
+extern int rpaphp_debug;
 #define dbg(format, arg...)					\
 	do {							\
-		if (debug)					\
+		if (rpaphp_debug)					\
 			printk(KERN_DEBUG "%s: " format,	\
 				MY_NAME , ## arg); 		\
 	} while (0)
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 1f84f40..95d02a0 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -37,7 +37,7 @@
 				/* and pci_do_scan_bus */
 #include "rpaphp.h"
 
-int debug;
+int rpaphp_debug;
 LIST_HEAD(rpaphp_slot_head);
 
 #define DRIVER_VERSION	"0.1"
@@ -50,7 +50,7 @@
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-module_param(debug, bool, 0644);
+module_param_named(debug, rpaphp_debug, bool, 0644);
 
 /**
  * set_attention_status - set attention LED
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 5acfd4f..513e1e2 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -123,7 +123,7 @@
 			slot->state = CONFIGURED;
 		}
 
-		if (debug) {
+		if (rpaphp_debug) {
 			struct pci_dev *dev;
 			dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name);
 			list_for_each_entry (dev, &bus->devices, bus_list)
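The rpaphp hunks above rename the global debug flag from debug to the driver-prefixed rpaphp_debug while keeping the user-visible module parameter name unchanged through module_param_named(). A minimal sketch of the idiom, illustrative only:

	/* The modprobe/sysfs parameter is still called "debug";
	 * only the C identifier gains the rpaphp_ prefix.
	 */
	int rpaphp_debug;
	module_param_named(debug, rpaphp_debug, bool, 0644);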
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 5088450..2ea9cf1 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -43,7 +43,7 @@
 void dealloc_slot_struct(struct slot *slot)
 {
 	kfree(slot->hotplug_slot->info);
-	kfree(slot->hotplug_slot->name);
+	kfree(slot->name);
 	kfree(slot->hotplug_slot);
 	kfree(slot);
 }
@@ -63,11 +63,9 @@
 					   GFP_KERNEL);
 	if (!slot->hotplug_slot->info)
 		goto error_hpslot;
-	slot->hotplug_slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL);
-	if (!slot->hotplug_slot->name)
+	slot->name = kstrdup(drc_name, GFP_KERNEL);
+	if (!slot->name)
 		goto error_info;	
-	slot->name = slot->hotplug_slot->name;
-	strcpy(slot->name, drc_name);
 	slot->dn = dn;
 	slot->index = drc_index;
 	slot->power_domain = power_domain;
@@ -137,7 +135,7 @@
 		slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
 	else
 		slotno = -1;
-	retval = pci_hp_register(php_slot, slot->bus, slotno);
+	retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
 	if (retval) {
 		err("pci_hp_register failed with error %d\n", retval);
 		return retval;
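The rpaphp_slot.c hunk above also drops the separately allocated hotplug_slot->name: the slot keeps its own copy via kstrdup(), which folds the old kmalloc()+strcpy() pair into one call, and that name is passed to pci_hp_register() explicitly. A minimal sketch of the kstrdup() idiom (example_set_name() is hypothetical):

	static int example_set_name(struct slot *slot, const char *drc_name)
	{
		/* allocates strlen(drc_name) + 1 bytes and copies in one call;
		 * kstrdup() returns NULL on failure, and the copy is later
		 * released with kfree() just like a kmalloc()ed buffer
		 */
		slot->name = kstrdup(drc_name, GFP_KERNEL);
		return slot->name ? 0 : -ENOMEM;
	}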
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 410fe03..d748698 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -161,7 +161,8 @@
 }
 
 static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
-				    struct pci_bus *pci_bus, int device)
+				    struct pci_bus *pci_bus, int device,
+				    char *name)
 {
 	struct pcibus_info *pcibus_info;
 	struct slot *slot;
@@ -173,15 +174,9 @@
 		return -ENOMEM;
 	bss_hotplug_slot->private = slot;
 
-	bss_hotplug_slot->name = kmalloc(SN_SLOT_NAME_SIZE, GFP_KERNEL);
-	if (!bss_hotplug_slot->name) {
-		kfree(bss_hotplug_slot->private);
-		return -ENOMEM;
-	}
-
 	slot->device_num = device;
 	slot->pci_bus = pci_bus;
-	sprintf(bss_hotplug_slot->name, "%04x:%02x:%02x",
+	sprintf(name, "%04x:%02x:%02x",
 		pci_domain_nr(pci_bus),
 		((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
 		device + 1);
@@ -608,7 +603,6 @@
 static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
 {
 	kfree(bss_hotplug_slot->info);
-	kfree(bss_hotplug_slot->name);
 	kfree(bss_hotplug_slot->private);
 	kfree(bss_hotplug_slot);
 }
@@ -618,6 +612,7 @@
 	int device;
 	struct pci_slot *pci_slot;
 	struct hotplug_slot *bss_hotplug_slot;
+	char name[SN_SLOT_NAME_SIZE];
 	int rc = 0;
 
 	/*
@@ -645,15 +640,14 @@
 		}
 
 		if (sn_hp_slot_private_alloc(bss_hotplug_slot,
-					     pci_bus, device)) {
+					     pci_bus, device, name)) {
 			rc = -ENOMEM;
 			goto alloc_err;
 		}
-
 		bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
 		bss_hotplug_slot->release = &sn_release_slot;
 
-		rc = pci_hp_register(bss_hotplug_slot, pci_bus, device);
+		rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
 		if (rc)
 			goto register_err;
 
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8a026f7..4d9fed0 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -69,15 +69,13 @@
 	u8 state;
 	u8 presence_save;
 	u8 pwr_save;
-	struct timer_list task_event;
-	u8 hp_slot;
 	struct controller *ctrl;
 	struct hpc_ops *hpc_ops;
 	struct hotplug_slot *hotplug_slot;
 	struct list_head	slot_list;
-	char name[SLOT_NAME_SIZE];
 	struct delayed_work work;	/* work for button event */
 	struct mutex lock;
+	u8 hp_slot;
 };
 
 struct event_info {
@@ -169,6 +167,11 @@
 extern void shpchp_queue_pushbutton_work(struct work_struct *work);
 extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
 
+static inline const char *slot_name(struct slot *slot)
+{
+	return hotplug_slot_name(slot->hotplug_slot);
+}
+
 #ifdef CONFIG_ACPI
 #include <linux/pci-acpi.h>
 static inline int get_hp_params_from_firmware(struct pci_dev *dev,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index cc38615..7af9191 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -89,7 +89,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	kfree(slot->hotplug_slot->info);
 	kfree(slot->hotplug_slot);
@@ -101,8 +101,9 @@
 	struct slot *slot;
 	struct hotplug_slot *hotplug_slot;
 	struct hotplug_slot_info *info;
+	char name[SLOT_NAME_SIZE];
 	int retval = -ENOMEM;
-	int i, len, dup = 1;
+	int i;
 
 	for (i = 0; i < ctrl->num_slots; i++) {
 		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
@@ -119,8 +120,6 @@
 			goto error_hpslot;
 		hotplug_slot->info = info;
 
-		hotplug_slot->name = slot->name;
-
 		slot->hp_slot = i;
 		slot->ctrl = ctrl;
 		slot->bus = ctrl->pci_dev->subordinate->number;
@@ -133,37 +132,24 @@
 		/* register this slot with the hotplug pci core */
 		hotplug_slot->private = slot;
 		hotplug_slot->release = &release_slot;
-		snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
+		snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
 		hotplug_slot->ops = &shpchp_hotplug_slot_ops;
 
+		dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
+		    "slot_device_offset=%x\n", slot->bus, slot->device,
+		    slot->hp_slot, slot->number, ctrl->slot_device_offset);
+		retval = pci_hp_register(slot->hotplug_slot,
+				ctrl->pci_dev->subordinate, slot->device, name);
+		if (retval) {
+			err("pci_hp_register failed with error %d\n", retval);
+			goto error_info;
+		}
+
 		get_power_status(hotplug_slot, &info->power_status);
 		get_attention_status(hotplug_slot, &info->attention_status);
 		get_latch_status(hotplug_slot, &info->latch_status);
 		get_adapter_status(hotplug_slot, &info->adapter_status);
 
-		dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
-		    "slot_device_offset=%x\n", slot->bus, slot->device,
-		    slot->hp_slot, slot->number, ctrl->slot_device_offset);
-duplicate_name:
-		retval = pci_hp_register(slot->hotplug_slot,
-				ctrl->pci_dev->subordinate, slot->device);
-		if (retval) {
-			/*
-			 * If slot N already exists, we'll try to create
-			 * slot N-1, N-2 ... N-M, until we overflow.
-			 */
-			if (retval == -EEXIST) {
-				len = snprintf(slot->name, SLOT_NAME_SIZE,
-					       "%d-%d", slot->number, dup++);
-				if (len < SLOT_NAME_SIZE)
-					goto duplicate_name;
-				else
-					err("duplicate slot name overflow\n");
-			}
-			err("pci_hp_register failed with error %d\n", retval);
-			goto error_info;
-		}
-
 		list_add(&slot->slot_list, &ctrl->slot_list);
 	}
 
@@ -201,7 +187,7 @@
 {
 	struct slot *slot = get_slot(hotplug_slot);
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	hotplug_slot->info->attention_status = status;
 	slot->hpc_ops->set_attention_status(slot, status);
@@ -213,7 +199,7 @@
 {
 	struct slot *slot = get_slot(hotplug_slot);
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	return shpchp_sysfs_enable_slot(slot);
 }
@@ -222,7 +208,7 @@
 {
 	struct slot *slot = get_slot(hotplug_slot);
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	return shpchp_sysfs_disable_slot(slot);
 }
@@ -232,7 +218,7 @@
 	struct slot *slot = get_slot(hotplug_slot);
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_power_status(slot, value);
 	if (retval < 0)
@@ -246,7 +232,7 @@
 	struct slot *slot = get_slot(hotplug_slot);
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_attention_status(slot, value);
 	if (retval < 0)
@@ -260,7 +246,7 @@
 	struct slot *slot = get_slot(hotplug_slot);
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_latch_status(slot, value);
 	if (retval < 0)
@@ -274,7 +260,7 @@
 	struct slot *slot = get_slot(hotplug_slot);
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_adapter_status(slot, value);
 	if (retval < 0)
@@ -289,7 +275,7 @@
 	struct slot *slot = get_slot(hotplug_slot);
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_max_bus_speed(slot, value);
 	if (retval < 0)
@@ -303,7 +289,7 @@
 	struct slot *slot = get_slot(hotplug_slot);
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
 	if (retval < 0)
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index dfb5393..919b1ee 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -70,7 +70,7 @@
 	/*
 	 *  Button pressed - See if need to TAKE ACTION!!!
 	 */
-	info("Button pressed on Slot(%s)\n", p_slot->name);
+	info("Button pressed on Slot(%s)\n", slot_name(p_slot));
 	event_type = INT_BUTTON_PRESS;
 
 	queue_interrupt_event(p_slot, event_type);
@@ -98,7 +98,7 @@
 		/*
 		 * Switch opened
 		 */
-		info("Latch open on Slot(%s)\n", p_slot->name);
+		info("Latch open on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_SWITCH_OPEN;
 		if (p_slot->pwr_save && p_slot->presence_save) {
 			event_type = INT_POWER_FAULT;
@@ -108,7 +108,7 @@
 		/*
 		 *  Switch closed
 		 */
-		info("Latch close on Slot(%s)\n", p_slot->name);
+		info("Latch close on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_SWITCH_CLOSE;
 	}
 
@@ -135,13 +135,13 @@
 		/*
 		 * Card Present
 		 */
-		info("Card present on Slot(%s)\n", p_slot->name);
+		info("Card present on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_PRESENCE_ON;
 	} else {
 		/*
 		 * Not Present
 		 */
-		info("Card not present on Slot(%s)\n", p_slot->name);
+		info("Card not present on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_PRESENCE_OFF;
 	}
 
@@ -164,14 +164,14 @@
 		/*
 		 * Power fault Cleared
 		 */
-		info("Power fault cleared on Slot(%s)\n", p_slot->name);
+		info("Power fault cleared on Slot(%s)\n", slot_name(p_slot));
 		p_slot->status = 0x00;
 		event_type = INT_POWER_FAULT_CLEAR;
 	} else {
 		/*
 		 *   Power fault
 		 */
-		info("Power fault on Slot(%s)\n", p_slot->name);
+		info("Power fault on Slot(%s)\n", slot_name(p_slot));
 		event_type = INT_POWER_FAULT;
 		/* set power fault status for this board */
 		p_slot->status = 0xFF;
@@ -493,11 +493,11 @@
 		if (getstatus) {
 			p_slot->state = BLINKINGOFF_STATE;
 			info("PCI slot #%s - powering off due to button "
-			     "press.\n", p_slot->name);
+			     "press.\n", slot_name(p_slot));
 		} else {
 			p_slot->state = BLINKINGON_STATE;
 			info("PCI slot #%s - powering on due to button "
-			     "press.\n", p_slot->name);
+			     "press.\n", slot_name(p_slot));
 		}
 		/* blink green LED and turn off amber */
 		p_slot->hpc_ops->green_led_blink(p_slot);
@@ -512,7 +512,7 @@
 		 * press the attention again before the 5 sec. limit
 		 * expires to cancel hot-add or hot-remove
 		 */
-		info("Button cancel on Slot(%s)\n", p_slot->name);
+		info("Button cancel on Slot(%s)\n", slot_name(p_slot));
 		dbg("%s: button cancel\n", __func__);
 		cancel_delayed_work(&p_slot->work);
 		if (p_slot->state == BLINKINGOFF_STATE)
@@ -521,7 +521,7 @@
 			p_slot->hpc_ops->green_led_off(p_slot);
 		p_slot->hpc_ops->set_attention_status(p_slot, 0);
 		info("PCI slot #%s - action canceled due to button press\n",
-		     p_slot->name);
+		     slot_name(p_slot));
 		p_slot->state = STATIC_STATE;
 		break;
 	case POWEROFF_STATE:
@@ -531,7 +531,7 @@
 		 * this means that the previous attention button action
 		 * to hot-add or hot-remove is undergoing
 		 */
-		info("Button ignore on Slot(%s)\n", p_slot->name);
+		info("Button ignore on Slot(%s)\n", slot_name(p_slot));
 		update_slot_info(p_slot);
 		break;
 	default:
@@ -574,17 +574,17 @@
 	mutex_lock(&p_slot->ctrl->crit_sect);
 	rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
 	if (rc || !getstatus) {
-		info("No adapter on slot(%s)\n", p_slot->name);
+		info("No adapter on slot(%s)\n", slot_name(p_slot));
 		goto out;
 	}
 	rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 	if (rc || getstatus) {
-		info("Latch open on slot(%s)\n", p_slot->name);
+		info("Latch open on slot(%s)\n", slot_name(p_slot));
 		goto out;
 	}
 	rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
 	if (rc || getstatus) {
-		info("Already enabled on slot(%s)\n", p_slot->name);
+		info("Already enabled on slot(%s)\n", slot_name(p_slot));
 		goto out;
 	}
 
@@ -633,17 +633,17 @@
 
 	rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
 	if (rc || !getstatus) {
-		info("No adapter on slot(%s)\n", p_slot->name);
+		info("No adapter on slot(%s)\n", slot_name(p_slot));
 		goto out;
 	}
 	rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 	if (rc || getstatus) {
-		info("Latch open on slot(%s)\n", p_slot->name);
+		info("Latch open on slot(%s)\n", slot_name(p_slot));
 		goto out;
 	}
 	rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
 	if (rc || !getstatus) {
-		info("Already disabled slot(%s)\n", p_slot->name);
+		info("Already disabled slot(%s)\n", slot_name(p_slot));
 		goto out;
 	}
 
@@ -671,14 +671,14 @@
 		break;
 	case POWERON_STATE:
 		info("Slot %s is already in powering on state\n",
-		     p_slot->name);
+		     slot_name(p_slot));
 		break;
 	case BLINKINGOFF_STATE:
 	case POWEROFF_STATE:
-		info("Already enabled on slot %s\n", p_slot->name);
+		info("Already enabled on slot %s\n", slot_name(p_slot));
 		break;
 	default:
-		err("Not a valid state on slot %s\n", p_slot->name);
+		err("Not a valid state on slot %s\n", slot_name(p_slot));
 		break;
 	}
 	mutex_unlock(&p_slot->lock);
@@ -703,14 +703,14 @@
 		break;
 	case POWEROFF_STATE:
 		info("Slot %s is already in powering off state\n",
-		     p_slot->name);
+		     slot_name(p_slot));
 		break;
 	case BLINKINGON_STATE:
 	case POWERON_STATE:
-		info("Already disabled on slot %s\n", p_slot->name);
+		info("Already disabled on slot %s\n", slot_name(p_slot));
 		break;
 	default:
-		err("Not a valid state on slot %s\n", p_slot->name);
+		err("Not a valid state on slot %s\n", slot_name(p_slot));
 		break;
 	}
 	mutex_unlock(&p_slot->lock);
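
The hunks above mechanically replace the driver-private p_slot->name with a slot_name() accessor in every log message. A minimal sketch of what such an accessor presumably looks like (the exact definition lives in the hotplug driver's header, and hotplug_slot_name() is assumed to be the core helper being adopted):

    static inline const char *slot_name(struct slot *slot)
    {
            /* the name is owned by the PCI hotplug core rather than
             * duplicated in the driver's private struct slot
             */
            return hotplug_slot_name(slot->hotplug_slot);
    }
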
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 279c940..bf7d6ce 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -126,7 +126,8 @@
 	cfg->msg.address_hi = 0xffffffff;
 
 	irq = create_irq();
-	if (irq < 0) {
+
+	if (irq <= 0) {
 		kfree(cfg);
 		return -EBUSY;
 	}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index fc5f2db..a269272 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
  * Author: Ashok Raj <ashok.raj@intel.com>
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
@@ -35,11 +36,13 @@
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
 
+#define ROOT_SIZE		VTD_PAGE_SIZE
+#define CONTEXT_SIZE		VTD_PAGE_SIZE
+
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
@@ -199,7 +202,7 @@
 			spin_unlock_irqrestore(&iommu->lock, flags);
 			return NULL;
 		}
-		__iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
 		phy_addr = virt_to_phys((void *)context);
 		set_root_value(root, phy_addr);
 		set_root_present(root);
@@ -345,7 +348,7 @@
 				return NULL;
 			}
 			__iommu_flush_cache(domain->iommu, tmp_page,
-					PAGE_SIZE_4K);
+					PAGE_SIZE);
 			dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
-	start = PAGE_ALIGN_4K(start);
-	end &= PAGE_MASK_4K;
+	start = PAGE_ALIGN(start);
+	end &= PAGE_MASK;
 
 	/* we don't need lock here, nobody else touches the iova range */
 	while (start < end) {
 		dma_pte_clear_one(domain, start);
-		start += PAGE_SIZE_4K;
+		start += VTD_PAGE_SIZE;
 	}
 }
 
@@ -468,7 +471,7 @@
 	if (!root)
 		return -ENOMEM;
 
-	__iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iommu->root_entry = root;
@@ -563,31 +566,10 @@
 
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
 
-	/* flush context entry will implictly flush write buffer */
+	/* flush context entry will implicitly flush write buffer */
 	return 0;
 }
 
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, source_id, function_mask,
-		DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
 /* return value determine if we need a write buffer flush */
 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int size_order, u64 type,
@@ -655,37 +637,25 @@
 		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
 	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
 		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-			DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
-	/* flush context entry will implictly flush write buffer */
+			(unsigned long long)DMA_TLB_IIRG(type),
+			(unsigned long long)DMA_TLB_IAIG(val));
+	/* flush iotlb entry will implicitly flush write buffer */
 	return 0;
 }
 
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
-		non_present_entry_flush);
-}
-
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int pages, int non_present_entry_flush)
 {
 	unsigned int mask;
 
-	BUG_ON(addr & (~PAGE_MASK_4K));
+	BUG_ON(addr & (~VTD_PAGE_MASK));
 	BUG_ON(pages == 0);
 
 	/* Fallback to domain selective flush if no PSI support */
 	if (!cap_pgsel_inv(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						DMA_TLB_DSI_FLUSH,
+						non_present_entry_flush);
 
 	/*
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +664,12 @@
 	mask = ilog2(__roundup_pow_of_two(pages));
 	/* Fallback to domain selective flush if size is too big */
 	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+			DMA_TLB_DSI_FLUSH, non_present_entry_flush);
 
-	return __iommu_flush_iotlb(iommu, did, addr, mask,
-		DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+					DMA_TLB_PSI_FLUSH,
+					non_present_entry_flush);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
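
Page-selective invalidation takes an address-mask order rather than a raw page count, so the count is rounded up to a power of two before being converted to a mask. A stand-alone illustration of that arithmetic (ilog2() and __roundup_pow_of_two() are kernel helpers; plain C equivalents are used here so the numbers can be checked):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    static unsigned int ilog2(unsigned int n)
    {
            unsigned int l = 0;

            while (n >>= 1)
                    l++;
            return l;
    }

    int main(void)
    {
            unsigned int pages = 5;
            unsigned int mask = ilog2(roundup_pow_of_two(pages));

            /* pages=5 rounds up to 8, so mask=3: the flush covers
             * 2^3 = 8 naturally aligned pages
             */
            printf("pages=%u mask=%u covers %u pages\n",
                   pages, mask, 1u << mask);
            return 0;
    }
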
@@ -831,7 +802,7 @@
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, u64 addr)
+		u8 fault_reason, u16 source_id, unsigned long long addr)
 {
 	const char *reason;
 
@@ -1084,9 +1055,9 @@
 			if (!r->flags || !(r->flags & IORESOURCE_MEM))
 				continue;
 			addr = r->start;
-			addr &= PAGE_MASK_4K;
+			addr &= PAGE_MASK;
 			size = r->end - addr;
-			size = PAGE_ALIGN_4K(size);
+			size = PAGE_ALIGN(size);
 			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
 				IOVA_PFN(size + addr) - 1);
 			if (!iova)
@@ -1148,7 +1119,7 @@
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
 	if (!domain->pgd)
 		return -ENOMEM;
-	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
 	return 0;
 }
 
@@ -1164,7 +1135,7 @@
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -1204,11 +1175,13 @@
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_context_device(iommu, domain->id,
-			(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+	if (iommu->flush.flush_context(iommu, domain->id,
+		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+		DMA_CCMD_DEVICE_INVL, 1))
 		iommu_flush_write_buffer(iommu);
 	else
-		iommu_flush_iotlb_dsi(iommu, 0, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return 0;
 }
@@ -1283,22 +1256,25 @@
 	u64 start_pfn, end_pfn;
 	struct dma_pte *pte;
 	int index;
+	int addr_width = agaw_to_width(domain->agaw);
+
+	hpa &= (((u64)1) << addr_width) - 1;
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
-	iova &= PAGE_MASK_4K;
-	start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-	end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+	iova &= PAGE_MASK;
+	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
 	index = 0;
 	while (start_pfn < end_pfn) {
-		pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(*pte));
-		dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+		dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(*pte, prot);
 		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 		start_pfn++;
@@ -1310,8 +1286,10 @@
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
 	clear_context_table(domain->iommu, bus, devfn);
-	iommu_flush_context_global(domain->iommu, 0);
-	iommu_flush_iotlb_global(domain->iommu, 0);
+	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1474,11 +1452,13 @@
 	return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+				      unsigned long long start,
+				      unsigned long long end)
 {
 	struct dmar_domain *domain;
 	unsigned long size;
-	u64 base;
+	unsigned long long base;
 	int ret;
 
 	printk(KERN_INFO
@@ -1490,9 +1470,9 @@
 		return -ENOMEM;
 
 	/* The address might not be aligned */
-	base = start & PAGE_MASK_4K;
+	base = start & PAGE_MASK;
 	size = end - base;
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
 			IOVA_PFN(base + size) - 1)) {
 		printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1662,6 +1642,28 @@
 		}
 	}
 
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+		if (dmar_enable_qi(iommu)) {
+			/*
+			 * Queued Invalidate not enabled, use Register Based
+			 * Invalidate
+			 */
+			iommu->flush.flush_context = __iommu_flush_context;
+			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+			       "invalidation\n", drhd->reg_base_addr);
+		} else {
+			iommu->flush.flush_context = qi_flush_context;
+			iommu->flush.flush_iotlb = qi_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+			       "invalidation\n", drhd->reg_base_addr);
+		}
+	}
+
 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr
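
Each IOMMU now carries a pair of flush callbacks that are filled in exactly once by the loop above: the register-based primitives when queued invalidation cannot be enabled, the QI variants otherwise. Later call sites go through iommu->flush.* and no longer care which mechanism is active. A hedged sketch of the dispatch structure, with member signatures inferred from the call sites in this patch (the real declaration is expected in <linux/intel-iommu.h>):

    struct iommu_flush {
            int (*flush_context)(struct intel_iommu *iommu, u16 did,
                                 u16 sid, u8 fm, u64 type,
                                 int non_present_entry_flush);
            int (*flush_iotlb)(struct intel_iommu *iommu, u16 did,
                               u64 addr, unsigned int size_order,
                               u64 type, int non_present_entry_flush);
    };
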
@@ -1714,9 +1716,10 @@
 
 		iommu_set_root_entry(iommu);
 
-		iommu_flush_context_global(iommu, 0);
-		iommu_flush_iotlb_global(iommu, 0);
-
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+					   0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+					 0);
 		iommu_disable_protect_mem_regions(iommu);
 
 		ret = iommu_enable_translation(iommu);
@@ -1738,8 +1741,8 @@
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
 	u64 addr;
-	addr = (host_addr & (~PAGE_MASK_4K)) + size;
-	return PAGE_ALIGN_4K(addr);
+	addr = (host_addr & (~PAGE_MASK)) + size;
+	return PAGE_ALIGN(addr);
 }
 
 struct iova *
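
aligned_size() adds the sub-page offset of the start address before rounding up, so a buffer that straddles page boundaries is accounted for in whole pages. A stand-alone check of the arithmetic, assuming 4K pages:

    #include <stdio.h>
    #include <stdint.h>

    #define PG_SIZE		4096ULL
    #define PG_MASK		(~(PG_SIZE - 1))
    #define PG_ALIGN(x)	(((x) + PG_SIZE - 1) & PG_MASK)

    static uint64_t aligned_size(uint64_t host_addr, uint64_t size)
    {
            return PG_ALIGN((host_addr & ~PG_MASK) + size);
    }

    int main(void)
    {
            /* 0x345 bytes into a page plus 0x2000 bytes of payload
             * spans three pages: prints 0x3000
             */
            printf("0x%llx\n",
                   (unsigned long long)aligned_size(0x12345, 0x2000));
            return 0;
    }
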
@@ -1753,20 +1756,20 @@
 		return NULL;
 
 	piova = alloc_iova(&domain->iovad,
-			size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
 	return piova;
 }
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-		size_t size)
+		   size_t size, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-		iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-	} else  {
+	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+		iova = iommu_alloc_iova(domain, size, dma_mask);
+	else {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1774,7 +1777,7 @@
 		 */
 		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
 		if (!iova)
-			iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+			iova = iommu_alloc_iova(domain, size, dma_mask);
 	}
 
 	if (!iova) {
@@ -1813,12 +1816,12 @@
 	return domain;
 }
 
-static dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+				     size_t size, int dir, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
-	unsigned long start_paddr;
+	phys_addr_t start_paddr;
 	struct iova *iova;
 	int prot = 0;
 	int ret;
@@ -1833,11 +1836,11 @@
 
 	size = aligned_size((u64)paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
-	start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
 	/*
 	 * Check if DMAR supports zero-length reads on write only
@@ -1855,30 +1858,33 @@
 	 * is not a big problem
 	 */
 	ret = domain_page_mapping(domain, start_paddr,
-		((u64)paddr) & PAGE_MASK_4K, size, prot);
+		((u64)paddr) & PAGE_MASK, size, prot);
 	if (ret)
 		goto error;
 
-	pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-		pci_name(pdev), size, (u64)paddr,
-		size, (u64)start_paddr, dir);
-
 	/* it's a non-present to present mapping */
 	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_paddr, size >> PAGE_SHIFT_4K, 1);
+			start_paddr, size >> VTD_PAGE_SHIFT, 1);
 	if (ret)
 		iommu_flush_write_buffer(domain->iommu);
 
-	return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+	return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-		pci_name(pdev), size, (u64)paddr, dir);
+		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+			    size_t size, int dir)
+{
+	return __intel_map_single(hwdev, paddr, size, dir,
+				  to_pci_dev(hwdev)->dma_mask);
+}
+
 static void flush_unmaps(void)
 {
 	int i, j;
@@ -1891,7 +1897,8 @@
 			struct intel_iommu *iommu =
 				deferred_flush[i].domain[0]->iommu;
 
-			iommu_flush_iotlb_global(iommu, 0);
+			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+						 DMA_TLB_GLOBAL_FLUSH, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 						deferred_flush[i].iova[j]);
@@ -1936,8 +1943,8 @@
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-	size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -1953,11 +1960,11 @@
 	if (!iova)
 		return;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
 	pr_debug("Device %s unmapping: %lx@%llx\n",
-		pci_name(pdev), size, (u64)start_addr);
+		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/*  clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1965,7 +1972,7 @@
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 	if (intel_iommu_strict) {
 		if (iommu_flush_iotlb_psi(domain->iommu,
-			domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
 			iommu_flush_write_buffer(domain->iommu);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -1978,13 +1985,13 @@
 	}
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, gfp_t flags)
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
+			   dma_addr_t *dma_handle, gfp_t flags)
 {
 	void *vaddr;
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 	flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1993,19 +2000,21 @@
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+					 DMA_BIDIRECTIONAL,
+					 hwdev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-	void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+			 dma_addr_t dma_handle)
 {
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
 	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2013,8 +2022,9 @@
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+		    int nelems, int dir)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2038,7 +2048,7 @@
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 
 	/*  clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2046,7 +2056,7 @@
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-			size >> PAGE_SHIFT_4K, 0))
+			size >> VTD_PAGE_SHIFT, 0))
 		iommu_flush_write_buffer(domain->iommu);
 
 	/* free iova */
@@ -2067,8 +2077,8 @@
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-				int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+		 int dir)
 {
 	void *addr;
 	int i;
@@ -2096,7 +2106,7 @@
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -2112,14 +2122,14 @@
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
-			((u64)addr) & PAGE_MASK_4K,
+			((u64)addr) & PAGE_MASK,
 			size, prot);
 		if (ret) {
 			/*  clear the page */
@@ -2133,14 +2143,14 @@
 			return 0;
 		}
 		sg->dma_address = start_addr + offset +
-				((u64)addr & (~PAGE_MASK_4K));
+				((u64)addr & (~PAGE_MASK));
 		sg->dma_length = sg->length;
 		offset += size;
 	}
 
 	/* it's a non-present to present mapping */
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_addr, offset >> PAGE_SHIFT_4K, 1))
+			start_addr, offset >> VTD_PAGE_SHIFT, 1))
 		iommu_flush_write_buffer(domain->iommu);
 	return nelems;
 }
@@ -2180,7 +2190,6 @@
 					 sizeof(struct device_domain_info),
 					 0,
 					 SLAB_HWCACHE_ALIGN,
-
 					 NULL);
 	if (!iommu_devinfo_cache) {
 		printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2198,7 +2207,6 @@
 					 sizeof(struct iova),
 					 0,
 					 SLAB_HWCACHE_ALIGN,
-
 					 NULL);
 	if (!iommu_iova_cache) {
 		printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2327,7 +2335,7 @@
 		return;
 
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~VTD_PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -2423,6 +2431,6 @@
 	if (pte)
 		pfn = dma_pte_addr(*pte);
 
-	return pfn >> PAGE_SHIFT_4K;
+	return pfn >> VTD_PAGE_SHIFT;
 }
 EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 738d4c8..2de5a32 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -1,3 +1,4 @@
+#include <linux/interrupt.h>
 #include <linux/dmar.h>
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
@@ -11,41 +12,64 @@
 static int ir_ioapic_num;
 int intr_remapping_enabled;
 
-static struct {
+struct irq_2_iommu {
 	struct intel_iommu *iommu;
 	u16 irte_index;
 	u16 sub_handle;
 	u8  irte_mask;
-} irq_2_iommu[NR_IRQS];
+};
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+	return irq_2_iommu(irq);
+}
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
+static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
+{
+	struct irq_2_iommu *irq_iommu;
+
+	irq_iommu = irq_2_iommu(irq);
+
+	if (!irq_iommu)
+		return NULL;
+
+	if (!irq_iommu->iommu)
+		return NULL;
+
+	return irq_iommu;
+}
+
 int irq_remapped(int irq)
 {
-	if (irq > NR_IRQS)
-		return 0;
-
-	if (!irq_2_iommu[irq].iommu)
-		return 0;
-
-	return 1;
+	return valid_irq_2_iommu(irq) != NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
-	if (!entry || irq > NR_IRQS)
+	if (!entry)
 		return -1;
 
 	spin_lock(&irq_2_ir_lock);
-	if (!irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
-	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
+	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
 	spin_unlock(&irq_2_ir_lock);
 	return 0;
@@ -54,6 +78,7 @@
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
+	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
 	int i;
@@ -61,6 +86,10 @@
 	if (!count)
 		return -1;
 
+	/* protect irq_2_iommu_alloc later */
+	if (irq >= nr_irqs)
+		return -1;
+
 	/*
 	 * start the IRTE search from index 0.
 	 */
@@ -100,10 +129,11 @@
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index =  index;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = mask;
+	irq_iommu = irq_2_iommu_alloc(irq);
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index =  index;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = mask;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -124,31 +154,33 @@
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	*sub_handle = irq_2_iommu[irq].sub_handle;
-	index = irq_2_iommu[irq].irte_index;
+	*sub_handle = irq_iommu->sub_handle;
+	index = irq_iommu->irte_index;
 	spin_unlock(&irq_2_ir_lock);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
-	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
-		spin_unlock(&irq_2_ir_lock);
-		return -1;
-	}
+	struct irq_2_iommu *irq_iommu;
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = subhandle;
-	irq_2_iommu[irq].irte_mask = 0;
+	spin_lock(&irq_2_ir_lock);
+
+	irq_iommu = irq_2_iommu_alloc(irq);
+
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = subhandle;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -157,16 +189,19 @@
 
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
+	struct irq_2_iommu *irq_iommu;
+
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -178,16 +213,18 @@
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
@@ -203,18 +240,20 @@
 {
 	int index;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
 	return 0;
@@ -246,28 +285,30 @@
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	if (!irq_2_iommu[irq].sub_handle) {
-		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+	if (!irq_iommu->sub_handle) {
+		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
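
Besides funnelling every access through helpers, the conversion above tightens the bounds checking: some of the old open-coded tests compared with '>' rather than '>=', letting irq == NR_IRQS index one slot past the array, and all of them used the compile-time NR_IRQS rather than the runtime nr_irqs. A minimal stand-alone illustration of the difference:

    #include <stdio.h>

    #define NR_IRQS 16

    static int table[NR_IRQS];

    static int *old_lookup(unsigned int irq)
    {
            return (irq > NR_IRQS) ? NULL : &table[irq];    /* off by one */
    }

    static int *new_lookup(unsigned int irq)
    {
            return (irq < NR_IRQS) ? &table[irq] : NULL;    /* in bounds */
    }

    int main(void)
    {
            /* old_lookup(NR_IRQS) hands back a pointer one past the array,
             * new_lookup(NR_IRQS) rejects the index
             */
            printf("old=%p new=%p\n",
                   (void *)old_lookup(NR_IRQS), (void *)new_lookup(NR_IRQS));
            return 0;
    }
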
 
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4a10b56..74801f7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -378,23 +378,21 @@
 	entry->msi_attrib.masked = 1;
 	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.pos = pos;
-	if (is_mask_bit_support(control)) {
+	if (entry->msi_attrib.maskbit) {
 		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
-				is_64bit_address(control));
+				entry->msi_attrib.is_64);
 	}
 	entry->dev = dev;
 	if (entry->msi_attrib.maskbit) {
 		unsigned int maskbits, temp;
 		/* All MSIs are unmasked by default, Mask them all */
 		pci_read_config_dword(dev,
-			msi_mask_bits_reg(pos, is_64bit_address(control)),
+			msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
 			&maskbits);
 		temp = (1 << multi_msi_capable(control));
 		temp = ((temp - 1) & ~temp);
 		maskbits |= temp;
-		pci_write_config_dword(dev,
-			msi_mask_bits_reg(pos, is_64bit_address(control)),
-			maskbits);
+		pci_write_config_dword(dev, msi_mask_bits_reg(pos, entry->msi_attrib.is_64), maskbits);
 		entry->msi_attrib.maskbits_mask = temp;
 	}
 	list_add_tail(&entry->list, &dev->msi_list);
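
The mask initialisation builds one mask bit per supported vector before OR-ing them into the register. A stand-alone check of the bit arithmetic, assuming multi_msi_capable() yields the number of vectors the function advertises:

    #include <stdio.h>

    int main(void)
    {
            unsigned int vectors = 8;               /* multi_msi_capable() */
            unsigned int temp = 1u << vectors;      /* 0x100 */

            /* for a power of two this is simply temp - 1: eight low
             * bits set, one mask bit for each of the eight vectors
             */
            temp = (temp - 1) & ~temp;
            printf("mask bits: 0x%02x\n", temp);    /* prints 0xff */
            return 0;
    }
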
@@ -761,3 +759,24 @@
 {
 	INIT_LIST_HEAD(&dev->msi_list);
 }
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+static void __devinit msi_acpi_init(void)
+{
+	if (acpi_pci_disabled)
+		return;
+	pci_osc_support_set(OSC_MSI_SUPPORT);
+	pcie_osc_support_set(OSC_MSI_SUPPORT);
+}
+#else
+static inline void msi_acpi_init(void) { }
+#endif /* CONFIG_ACPI */
+
+void __devinit msi_init(void)
+{
+	if (!pci_msi_enable)
+		return;
+	msi_acpi_init();
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 89a2f0f..dfe7c8e 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -24,17 +24,17 @@
 	acpi_handle handle;
 	u32 support_set;
 	u32 control_set;
-	int is_queried;
-	u32 query_result;
 	struct list_head sibiling;
 };
 static LIST_HEAD(acpi_osc_data_list);
 
 struct acpi_osc_args {
 	u32 capbuf[3];
-	u32 query_result;
+	u32 ctrl_result;
 };
 
+static DEFINE_MUTEX(pci_acpi_lock);
+
 static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
 {
 	struct acpi_osc_data *data;
@@ -108,9 +108,8 @@
 		goto out_kfree;
 	}
 out_success:
-	if (flags & OSC_QUERY_ENABLE)
-		osc_args->query_result =
-			*((u32 *)(out_obj->buffer.pointer + 8));
+	osc_args->ctrl_result =
+		*((u32 *)(out_obj->buffer.pointer + 8));
 	status = AE_OK;
 
 out_kfree:
@@ -118,39 +117,51 @@
 	return status;
 }
 
+static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
+				    u32 *result)
+{
+	acpi_status status;
+	u32 support_set;
+	struct acpi_osc_args osc_args;
+
+	/* do _OSC query for all possible controls */
+	support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
+	osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+	osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
+	osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
+
+	status = acpi_run_osc(osc_data->handle, &osc_args);
+	if (ACPI_SUCCESS(status)) {
+		osc_data->support_set = support_set;
+		*result = osc_args.ctrl_result;
+	}
+
+	return status;
+}
+
 static acpi_status acpi_query_osc(acpi_handle handle,
 				  u32 level, void *context, void **retval)
 {
 	acpi_status status;
 	struct acpi_osc_data *osc_data;
-	u32 flags = (unsigned long)context, support_set;
+	u32 flags = (unsigned long)context, dummy;
 	acpi_handle tmp;
-	struct acpi_osc_args osc_args;
 
 	status = acpi_get_handle(handle, "_OSC", &tmp);
 	if (ACPI_FAILURE(status))
-		return status;
+		return AE_OK;
 
+	mutex_lock(&pci_acpi_lock);
 	osc_data = acpi_get_osc_data(handle);
 	if (!osc_data) {
 		printk(KERN_ERR "acpi osc data array is full\n");
-		return AE_ERROR;
+		goto out;
 	}
 
-	/* do _OSC query for all possible controls */
-	support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
-	osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
-	osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
-
-	status = acpi_run_osc(handle, &osc_args);
-	if (ACPI_SUCCESS(status)) {
-		osc_data->support_set = support_set;
-		osc_data->query_result = osc_args.query_result;
-		osc_data->is_queried = 1;
-	}
-
-	return status;
+	__acpi_query_osc(flags, osc_data, &dummy);
+out:
+	mutex_unlock(&pci_acpi_lock);
+	return AE_OK;
 }
 
 /**
@@ -181,7 +192,7 @@
 acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
 {
 	acpi_status status;
-	u32 ctrlset, control_set;
+	u32 ctrlset, control_set, result;
 	acpi_handle tmp;
 	struct acpi_osc_data *osc_data;
 	struct acpi_osc_args osc_args;
@@ -190,19 +201,28 @@
 	if (ACPI_FAILURE(status))
 		return status;
 
+	mutex_lock(&pci_acpi_lock);
 	osc_data = acpi_get_osc_data(handle);
 	if (!osc_data) {
 		printk(KERN_ERR "acpi osc data array is full\n");
-		return AE_ERROR;
+		status = AE_ERROR;
+		goto out;
 	}
 
 	ctrlset = (flags & OSC_CONTROL_MASKS);
-	if (!ctrlset)
-		return AE_TYPE;
+	if (!ctrlset) {
+		status = AE_TYPE;
+		goto out;
+	}
 
-	if (osc_data->is_queried &&
-	    ((osc_data->query_result & ctrlset) != ctrlset))
-		return AE_SUPPORT;
+	status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
+	if (ACPI_FAILURE(status))
+		goto out;
+
+	if ((result & ctrlset) != ctrlset) {
+		status = AE_SUPPORT;
+		goto out;
+	}
 
 	control_set = osc_data->control_set | ctrlset;
 	osc_args.capbuf[OSC_QUERY_TYPE] = 0;
@@ -211,7 +231,8 @@
 	status = acpi_run_osc(handle, &osc_args);
 	if (ACPI_SUCCESS(status))
 		osc_data->control_set = control_set;
-
+out:
+	mutex_unlock(&pci_acpi_lock);
 	return status;
 }
 EXPORT_SYMBOL(pci_osc_control_set);
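
With the cached query_result gone, every pci_osc_control_set() call re-runs the _OSC query under pci_acpi_lock and only requests control bits the query reported as grantable. A hedged usage sketch from the caller's side (the handle, device pointer and error message are illustrative, and the control constant is assumed from elsewhere in the tree):

    acpi_status status;

    /* ask the firmware to hand over native PCIe hotplug control;
     * query/request ordering is now handled inside the helper
     */
    status = pci_osc_control_set(handle, OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
    if (ACPI_FAILURE(status))
            dev_info(dev, "firmware retained control of PCIe hotplug\n");
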
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a13f534..b4cdd69 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -43,18 +43,32 @@
 {
 	struct pci_dynid *dynid;
 	struct pci_driver *pdrv = to_pci_driver(driver);
+	const struct pci_device_id *ids = pdrv->id_table;
 	__u32 vendor, device, subvendor=PCI_ANY_ID,
 		subdevice=PCI_ANY_ID, class=0, class_mask=0;
 	unsigned long driver_data=0;
 	int fields=0;
-	int retval = 0;
+	int retval;
 
-	fields = sscanf(buf, "%x %x %x %x %x %x %lux",
+	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
 			&vendor, &device, &subvendor, &subdevice,
 			&class, &class_mask, &driver_data);
 	if (fields < 2)
 		return -EINVAL;
 
+	/* Only accept driver_data values that match an existing id_table
+	   entry */
+	retval = -EINVAL;
+	while (ids->vendor || ids->subvendor || ids->class_mask) {
+		if (driver_data == ids->driver_data) {
+			retval = 0;
+			break;
+		}
+		ids++;
+	}
+	if (retval)	/* No match */
+		return retval;
+
 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
 	if (!dynid)
 		return -ENOMEM;
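
Besides validating driver_data against the driver's id_table, the hunk above fixes the sscanf format: "%lux" is really "%lu" followed by a literal 'x', so hexadecimal driver_data values written to the new_id file were never parsed. A stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long v = 0;

            /* "%lux" stops at the non-decimal input: 0 fields converted */
            printf("%%lux -> %d fields\n", sscanf("beef", "%lux", &v));

            /* "%lx" accepts the hex value: 1 field, v = 0xbeef */
            printf("%%lx  -> %d fields, v=0x%lx\n",
                   sscanf("beef", "%lx", &v), v);
            return 0;
    }
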
@@ -65,8 +79,7 @@
 	dynid->id.subdevice = subdevice;
 	dynid->id.class = class;
 	dynid->id.class_mask = class_mask;
-	dynid->id.driver_data = pdrv->dynids.use_driver_data ?
-		driver_data : 0UL;
+	dynid->id.driver_data = driver_data;
 
 	spin_lock(&pdrv->dynids.lock);
 	list_add_tail(&dynid->node, &pdrv->dynids.list);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 77baff0..110022d 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -423,7 +423,7 @@
  * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
  * callback routine (pci_legacy_read).
  */
-ssize_t
+static ssize_t
 pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
 		   char *buf, loff_t off, size_t count)
 {
@@ -448,7 +448,7 @@
  * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
  * callback routine (pci_legacy_write).
  */
-ssize_t
+static ssize_t
 pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
 		    char *buf, loff_t off, size_t count)
 {
@@ -468,11 +468,11 @@
  * @attr: struct bin_attribute for this file
  * @vma: struct vm_area_struct passed to mmap
  *
- * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap
+ * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
  * legacy memory space (first meg of bus space) into application virtual
  * memory space.
  */
-int
+static int
 pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
                     struct vm_area_struct *vma)
 {
@@ -480,7 +480,90 @@
                                                       struct device,
 						      kobj));
 
-        return pci_mmap_legacy_page_range(bus, vma);
+        return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
+}
+
+/**
+ * pci_mmap_legacy_io - map legacy PCI IO into user memory space
+ * @kobj: kobject corresponding to device to be mapped
+ * @attr: struct bin_attribute for this file
+ * @vma: struct vm_area_struct passed to mmap
+ *
+ * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
+ * legacy IO space (first meg of bus space) into application virtual
+ * memory space. Returns -ENOSYS if the operation isn't supported
+ */
+static int
+pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
+		   struct vm_area_struct *vma)
+{
+        struct pci_bus *bus = to_pci_bus(container_of(kobj,
+                                                      struct device,
+						      kobj));
+
+        return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
+}
+
+/**
+ * pci_create_legacy_files - create legacy I/O port and memory files
+ * @b: bus to create files under
+ *
+ * Some platforms allow access to legacy I/O port and ISA memory space on
+ * a per-bus basis.  This routine creates the files and ties them into
+ * their associated read, write and mmap files from pci-sysfs.c
+ *
+ * On error unwind, but don't propagate the error to the caller
+ * as it is ok to set up the PCI bus without these files.
+ */
+void pci_create_legacy_files(struct pci_bus *b)
+{
+	int error;
+
+	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
+			       GFP_ATOMIC);
+	if (!b->legacy_io)
+		goto kzalloc_err;
+
+	b->legacy_io->attr.name = "legacy_io";
+	b->legacy_io->size = 0xffff;
+	b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
+	b->legacy_io->read = pci_read_legacy_io;
+	b->legacy_io->write = pci_write_legacy_io;
+	b->legacy_io->mmap = pci_mmap_legacy_io;
+	error = device_create_bin_file(&b->dev, b->legacy_io);
+	if (error)
+		goto legacy_io_err;
+
+	/* Allocated above after the legacy_io struct */
+	b->legacy_mem = b->legacy_io + 1;
+	b->legacy_mem->attr.name = "legacy_mem";
+	b->legacy_mem->size = 1024*1024;
+	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
+	b->legacy_mem->mmap = pci_mmap_legacy_mem;
+	error = device_create_bin_file(&b->dev, b->legacy_mem);
+	if (error)
+		goto legacy_mem_err;
+
+	return;
+
+legacy_mem_err:
+	device_remove_bin_file(&b->dev, b->legacy_io);
+legacy_io_err:
+	kfree(b->legacy_io);
+	b->legacy_io = NULL;
+kzalloc_err:
+	printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
+	       "and ISA memory resources to sysfs\n");
+	return;
+}
+
+void pci_remove_legacy_files(struct pci_bus *b)
+{
+	if (b->legacy_io) {
+		device_remove_bin_file(&b->dev, b->legacy_io);
+		device_remove_bin_file(&b->dev, b->legacy_mem);
+		kfree(b->legacy_io); /* both are allocated here */
+	}
 }
 #endif /* HAVE_PCI_LEGACY */
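
The two bin_attributes are carved out of a single two-element kzalloc, which is why the removal path frees only b->legacy_io, and a creation failure is reported but deliberately does not fail bus setup. From user space the files expose the legacy ranges of the bus; a hedged sketch (the sysfs path is illustrative and the feature only exists on platforms that define HAVE_PCI_LEGACY):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char val;
            int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* 1-, 2- and 4-byte accesses are supported; read one byte
             * from I/O port 0x61
             */
            if (pread(fd, &val, 1, 0x61) == 1)
                    printf("port 0x61 = 0x%02x\n", val);
            close(fd);
            return 0;
    }
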
 
@@ -715,7 +798,7 @@
 		.name = "config",
 		.mode = S_IRUGO | S_IWUSR,
 	},
-	.size = 256,
+	.size = PCI_CFG_SPACE_SIZE,
 	.read = pci_read_config,
 	.write = pci_write_config,
 };
@@ -725,7 +808,7 @@
 		.name = "config",
 		.mode = S_IRUGO | S_IWUSR,
 	},
-	.size = 4096,
+	.size = PCI_CFG_SPACE_EXP_SIZE,
 	.read = pci_read_config,
 	.write = pci_write_config,
 };
@@ -735,86 +818,103 @@
 	return 0;
 }
 
+static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+{
+	int retval;
+	struct bin_attribute *attr;
+
+	/* If the device has VPD, try to expose it in sysfs. */
+	if (dev->vpd) {
+		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
+		if (!attr)
+			return -ENOMEM;
+
+		attr->size = dev->vpd->len;
+		attr->attr.name = "vpd";
+		attr->attr.mode = S_IRUSR | S_IWUSR;
+		attr->read = pci_read_vpd;
+		attr->write = pci_write_vpd;
+		retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
+		if (retval) {
+			kfree(dev->vpd->attr);
+			return retval;
+		}
+		dev->vpd->attr = attr;
+	}
+
+	/* Active State Power Management */
+	pcie_aspm_create_sysfs_dev_files(dev);
+
+	return 0;
+}
+
 int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
 {
-	struct bin_attribute *attr = NULL;
 	int retval;
+	int rom_size = 0;
+	struct bin_attribute *attr;
 
 	if (!sysfs_initialized)
 		return -EACCES;
 
-	if (pdev->cfg_size < 4096)
+	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
 		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
 	else
 		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
 	if (retval)
 		goto err;
 
-	/* If the device has VPD, try to expose it in sysfs. */
-	if (pdev->vpd) {
-		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
-		if (attr) {
-			pdev->vpd->attr = attr;
-			attr->size = pdev->vpd->len;
-			attr->attr.name = "vpd";
-			attr->attr.mode = S_IRUSR | S_IWUSR;
-			attr->read = pci_read_vpd;
-			attr->write = pci_write_vpd;
-			retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
-			if (retval)
-				goto err_vpd;
-		} else {
-			retval = -ENOMEM;
-			goto err_config_file;
-		}
-	}
-
 	retval = pci_create_resource_files(pdev);
 	if (retval)
-		goto err_vpd_file;
+		goto err_config_file;
+
+	if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
+		rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+	else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
+		rom_size = 0x20000;
 
 	/* If the device has a ROM, try to expose it in sysfs. */
-	if (pci_resource_len(pdev, PCI_ROM_RESOURCE) ||
-	    (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)) {
+	if (rom_size) {
 		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
-		if (attr) {
-			pdev->rom_attr = attr;
-			attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
-			attr->attr.name = "rom";
-			attr->attr.mode = S_IRUSR;
-			attr->read = pci_read_rom;
-			attr->write = pci_write_rom;
-			retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
-			if (retval)
-				goto err_rom;
-		} else {
+		if (!attr) {
 			retval = -ENOMEM;
 			goto err_resource_files;
 		}
+		attr->size = rom_size;
+		attr->attr.name = "rom";
+		attr->attr.mode = S_IRUSR;
+		attr->read = pci_read_rom;
+		attr->write = pci_write_rom;
+		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
+		if (retval) {
+			kfree(attr);
+			goto err_resource_files;
+		}
+		pdev->rom_attr = attr;
 	}
+
 	/* add platform-specific attributes */
-	if (pcibios_add_platform_entries(pdev))
+	retval = pcibios_add_platform_entries(pdev);
+	if (retval)
 		goto err_rom_file;
 
-	pcie_aspm_create_sysfs_dev_files(pdev);
+	/* add sysfs entries for various capabilities */
+	retval = pci_create_capabilities_sysfs(pdev);
+	if (retval)
+		goto err_rom_file;
 
 	return 0;
 
 err_rom_file:
-	if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
+	if (rom_size) {
 		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
-err_rom:
-	kfree(pdev->rom_attr);
+		kfree(pdev->rom_attr);
+		pdev->rom_attr = NULL;
+	}
 err_resource_files:
 	pci_remove_resource_files(pdev);
-err_vpd_file:
-	if (pdev->vpd) {
-		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr);
-err_vpd:
-		kfree(pdev->vpd->attr);
-	}
 err_config_file:
-	if (pdev->cfg_size < 4096)
+	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
 		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
 	else
 		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
@@ -822,6 +922,16 @@
 	return retval;
 }
 
+static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
+{
+	if (dev->vpd && dev->vpd->attr) {
+		sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+		kfree(dev->vpd->attr);
+	}
+
+	pcie_aspm_remove_sysfs_dev_files(dev);
+}
+
 /**
  * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
  * @pdev: device whose entries we should free
@@ -830,27 +940,28 @@
  */
 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
 {
+	int rom_size = 0;
+
 	if (!sysfs_initialized)
 		return;
 
-	pcie_aspm_remove_sysfs_dev_files(pdev);
+	pci_remove_capabilities_sysfs(pdev);
 
-	if (pdev->vpd) {
-		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr);
-		kfree(pdev->vpd->attr);
-	}
-	if (pdev->cfg_size < 4096)
+	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
 		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
 	else
 		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
 
 	pci_remove_resource_files(pdev);
 
-	if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) {
-		if (pdev->rom_attr) {
-			sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
-			kfree(pdev->rom_attr);
-		}
+	if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
+		rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+	else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
+		rom_size = 0x20000;
+
+	if (rom_size && pdev->rom_attr) {
+		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
+		kfree(pdev->rom_attr);
 	}
 }
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9884bb..533aeb5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -18,6 +18,7 @@
 #include <linux/log2.h>
 #include <linux/pci-aspm.h>
 #include <linux/pm_wakeup.h>
+#include <linux/interrupt.h>
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
@@ -213,10 +214,13 @@
 int pci_find_ext_capability(struct pci_dev *dev, int cap)
 {
 	u32 header;
-	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
-	int pos = 0x100;
+	int ttl;
+	int pos = PCI_CFG_SPACE_SIZE;
 
-	if (dev->cfg_size <= 256)
+	/* minimum 8 bytes per capability */
+	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 		return 0;
 
 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
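
The loop bound is now derived from the two config-space size constants instead of being hard-coded, and it still evaluates to the old limit of 480 entries:

    #include <stdio.h>

    #define PCI_CFG_SPACE_SIZE	256
    #define PCI_CFG_SPACE_EXP_SIZE	4096

    int main(void)
    {
            /* (4096 - 256) / 8 = 3840 / 8 = 480 extended capabilities
             * at a minimum of 8 bytes each
             */
            printf("ttl = %d\n",
                   (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8);
            return 0;
    }
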
@@ -234,7 +238,7 @@
 			return pos;
 
 		pos = PCI_EXT_CAP_NEXT(header);
-		if (pos < 0x100)
+		if (pos < PCI_CFG_SPACE_SIZE)
 			break;
 
 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
@@ -1127,6 +1131,27 @@
 }
 
 /**
+ * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
+ * @dev: PCI device to prepare
+ * @enable: True to enable wake-up event generation; false to disable
+ *
+ * Many drivers want the device to wake up the system from D3_hot or D3_cold
+ * and this function allows them to set that up cleanly - pci_enable_wake()
+ * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
+ * ordering constraints.
+ *
+ * This function only returns an error code if the device is not capable of
+ * generating PME# from both D3_hot and D3_cold, and the platform is unable to
+ * enable wake-up power for it.
+ */
+int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+	return pci_pme_capable(dev, PCI_D3cold) ?
+			pci_enable_wake(dev, PCI_D3cold, enable) :
+			pci_enable_wake(dev, PCI_D3hot, enable);
+}
+
+/**
  * pci_target_state - find an appropriate low power state for a given PCI dev
  * @dev: PCI device
  *
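
A hedged usage sketch of the new helper from a driver's suspend path (the foo_* names and the wol flag are hypothetical); the point is that a single call covers both D3hot and D3cold instead of calling pci_enable_wake() twice:

    static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            struct foo_priv *priv = pci_get_drvdata(pdev);

            foo_stop_hw(priv);                      /* hypothetical */
            pci_save_state(pdev);
            pci_wake_from_d3(pdev, priv->wol_enabled);
            pci_set_power_state(pdev, pci_choose_state(pdev, state));
            return 0;
    }
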
@@ -1242,25 +1267,25 @@
 	dev->d1_support = false;
 	dev->d2_support = false;
 	if (!pci_no_d1d2(dev)) {
-		if (pmc & PCI_PM_CAP_D1) {
-			dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n");
+		if (pmc & PCI_PM_CAP_D1)
 			dev->d1_support = true;
-		}
-		if (pmc & PCI_PM_CAP_D2) {
-			dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n");
+		if (pmc & PCI_PM_CAP_D2)
 			dev->d2_support = true;
-		}
+
+		if (dev->d1_support || dev->d2_support)
+			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
+				   dev->d1_support ? " D1" : "",
+				   dev->d2_support ? " D2" : "");
 	}
 
 	pmc &= PCI_PM_CAP_PME_MASK;
 	if (pmc) {
-		dev_printk(KERN_INFO, &dev->dev,
-			"PME# supported from%s%s%s%s%s\n",
-			(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
-			(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
-			(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
-			(pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
-			(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
+		dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
+			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
+			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
+			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
+			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
 		/*
 		 * Make device's PM flags reflect the wake-up capability, but
@@ -1275,6 +1300,38 @@
 	}
 }
 
+/**
+ * pci_enable_ari - enable ARI forwarding if hardware support it
+ * @dev: the PCI device
+ */
+void pci_enable_ari(struct pci_dev *dev)
+{
+	int pos;
+	u32 cap;
+	u16 ctrl;
+
+	if (!dev->is_pcie)
+		return;
+
+	if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
+	    dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+		return;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return;
+
+	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
+	if (!(cap & PCI_EXP_DEVCAP2_ARI))
+		return;
+
+	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
+	ctrl |= PCI_EXP_DEVCTL2_ARI;
+	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
+
+	dev->ari_enabled = 1;
+}
+
 int
 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
 {
@@ -1358,11 +1415,10 @@
 	return 0;
 
 err_out:
-	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n",
+	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
 		 bar,
 		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
-		 (unsigned long long)pci_resource_start(pdev, bar),
-		 (unsigned long long)pci_resource_end(pdev, bar));
+		 &pdev->resource[bar]);
 	return -EBUSY;
 }
 
@@ -1691,6 +1747,103 @@
 #endif
 
 /**
+ * pci_execute_reset_function() - Reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_execute_reset_function(struct pci_dev *dev)
+{
+	u16 status;
+	u32 cap;
+	int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+
+	if (!exppos)
+		return -ENOTTY;
+	pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+	if (!(cap & PCI_EXP_DEVCAP_FLR))
+		return -ENOTTY;
+
+	pci_block_user_cfg_access(dev);
+
+	/* Wait for the Transaction Pending bit to clear */
+	msleep(100);
+	pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
+	if (status & PCI_EXP_DEVSTA_TRPND) {
+		dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
+			"sleeping for 1 second\n");
+		ssleep(1);
+		pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
+		if (status & PCI_EXP_DEVSTA_TRPND)
+			dev_info(&dev->dev, "Still busy after 1s; "
+				"proceeding with reset anyway\n");
+	}
+
+	pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+				PCI_EXP_DEVCTL_BCR_FLR);
+	mdelay(100);
+
+	pci_unblock_user_cfg_access(dev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+
+/**
+ * pci_reset_function() - quiesce and reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device.  This function differs
+ * from pci_execute_reset_function in that it saves and restores device state
+ * over the reset.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function(struct pci_dev *dev)
+{
+	u32 cap;
+	int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	int r;
+
+	if (!exppos)
+		return -ENOTTY;
+	pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+	if (!(cap & PCI_EXP_DEVCAP_FLR))
+		return -ENOTTY;
+
+	if (!dev->msi_enabled && !dev->msix_enabled)
+		disable_irq(dev->irq);
+	pci_save_state(dev);
+
+	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+	r = pci_execute_reset_function(dev);
+
+	pci_restore_state(dev);
+	if (!dev->msi_enabled && !dev->msix_enabled)
+		enable_irq(dev->irq);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function);
+
+/**
  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
  * @dev: PCI device to query
  *
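
A hedged usage sketch of the reset helpers: a caller that owns an otherwise idle function (for instance before handing it to a device-assignment backend) quiesces and resets it, treating a failure as "FLR not supported" rather than a hard error. The surrounding driver context is hypothetical:

    int err = pci_reset_function(pdev);

    if (err)
            dev_info(&pdev->dev,
                     "function-level reset not available (%d)\n", err);
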
@@ -1878,6 +2031,9 @@
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 		pci_fixup_device(pci_fixup_final, dev);
 	}
+
+	msi_init();
+
 	return 0;
 }
 
@@ -1943,6 +2099,7 @@
 EXPORT_SYMBOL(pci_pme_capable);
 EXPORT_SYMBOL(pci_pme_active);
 EXPORT_SYMBOL(pci_enable_wake);
+EXPORT_SYMBOL(pci_wake_from_d3);
 EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
 EXPORT_SYMBOL(pci_back_from_sleep);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d807cd7..9de87e9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,3 +1,9 @@
+#ifndef DRIVERS_PCI_H
+#define DRIVERS_PCI_H
+
+#define PCI_CFG_SPACE_SIZE	256
+#define PCI_CFG_SPACE_EXP_SIZE	4096
+
 /* Functions internal to the PCI core code */
 
 extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
@@ -76,7 +82,13 @@
 /* Functions for PCI Hotplug drivers to use */
 extern unsigned int pci_do_scan_bus(struct pci_bus *bus);
 
+#ifdef HAVE_PCI_LEGACY
+extern void pci_create_legacy_files(struct pci_bus *bus);
 extern void pci_remove_legacy_files(struct pci_bus *bus);
+#else
+static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
+static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
+#endif
 
 /* Lock for read/write access to pci device and bus lists */
 extern struct rw_semaphore pci_bus_sem;
@@ -86,9 +98,11 @@
 #ifdef CONFIG_PCI_MSI
 void pci_no_msi(void);
 extern void pci_msi_init_pci_dev(struct pci_dev *dev);
+extern void __devinit msi_init(void);
 #else
 static inline void pci_no_msi(void) { }
 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
+static inline void msi_init(void) { }
 #endif
 
 #ifdef CONFIG_PCIEAER
@@ -109,6 +123,7 @@
 extern int pcie_mch_quirk;
 extern struct device_attribute pci_dev_attrs[];
 extern struct device_attribute dev_attr_cpuaffinity;
+extern struct device_attribute dev_attr_cpulistaffinity;
 
 /**
  * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -144,3 +159,16 @@
 };
 #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
 
+extern void pci_enable_ari(struct pci_dev *dev);
+/**
+ * pci_ari_enabled - query ARI forwarding status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ARI forwarding is enabled, or 0 if not.
+ */
+static inline int pci_ari_enabled(struct pci_dev *dev)
+{
+	return dev->ari_enabled;
+}
+
+#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 77036f4..e390707 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -105,7 +105,7 @@
 	unsigned long flags;
 	int pos;
 
-	pos = pci_find_aer_capability(pdev->port);
+	pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR);
 	/*
 	 * Must lock access to Root Error Status Reg, Root Error ID Reg,
 	 * and Root error producer/consumer index
@@ -252,7 +252,7 @@
 	u32 status;
 	int pos;
 
-	pos = pci_find_aer_capability(dev);
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 
 	/* Disable Root's interrupt in response to error messages */
 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0);
@@ -316,7 +316,7 @@
 	pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
 
 	/* Clean AER Root Error Status */
-	pos = pci_find_aer_capability(dev);
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
 	if (dev->error_state == pci_channel_io_normal)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ee5e7b5..dfc63d0 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -28,41 +28,15 @@
 static int forceload;
 module_param(forceload, bool, 0);
 
-#define PCI_CFG_SPACE_SIZE	(0x100)
-int pci_find_aer_capability(struct pci_dev *dev)
-{
-	int pos;
-	u32 reg32 = 0;
-
-	/* Check if it's a pci-express device */
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	if (!pos)
-		return 0;
-
-	/* Check if it supports pci-express AER */
-	pos = PCI_CFG_SPACE_SIZE;
-	while (pos) {
-		if (pci_read_config_dword(dev, pos, &reg32))
-			return 0;
-
-		/* some broken boards return ~0 */
-		if (reg32 == 0xffffffff)
-			return 0;
-
-		if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR)
-			break;
-
-		pos = reg32 >> 20;
-	}
-
-	return pos;
-}
-
 int pci_enable_pcie_error_reporting(struct pci_dev *dev)
 {
 	u16 reg16 = 0;
 	int pos;
 
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+	if (!pos)
+		return -EIO;
+
 	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 	if (!pos)
 		return -EIO;
@@ -102,7 +76,7 @@
 	int pos;
 	u32 status, mask;
 
-	pos = pci_find_aer_capability(dev);
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 	if (!pos)
 		return -EIO;
 
@@ -123,7 +97,7 @@
 	int pos;
 	u32 status;
 
-	pos = pci_find_aer_capability(dev);
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 	if (!pos)
 		return -EIO;
 
@@ -502,7 +476,7 @@
 		 * Correctable error does not need software intervention.
 		 * No need to go through error recovery process.
 		 */
-		pos = pci_find_aer_capability(dev);
+		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 		if (pos)
 			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
 					info.status);
@@ -542,7 +516,7 @@
 	reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
 	pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
 
-	aer_pos = pci_find_aer_capability(pdev);
+	aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 	/* Clear error status */
 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
@@ -579,7 +553,7 @@
 	u32 reg32;
 	int pos;
 
-	pos = pci_find_aer_capability(pdev);
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 	/* Disable Root's interrupt in response to error messages */
 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
 
@@ -618,7 +592,7 @@
 {
 	int pos;
 
-	pos = pci_find_aer_capability(dev);
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 
 	/* The device might not support AER */
 	if (!pos)
@@ -755,7 +729,6 @@
 	return AER_SUCCESS;
 }
 
-EXPORT_SYMBOL_GPL(pci_find_aer_capability);
 EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
 EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
 EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
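
The hunks above replace the driver-private pci_find_aer_capability() with the
generic pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR). As a reference, a
simplified sketch of the walk that generic helper performs (illustration only;
the real helper additionally bounds the loop and validates the offsets):

static int find_ext_cap_sketch(struct pci_dev *dev, int cap)
{
	u32 header;
	int pos = 0x100;	/* extended capabilities start at PCI_CFG_SPACE_SIZE */

	while (pos) {
		if (pci_read_config_dword(dev, pos, &header))
			return 0;
		/* no capability list, or a broken device returning ~0 */
		if (header == 0 || header == 0xffffffff)
			return 0;
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;
		pos = header >> 20;	/* next capability offset, bits 31:20 */
	}
	return 0;
}
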
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 851f5b8..8f63f4c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -528,9 +528,9 @@
 		pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
 			&reg32);
 		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
-			printk("Pre-1.1 PCIe device detected, "
-				"disable ASPM for %s. It can be enabled forcedly"
-				" with 'pcie_aspm=force'\n", pci_name(pdev));
+			dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+				" on pre-1.1 PCIe device.  You can enable it"
+				" with 'pcie_aspm=force'\n");
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 3656e03..2529f3f 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,7 +25,6 @@
 #define PCIE_CAPABILITIES_REG		0x2
 #define PCIE_SLOT_CAPABILITIES_REG	0x14
 #define PCIE_PORT_DEVICE_MAXSERVICES	4
-#define PCI_CFG_SPACE_SIZE		256
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 890f0d2..2e091e0 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -195,24 +195,11 @@
 	/* PME Capable - root port capability */
 	if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
 		services |= PCIE_PORT_SERVICE_PME;
-	
-	pos = PCI_CFG_SPACE_SIZE;
-	while (pos) {
-		pci_read_config_dword(dev, pos, &reg32);
-		switch (reg32 & 0xffff) {
-		case PCI_EXT_CAP_ID_ERR:
-			services |= PCIE_PORT_SERVICE_AER;
-			pos = reg32 >> 20;
-			break;
-		case PCI_EXT_CAP_ID_VC:
-			services |= PCIE_PORT_SERVICE_VC;
-			pos = reg32 >> 20;
-			break;
-		default:
-			pos = 0;
-			break;
-		}
-	}
+
+	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+		services |= PCIE_PORT_SERVICE_AER;
+	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
+		services |= PCIE_PORT_SERVICE_VC;
 
 	return services;
 }
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 367c9c2..584422d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -91,7 +91,7 @@
 	
 	pci_set_master(dev);
         if (!dev->irq && dev->pin) {
-		dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; "
+		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
 			 "check vendor BIOS\n", dev->vendor, dev->device);
 	}
 	if (pcie_port_device_register(dev)) {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index dd9161a05..6f1e51d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -14,8 +14,6 @@
 
 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
 #define CARDBUS_RESERVE_BUSNR	3
-#define PCI_CFG_SPACE_SIZE	256
-#define PCI_CFG_SPACE_EXP_SIZE	4096
 
 /* Ugh.  Need to stop exporting this to modules. */
 LIST_HEAD(pci_root_buses);
@@ -44,72 +42,6 @@
 }
 EXPORT_SYMBOL(no_pci_devices);
 
-#ifdef HAVE_PCI_LEGACY
-/**
- * pci_create_legacy_files - create legacy I/O port and memory files
- * @b: bus to create files under
- *
- * Some platforms allow access to legacy I/O port and ISA memory space on
- * a per-bus basis.  This routine creates the files and ties them into
- * their associated read, write and mmap files from pci-sysfs.c
- *
- * On error unwind, but don't propogate the error to the caller
- * as it is ok to set up the PCI bus without these files.
- */
-static void pci_create_legacy_files(struct pci_bus *b)
-{
-	int error;
-
-	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
-			       GFP_ATOMIC);
-	if (!b->legacy_io)
-		goto kzalloc_err;
-
-	b->legacy_io->attr.name = "legacy_io";
-	b->legacy_io->size = 0xffff;
-	b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
-	b->legacy_io->read = pci_read_legacy_io;
-	b->legacy_io->write = pci_write_legacy_io;
-	error = device_create_bin_file(&b->dev, b->legacy_io);
-	if (error)
-		goto legacy_io_err;
-
-	/* Allocated above after the legacy_io struct */
-	b->legacy_mem = b->legacy_io + 1;
-	b->legacy_mem->attr.name = "legacy_mem";
-	b->legacy_mem->size = 1024*1024;
-	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
-	b->legacy_mem->mmap = pci_mmap_legacy_mem;
-	error = device_create_bin_file(&b->dev, b->legacy_mem);
-	if (error)
-		goto legacy_mem_err;
-
-	return;
-
-legacy_mem_err:
-	device_remove_bin_file(&b->dev, b->legacy_io);
-legacy_io_err:
-	kfree(b->legacy_io);
-	b->legacy_io = NULL;
-kzalloc_err:
-	printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
-	       "and ISA memory resources to sysfs\n");
-	return;
-}
-
-void pci_remove_legacy_files(struct pci_bus *b)
-{
-	if (b->legacy_io) {
-		device_remove_bin_file(&b->dev, b->legacy_io);
-		device_remove_bin_file(&b->dev, b->legacy_mem);
-		kfree(b->legacy_io); /* both are allocated here */
-	}
-}
-#else /* !HAVE_PCI_LEGACY */
-static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
-void pci_remove_legacy_files(struct pci_bus *bus) { return; }
-#endif /* HAVE_PCI_LEGACY */
-
 /*
  * PCI Bus Class Devices
  */
@@ -219,7 +151,7 @@
 
 	res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
 
-	if (res->flags == PCI_BASE_ADDRESS_MEM_TYPE_64)
+	if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		return pci_bar_mem64;
 	return pci_bar_mem32;
 }
@@ -304,9 +236,8 @@
 		} else {
 			res->start = l64;
 			res->end = l64 + sz64;
-			printk(KERN_DEBUG "PCI: %s reg %x 64bit mmio: [%llx, %llx]\n",
-				pci_name(dev), pos, (unsigned long long)res->start,
-				(unsigned long long)res->end);
+			dev_printk(KERN_DEBUG, &dev->dev,
+				"reg %x 64bit mmio: %pR\n", pos, res);
 		}
 	} else {
 		sz = pci_size(l, sz, mask);
@@ -316,9 +247,10 @@
 
 		res->start = l;
 		res->end = l + sz;
-		printk(KERN_DEBUG "PCI: %s reg %x %s: [%llx, %llx]\n", pci_name(dev),
-			pos, (res->flags & IORESOURCE_IO) ? "io port":"32bit mmio",
-			(unsigned long long)res->start, (unsigned long long)res->end);
+
+		dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
+			(res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
+			res);
 	}
 
  out:
@@ -389,9 +321,7 @@
 			res->start = base;
 		if (!res->end)
 			res->end = limit + 0xfff;
-		printk(KERN_DEBUG "PCI: bridge %s io port: [%llx, %llx]\n",
-			pci_name(dev), (unsigned long long) res->start,
-			(unsigned long long) res->end);
+		dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
 	}
 
 	res = child->resource[1];
@@ -403,9 +333,8 @@
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
 		res->start = base;
 		res->end = limit + 0xfffff;
-		printk(KERN_DEBUG "PCI: bridge %s 32bit mmio: [%llx, %llx]\n",
-			pci_name(dev), (unsigned long long) res->start,
-			(unsigned long long) res->end);
+		dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
+			res);
 	}
 
 	res = child->resource[2];
@@ -441,9 +370,9 @@
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
 		res->start = base;
 		res->end = limit + 0xfffff;
-		printk(KERN_DEBUG "PCI: bridge %s %sbit mmio pref: [%llx, %llx]\n",
-			pci_name(dev), (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
-			(unsigned long long) res->start, (unsigned long long) res->end);
+		dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
+			(res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
+			res);
 	}
 }
 
@@ -551,19 +480,27 @@
 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
 	u32 buses, i, j = 0;
 	u16 bctl;
+	int broken = 0;
 
 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
 
 	dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
 		buses & 0xffffff, pass);
 
+	/* Check if setup is sensible at all */
+	if (!pass &&
+	    ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
+		dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
+		broken = 1;
+	}
+
 	/* Disable MasterAbortMode during probing to avoid reporting
 	   of bus errors (in some architectures) */ 
 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
 
-	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
+	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
 		unsigned int cmax, busnr;
 		/*
 		 * Bus already configured by firmware, process it in the first
@@ -601,7 +538,7 @@
 		 * do in the second pass.
 		 */
 		if (!pass) {
-			if (pcibios_assign_all_busses())
+			if (pcibios_assign_all_busses() || broken)
 				/* Temporarily disable forwarding of the
 				   configuration cycles on all bridges in
 				   this bus segment to avoid possible
@@ -764,7 +701,7 @@
 	dev->class = class;
 	class >>= 8;
 
-	dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n",
+	dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
 		 dev->vendor, dev->device, class, dev->hdr_type);
 
 	/* "Unknown power state" */
@@ -846,6 +783,11 @@
 	return 0;
 }
 
+static void pci_release_capabilities(struct pci_dev *dev)
+{
+	pci_vpd_release(dev);
+}
+
 /**
  * pci_release_dev - free a pci device structure when all users of it are finished.
  * @dev: device that's been disconnected
@@ -858,7 +800,7 @@
 	struct pci_dev *pci_dev;
 
 	pci_dev = to_pci_dev(dev);
-	pci_vpd_release(pci_dev);
+	pci_release_capabilities(pci_dev);
 	kfree(pci_dev);
 }
 
@@ -889,8 +831,9 @@
 int pci_cfg_space_size_ext(struct pci_dev *dev)
 {
 	u32 status;
+	int pos = PCI_CFG_SPACE_SIZE;
 
-	if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
+	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
 		goto fail;
 	if (status == 0xffffffff)
 		goto fail;
@@ -938,8 +881,6 @@
 
 	INIT_LIST_HEAD(&dev->bus_list);
 
-	pci_msi_init_pci_dev(dev);
-
 	return dev;
 }
 EXPORT_SYMBOL(alloc_pci_dev);
@@ -951,6 +892,7 @@
 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
 {
 	struct pci_dev *dev;
+	struct pci_slot *slot;
 	u32 l;
 	u8 hdr_type;
 	int delay = 1;
@@ -999,6 +941,10 @@
 	dev->error_state = pci_channel_io_normal;
 	set_pcie_port_type(dev);
 
+	list_for_each_entry(slot, &bus->slots, list)
+		if (PCI_SLOT(devfn) == slot->number)
+			dev->slot = slot;
+
 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
 	   set this higher, assuming the system even supports it.  */
 	dev->dma_mask = 0xffffffff;
@@ -1007,9 +953,22 @@
 		return NULL;
 	}
 
+	return dev;
+}
+
+static void pci_init_capabilities(struct pci_dev *dev)
+{
+	/* MSI/MSI-X list */
+	pci_msi_init_pci_dev(dev);
+
+	/* Power Management */
+	pci_pm_init(dev);
+
+	/* Vital Product Data */
 	pci_vpd_pci22_init(dev);
 
-	return dev;
+	/* Alternative Routing-ID Forwarding */
+	pci_enable_ari(dev);
 }
 
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1028,8 +987,8 @@
 	/* Fix up broken headers */
 	pci_fixup_device(pci_fixup_header, dev);
 
-	/* Initialize power management of the device */
-	pci_pm_init(dev);
+	/* Initialize various capabilities */
+	pci_init_capabilities(dev);
 
 	/*
 	 * Add the device to our list of discovered devices
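
The new "broken" check in pci_scan_bridge() above decodes the PCI_PRIMARY_BUS
dword, which packs the primary bus number in bits 7:0 and the secondary bus
number in bits 15:8. A restatement of that condition as a stand-alone helper
(the name is invented for illustration):

static bool bridge_buses_look_broken(u32 buses, unsigned char parent_busnr)
{
	unsigned char primary   = buses & 0xff;		/* must equal the parent bus */
	unsigned char secondary = (buses >> 8) & 0xff;	/* must lie beyond the parent bus */

	/* e.g. firmware left the register at 0 under bus 0:
	 * secondary (0) <= parent (0), so the bridge is reconfigured */
	return primary != parent_busnr || secondary <= parent_busnr;
}
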
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e872ac9..96cf8ec 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,14 @@
 #include <linux/kallsyms.h>
 #include "pci.h"
 
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+int pcie_mch_quirk;
+EXPORT_SYMBOL(pcie_mch_quirk);
+
+#ifdef CONFIG_PCI_QUIRKS
 /* The Mellanox Tavor device gives false positive parity errors
  * Mark this device with a broken_parity_status, to allow
  * PCI scanning code to "skip" this now blacklisted device.
@@ -35,6 +43,20 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
 
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		dev_info(&dev->dev,
+			"VIA PCI bridge detected. Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
 /* Deal with broken BIOS'es that neglect to enable passive release,
    which can cause problems in combination with the 82441FX/PPro MTRRs */
 static void quirk_passive_release(struct pci_dev *dev)
@@ -62,8 +84,6 @@
     
     This appears to be BIOS not version dependent. So presumably there is a 
     chipset level fix */
-int isa_dma_bridge_buggy;
-EXPORT_SYMBOL(isa_dma_bridge_buggy);
     
 static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
 {
@@ -84,9 +104,6 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_2,	quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_3,	quirk_isa_dma_hangs);
 
-int pci_pci_problems;
-EXPORT_SYMBOL(pci_pci_problems);
-
 /*
  *	Chipsets where PCI->PCI transfers vanish or hang
  */
@@ -1362,9 +1379,6 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_EESSC,	quirk_alder_ioapic);
 #endif
 
-int pcie_mch_quirk;
-EXPORT_SYMBOL(pcie_mch_quirk);
-
 static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
 {
 	pcie_mch_quirk = 1;
@@ -1555,84 +1569,6 @@
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
 
-static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
-{
-	while (f < end) {
-		if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
- 		    (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
-#ifdef DEBUG
-			dev_dbg(&dev->dev, "calling %pF\n", f->hook);
-#endif
-			f->hook(dev);
-		}
-		f++;
-	}
-}
-
-extern struct pci_fixup __start_pci_fixups_early[];
-extern struct pci_fixup __end_pci_fixups_early[];
-extern struct pci_fixup __start_pci_fixups_header[];
-extern struct pci_fixup __end_pci_fixups_header[];
-extern struct pci_fixup __start_pci_fixups_final[];
-extern struct pci_fixup __end_pci_fixups_final[];
-extern struct pci_fixup __start_pci_fixups_enable[];
-extern struct pci_fixup __end_pci_fixups_enable[];
-extern struct pci_fixup __start_pci_fixups_resume[];
-extern struct pci_fixup __end_pci_fixups_resume[];
-extern struct pci_fixup __start_pci_fixups_resume_early[];
-extern struct pci_fixup __end_pci_fixups_resume_early[];
-extern struct pci_fixup __start_pci_fixups_suspend[];
-extern struct pci_fixup __end_pci_fixups_suspend[];
-
-
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
-{
-	struct pci_fixup *start, *end;
-
-	switch(pass) {
-	case pci_fixup_early:
-		start = __start_pci_fixups_early;
-		end = __end_pci_fixups_early;
-		break;
-
-	case pci_fixup_header:
-		start = __start_pci_fixups_header;
-		end = __end_pci_fixups_header;
-		break;
-
-	case pci_fixup_final:
-		start = __start_pci_fixups_final;
-		end = __end_pci_fixups_final;
-		break;
-
-	case pci_fixup_enable:
-		start = __start_pci_fixups_enable;
-		end = __end_pci_fixups_enable;
-		break;
-
-	case pci_fixup_resume:
-		start = __start_pci_fixups_resume;
-		end = __end_pci_fixups_resume;
-		break;
-
-	case pci_fixup_resume_early:
-		start = __start_pci_fixups_resume_early;
-		end = __end_pci_fixups_resume_early;
-		break;
-
-	case pci_fixup_suspend:
-		start = __start_pci_fixups_suspend;
-		end = __end_pci_fixups_suspend;
-		break;
-
-	default:
-		/* stupid compiler warning, you would think with an enum... */
-		return;
-	}
-	pci_do_fixups(dev, start, end);
-}
-EXPORT_SYMBOL(pci_fixup_device);
-
 /* Enable 1k I/O space granularity on the Intel P64H2 */
 static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
 {
@@ -2006,3 +1942,82 @@
 			quirk_msi_intx_disable_bug);
 
 #endif /* CONFIG_PCI_MSI */
+
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
+{
+	while (f < end) {
+		if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
+ 		    (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
+			dev_dbg(&dev->dev, "calling %pF\n", f->hook);
+			f->hook(dev);
+		}
+		f++;
+	}
+}
+
+extern struct pci_fixup __start_pci_fixups_early[];
+extern struct pci_fixup __end_pci_fixups_early[];
+extern struct pci_fixup __start_pci_fixups_header[];
+extern struct pci_fixup __end_pci_fixups_header[];
+extern struct pci_fixup __start_pci_fixups_final[];
+extern struct pci_fixup __end_pci_fixups_final[];
+extern struct pci_fixup __start_pci_fixups_enable[];
+extern struct pci_fixup __end_pci_fixups_enable[];
+extern struct pci_fixup __start_pci_fixups_resume[];
+extern struct pci_fixup __end_pci_fixups_resume[];
+extern struct pci_fixup __start_pci_fixups_resume_early[];
+extern struct pci_fixup __end_pci_fixups_resume_early[];
+extern struct pci_fixup __start_pci_fixups_suspend[];
+extern struct pci_fixup __end_pci_fixups_suspend[];
+
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+	struct pci_fixup *start, *end;
+
+	switch(pass) {
+	case pci_fixup_early:
+		start = __start_pci_fixups_early;
+		end = __end_pci_fixups_early;
+		break;
+
+	case pci_fixup_header:
+		start = __start_pci_fixups_header;
+		end = __end_pci_fixups_header;
+		break;
+
+	case pci_fixup_final:
+		start = __start_pci_fixups_final;
+		end = __end_pci_fixups_final;
+		break;
+
+	case pci_fixup_enable:
+		start = __start_pci_fixups_enable;
+		end = __end_pci_fixups_enable;
+		break;
+
+	case pci_fixup_resume:
+		start = __start_pci_fixups_resume;
+		end = __end_pci_fixups_resume;
+		break;
+
+	case pci_fixup_resume_early:
+		start = __start_pci_fixups_resume_early;
+		end = __end_pci_fixups_resume_early;
+		break;
+
+	case pci_fixup_suspend:
+		start = __start_pci_fixups_suspend;
+		end = __end_pci_fixups_suspend;
+		break;
+
+	default:
+		/* stupid compiler warning, you would think with an enum... */
+		return;
+	}
+	pci_do_fixups(dev, start, end);
+}
+#else
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
+#endif
+EXPORT_SYMBOL(pci_fixup_device);
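
pci_fixup_device(), now built only under CONFIG_PCI_QUIRKS, walks per-pass
tables populated by the DECLARE_PCI_FIXUP_* macros. A hypothetical entry,
purely for illustration (the vendor and device IDs are made up):

static void __devinit quirk_example_log(struct pci_dev *dev)
{
	dev_info(&dev->dev, "example quirk matched\n");
}
/* lands in the final-pass table walked by pci_fixup_device(pci_fixup_final, dev) */
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example_log);
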
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index bdc2a44..042e089 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -73,6 +73,7 @@
 	up_write(&pci_bus_sem);
 	pci_remove_legacy_files(pci_bus);
 	device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
+	device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
 	device_unregister(&pci_bus->dev);
 }
 EXPORT_SYMBOL(pci_remove_bus);
@@ -114,13 +115,9 @@
 {
 	struct list_head *l, *n;
 
-	if (dev->subordinate) {
-		list_for_each_safe(l, n, &dev->subordinate->devices) {
-			struct pci_dev *dev = pci_dev_b(l);
-
-			pci_remove_bus_device(dev);
-		}
-	}
+	if (dev->subordinate)
+		list_for_each_safe(l, n, &dev->subordinate->devices)
+			pci_remove_bus_device(pci_dev_b(l));
 }
 
 static void pci_stop_bus_devices(struct pci_bus *bus)
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index bd5c0e0..1f5f614 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -21,7 +21,7 @@
  * between the ROM and other resources, so enabling it may disable access
  * to MMIO registers or other card memory.
  */
-static int pci_enable_rom(struct pci_dev *pdev)
+int pci_enable_rom(struct pci_dev *pdev)
 {
 	struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
 	struct pci_bus_region region;
@@ -45,7 +45,7 @@
  * Disable ROM decoding on a PCI device by turning off the last bit in the
  * ROM BAR.
  */
-static void pci_disable_rom(struct pci_dev *pdev)
+void pci_disable_rom(struct pci_dev *pdev)
 {
 	u32 rom_addr;
 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
@@ -260,3 +260,5 @@
 
 EXPORT_SYMBOL(pci_map_rom);
 EXPORT_SYMBOL(pci_unmap_rom);
+EXPORT_SYMBOL_GPL(pci_enable_rom);
+EXPORT_SYMBOL_GPL(pci_disable_rom);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 4edfc47..5af8bd5 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -166,6 +166,7 @@
 {
 	struct pci_dev *pdev;
 
+	pci_dev_get(from);
 	pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
 	pci_dev_put(pdev);
 	return pdev;
@@ -270,12 +271,8 @@
 	struct pci_dev *pdev = NULL;
 
 	WARN_ON(in_interrupt());
-	if (from) {
-		/* FIXME
-		 * take the cast off, when bus_find_device is made const.
-		 */
-		dev_start = (struct device *)&from->dev;
-	}
+	if (from)
+		dev_start = &from->dev;
 	dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
 			      match_pci_dev_by_id);
 	if (dev)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index d5e2106..ea979f2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -299,7 +299,7 @@
 
 			if (r->parent || !(r->flags & IORESOURCE_IO))
 				continue;
-			r_size = r->end - r->start + 1;
+			r_size = resource_size(r);
 
 			if (r_size < 0x400)
 				/* Might be re-aligned for ISA */
@@ -350,16 +350,13 @@
 
 			if (r->parent || (r->flags & mask) != type)
 				continue;
-			r_size = r->end - r->start + 1;
+			r_size = resource_size(r);
 			/* For bridges size != alignment */
 			align = resource_alignment(r);
 			order = __ffs(align) - 20;
 			if (order > 11) {
 				dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
-				       "%#016llx-%#016llx\n", i,
-				       (unsigned long long)align,
-				       (unsigned long long)r->start,
-				       (unsigned long long)r->end);
+					 "%pR\n", i, (unsigned long long)align, r);
 				r->flags = 0;
 				continue;
 			}
@@ -539,11 +536,9 @@
                 if (!res)
                         continue;
 
-		printk(KERN_INFO "bus: %02x index %x %s: [%llx, %llx]\n",
-			bus->number, i,
-			(res->flags & IORESOURCE_IO) ? "io port" : "mmio",
-			(unsigned long long) res->start,
-			(unsigned long long) res->end);
+		printk(KERN_INFO "bus: %02x index %x %s: %pR\n",
+		       bus->number, i,
+		       (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res);
         }
 }
 
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1a5fc83..2dbd96c 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -49,10 +49,8 @@
 
 	pcibios_resource_to_bus(dev, &region, res);
 
-	dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] "
-		"flags %#lx\n", resno,
-		 (unsigned long long)res->start,
-		 (unsigned long long)res->end,
+	dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
+		"flags %#lx\n", resno, res,
 		 (unsigned long long)region.start,
 		 (unsigned long long)region.end,
 		 (unsigned long)res->flags);
@@ -114,13 +112,11 @@
 		err = insert_resource(root, res);
 
 	if (err) {
-		dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n",
+		dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
 			resource,
 			root ? "address space collision on" :
 				"no parent found for",
-			dtype,
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+			dtype, res);
 	}
 
 	return err;
@@ -133,15 +129,14 @@
 	resource_size_t size, min, align;
 	int ret;
 
-	size = res->end - res->start + 1;
+	size = resource_size(res);
 	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
 
 	align = resource_alignment(res);
 	if (!align) {
 		dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
-			"alignment) [%#llx-%#llx] flags %#lx\n",
-			resno, (unsigned long long)res->start,
-			(unsigned long long)res->end, res->flags);
+			"alignment) %pR flags %#lx\n",
+			resno, res, res->flags);
 		return -EINVAL;
 	}
 
@@ -162,11 +157,8 @@
 	}
 
 	if (ret) {
-		dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
-			"[%#llx-%#llx]\n", resno,
-			res->flags & IORESOURCE_IO ? "I/O" : "mem",
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+		dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
 	} else {
 		res->flags &= ~IORESOURCE_STARTALIGN;
 		if (resno < PCI_BRIDGE_RESOURCES)
@@ -202,11 +194,8 @@
 	}
 
 	if (ret) {
-		dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
-			"[%#llx-%#llx\n]", resno,
-			res->flags & IORESOURCE_IO ? "I/O" : "mem",
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+		dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
 	} else if (resno < PCI_BRIDGE_RESOURCES) {
 		pci_update_resource(dev, res, resno);
 	}
@@ -237,9 +226,8 @@
 		r_align = resource_alignment(r);
 		if (!r_align) {
 			dev_warn(&dev->dev, "BAR %d: bogus alignment "
-				"[%#llx-%#llx] flags %#lx\n",
-				i, (unsigned long long)r->start,
-				(unsigned long long)r->end, r->flags);
+				"%pR flags %#lx\n",
+				i, r, r->flags);
 			continue;
 		}
 		for (list = head; ; list = list->next) {
@@ -287,9 +275,7 @@
 
 		if (!r->parent) {
 			dev_err(&dev->dev, "device not available because of "
-				"BAR %d [%#llx-%#llx] collisions\n", i,
-				(unsigned long long) r->start,
-				(unsigned long long) r->end);
+				"BAR %d %pR collisions\n", i, r);
 			return -EINVAL;
 		}
 
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 7e5b85c..4dd1c3e 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,11 +49,16 @@
 
 static void pci_slot_release(struct kobject *kobj)
 {
+	struct pci_dev *dev;
 	struct pci_slot *slot = to_pci_slot(kobj);
 
 	pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
 		 slot->bus->number, slot->number);
 
+	list_for_each_entry(dev, &slot->bus->devices, bus_list)
+		if (PCI_SLOT(dev->devfn) == slot->number)
+			dev->slot = NULL;
+
 	list_del(&slot->list);
 
 	kfree(slot);
@@ -73,18 +78,100 @@
 	.default_attrs = pci_slot_default_attrs,
 };
 
+static char *make_slot_name(const char *name)
+{
+	char *new_name;
+	int len, max, dup;
+
+	new_name = kstrdup(name, GFP_KERNEL);
+	if (!new_name)
+		return NULL;
+
+	/*
+	 * Make sure we hit the realloc case the first time through the
+	 * loop.  'len' will be strlen(name) + 3 at that point which is
+	 * enough space for "name-X" and the trailing NUL.
+	 */
+	len = strlen(name) + 2;
+	max = 1;
+	dup = 1;
+
+	for (;;) {
+		struct kobject *dup_slot;
+		dup_slot = kset_find_obj(pci_slots_kset, new_name);
+		if (!dup_slot)
+			break;
+		kobject_put(dup_slot);
+		if (dup == max) {
+			len++;
+			max *= 10;
+			kfree(new_name);
+			new_name = kmalloc(len, GFP_KERNEL);
+			if (!new_name)
+				break;
+		}
+		sprintf(new_name, "%s-%d", name, dup++);
+	}
+
+	return new_name;
+}
+
+static int rename_slot(struct pci_slot *slot, const char *name)
+{
+	int result = 0;
+	char *slot_name;
+
+	if (strcmp(pci_slot_name(slot), name) == 0)
+		return result;
+
+	slot_name = make_slot_name(name);
+	if (!slot_name)
+		return -ENOMEM;
+
+	result = kobject_rename(&slot->kobj, slot_name);
+	kfree(slot_name);
+
+	return result;
+}
+
+static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
+{
+	struct pci_slot *slot;
+	/*
+	 * We already hold pci_bus_sem so don't worry
+	 */
+	list_for_each_entry(slot, &parent->slots, list)
+		if (slot->number == slot_nr) {
+			kobject_get(&slot->kobj);
+			return slot;
+		}
+
+	return NULL;
+}
+
 /**
  * pci_create_slot - create or increment refcount for physical PCI slot
  * @parent: struct pci_bus of parent bridge
  * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
  * @name: user visible string presented in /sys/bus/pci/slots/<name>
+ * @hotplug: set if caller is hotplug driver, NULL otherwise
  *
  * PCI slots have first class attributes such as address, speed, width,
  * and a &struct pci_slot is used to manage them. This interface will
  * either return a new &struct pci_slot to the caller, or if the pci_slot
  * already exists, its refcount will be incremented.
  *
- * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple.
+ * Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
+ *
+ * There are known platforms with broken firmware that assign the same
+ * name to multiple slots. Workaround these broken platforms by renaming
+ * the slots on behalf of the caller. If firmware assigns name N to
+ * multiple slots:
+ *
+ * The first slot is assigned N
+ * The second slot is assigned N-1
+ * The third slot is assigned N-2
+ * etc.
  *
  * Placeholder slots:
  * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
@@ -93,71 +180,82 @@
  * the slot. In this scenario, the caller may pass -1 for @slot_nr.
  *
  * The following semantics are imposed when the caller passes @slot_nr ==
- * -1. First, the check for existing %struct pci_slot is skipped, as the
- * caller may know about several unpopulated slots on a given %struct
- * pci_bus, and each slot would have a @slot_nr of -1.  Uniqueness for
- * these slots is then determined by the @name parameter. We expect
- * kobject_init_and_add() to warn us if the caller attempts to create
- * multiple slots with the same name. The other change in semantics is
+ * -1. First, we no longer check for an existing %struct pci_slot, as there
+ * may be many slots with @slot_nr of -1.  The other change in semantics is
  * user-visible, which is the 'address' parameter presented in sysfs will
  * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
  * %struct pci_bus and bb is the bus number. In other words, the devfn of
  * the 'placeholder' slot will not be displayed.
  */
-
 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
-				 const char *name)
+				 const char *name,
+				 struct hotplug_slot *hotplug)
 {
+	struct pci_dev *dev;
 	struct pci_slot *slot;
-	int err;
+	int err = 0;
+	char *slot_name = NULL;
 
 	down_write(&pci_bus_sem);
 
 	if (slot_nr == -1)
 		goto placeholder;
 
-	/* If we've already created this slot, bump refcount and return. */
-	list_for_each_entry(slot, &parent->slots, list) {
-		if (slot->number == slot_nr) {
-			kobject_get(&slot->kobj);
-			pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n",
-				 __func__,
-				 atomic_read(&slot->kobj.kref.refcount),
-				 pci_domain_nr(parent), parent->number,
-				 slot_nr);
-			goto out;
+	/*
+	 * Hotplug drivers are allowed to rename an existing slot,
+	 * but only if not already claimed.
+	 */
+	slot = get_slot(parent, slot_nr);
+	if (slot) {
+		if (hotplug) {
+			if ((err = slot->hotplug ? -EBUSY : 0)
+			     || (err = rename_slot(slot, name))) {
+				kobject_put(&slot->kobj);
+				slot = NULL;
+				goto err;
+			}
 		}
+		goto out;
 	}
 
 placeholder:
 	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 	if (!slot) {
-		slot = ERR_PTR(-ENOMEM);
-		goto out;
+		err = -ENOMEM;
+		goto err;
 	}
 
 	slot->bus = parent;
 	slot->number = slot_nr;
 
 	slot->kobj.kset = pci_slots_kset;
-	err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
-				   "%s", name);
-	if (err) {
-		printk(KERN_ERR "Unable to register kobject %s\n", name);
+
+	slot_name = make_slot_name(name);
+	if (!slot_name) {
+		err = -ENOMEM;
 		goto err;
 	}
 
+	err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
+				   "%s", slot_name);
+	if (err)
+		goto err;
+
 	INIT_LIST_HEAD(&slot->list);
 	list_add(&slot->list, &parent->slots);
 
+	list_for_each_entry(dev, &parent->devices, bus_list)
+		if (PCI_SLOT(dev->devfn) == slot_nr)
+			dev->slot = slot;
+
 	/* Don't care if debug printk has a -1 for slot_nr */
 	pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
 		 __func__, pci_domain_nr(parent), parent->number, slot_nr);
 
- out:
+out:
 	up_write(&pci_bus_sem);
 	return slot;
- err:
+err:
 	kfree(slot);
 	slot = ERR_PTR(err);
 	goto out;
@@ -165,7 +263,7 @@
 EXPORT_SYMBOL_GPL(pci_create_slot);
 
 /**
- * pci_update_slot_number - update %struct pci_slot -> number
+ * pci_renumber_slot - update %struct pci_slot -> number
  * @slot - %struct pci_slot to update
  * @slot_nr - new number for slot
  *
@@ -173,27 +271,22 @@
  * created a placeholder slot in pci_create_slot() by passing a -1 as
  * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
  */
-
-void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
+void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
 {
-	int name_count = 0;
 	struct pci_slot *tmp;
 
 	down_write(&pci_bus_sem);
 
 	list_for_each_entry(tmp, &slot->bus->slots, list) {
 		WARN_ON(tmp->number == slot_nr);
-		if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj)))
-			name_count++;
+		goto out;
 	}
 
-	if (name_count > 1)
-		printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
-
 	slot->number = slot_nr;
+out:
 	up_write(&pci_bus_sem);
 }
-EXPORT_SYMBOL_GPL(pci_update_slot_number);
+EXPORT_SYMBOL_GPL(pci_renumber_slot);
 
 /**
  * pci_destroy_slot - decrement refcount for physical PCI slot
@@ -203,7 +296,6 @@
  * just call kobject_put on its kobj and let our release methods do the
  * rest.
  */
-
 void pci_destroy_slot(struct pci_slot *slot)
 {
 	pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index b46c60b..23e492b 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -70,7 +70,7 @@
 pxa2xx-obj-$(CONFIG_PXA_SHARPSL)		+= pxa2xx_sharpsl.o
 pxa2xx-obj-$(CONFIG_MACH_ARMCORE)		+= pxa2xx_cm_x2xx_cs.o
 pxa2xx-obj-$(CONFIG_ARCH_VIPER)			+= pxa2xx_viper.o
-pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA)		+= pxa2xx_trizeps.o
+pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA)		+= pxa2xx_trizeps4.o
 pxa2xx-obj-$(CONFIG_MACH_PALMTX)		+= pxa2xx_palmtx.o
 pxa2xx-obj-$(CONFIG_MACH_PALMLD)		+= pxa2xx_palmld.o
 
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index a0ffb8e..9e1140f 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -273,7 +273,7 @@
 			goto fail0d;
 		cf->socket.pci_irq = board->irq_pin;
 	} else
-		cf->socket.pci_irq = NR_IRQS + 1;
+		cf->socket.pci_irq = nr_irqs + 1;
 
 	/* pcmcia layer only remaps "real" memory not iospace */
 	cf->socket.io_offset = (unsigned long)
diff --git a/drivers/pcmcia/hd64465_ss.c b/drivers/pcmcia/hd64465_ss.c
index 117dc12..9ef69cd 100644
--- a/drivers/pcmcia/hd64465_ss.c
+++ b/drivers/pcmcia/hd64465_ss.c
@@ -233,15 +233,18 @@
  */
 static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
 {
+	struct irq_desc *desc;
+
     	DPRINTK("hs_map_irq(sock=%d irq=%d)\n", sp->number, irq);
 	
 	if (irq >= HS_NUM_MAPPED_IRQS)
 	    return;
 
+	desc = irq_to_desc(irq);
     	hs_mapped_irq[irq].sock = sp;
 	/* insert ourselves as the irq controller */
-	hs_mapped_irq[irq].old_handler = irq_desc[irq].chip;
-	irq_desc[irq].chip = &hd64465_ss_irq_type;
+	hs_mapped_irq[irq].old_handler = desc->chip;
+	desc->chip = &hd64465_ss_irq_type;
 }
 
 
@@ -250,13 +253,16 @@
  */
 static void hs_unmap_irq(hs_socket_t *sp, unsigned int irq)
 {
+	struct irq_desc *desc;
+
     	DPRINTK("hs_unmap_irq(sock=%d irq=%d)\n", sp->number, irq);
 	
 	if (irq >= HS_NUM_MAPPED_IRQS)
 	    return;
 		
+	desc = irq_to_desc(irq);
 	/* restore the original irq controller */
-	irq_desc[irq].chip = hs_mapped_irq[irq].old_handler;
+	desc->chip = hs_mapped_irq[irq].old_handler;
 }
 
 /*============================================================*/
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index eee2f1c..b2c4124 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -639,7 +639,7 @@
 		int irq;
 		options += 4;
 		irq = simple_strtoul(options, &options, 0);
-		if (irq >= 0 && irq < NR_IRQS)
+		if (irq >= 0 && irq < nr_irqs)
 			vrc4171_irq = irq;
 
 		if (*options != ',')
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 63bb579..8e0c2b4 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -51,7 +51,7 @@
 
 config BATTERY_TOSA
 	tristate "Sharp SL-6000 (tosa) battery"
-	depends on MACH_TOSA && MFD_TC6393XB
+	depends on MACH_TOSA && MFD_TC6393XB && TOUCHSCREEN_WM97XX
 	help
 	  Say Y to enable support for the battery on the Sharp Zaurus
 	  SL-6000 (tosa) models.
@@ -62,4 +62,10 @@
 	help
 	  Say Y to enable support for battery measured by WM97xx aux port.
 
+config BATTERY_BQ27x00
+	tristate "BQ27200 battery driver"
+	depends on I2C
+	help
+	  Say Y here to enable support for batteries with BQ27200(I2C) chip.
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 4e20026..e8f1ece 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -21,4 +21,5 @@
 obj-$(CONFIG_BATTERY_PMU)	+= pmu_battery.o
 obj-$(CONFIG_BATTERY_OLPC)	+= olpc_battery.o
 obj-$(CONFIG_BATTERY_TOSA)	+= tosa_battery.o
-obj-$(CONFIG_BATTERY_WM97XX)	+= wm97xx_battery.o
\ No newline at end of file
+obj-$(CONFIG_BATTERY_WM97XX)	+= wm97xx_battery.o
+obj-$(CONFIG_BATTERY_BQ27x00)	+= bq27x00_battery.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
new file mode 100644
index 0000000..0c056fcc
--- /dev/null
+++ b/drivers/power/bq27x00_battery.c
@@ -0,0 +1,381 @@
+/*
+ * BQ27x00 battery driver
+ *
+ * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+#include <linux/i2c.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_VERSION			"1.0.0"
+
+#define BQ27x00_REG_TEMP		0x06
+#define BQ27x00_REG_VOLT		0x08
+#define BQ27x00_REG_RSOC		0x0B /* Relative State-of-Charge */
+#define BQ27x00_REG_AI			0x14
+#define BQ27x00_REG_FLAGS		0x0A
+
+/* If the system has several batteries we need a different name for each
+ * of them...
+ */
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
+struct bq27x00_device_info;
+struct bq27x00_access_methods {
+	int (*read)(u8 reg, int *rt_value, int b_single,
+		struct bq27x00_device_info *di);
+};
+
+struct bq27x00_device_info {
+	struct device 		*dev;
+	int			id;
+	int			voltage_uV;
+	int			current_uA;
+	int			temp_C;
+	int			charge_rsoc;
+	struct bq27x00_access_methods	*bus;
+	struct power_supply	bat;
+
+	struct i2c_client	*client;
+};
+
+static enum power_supply_property bq27x00_battery_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_TEMP,
+};
+
+/*
+ * Common code for BQ27x00 devices
+ */
+
+static int bq27x00_read(u8 reg, int *rt_value, int b_single,
+			struct bq27x00_device_info *di)
+{
+	int ret;
+
+	ret = di->bus->read(reg, rt_value, b_single, di);
+	*rt_value = be16_to_cpu(*rt_value);
+
+	return ret;
+}
+
+/*
+ * Return the battery temperature in degrees Celsius
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
+{
+	int ret;
+	int temp = 0;
+
+	ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error reading temperature\n");
+		return ret;
+	}
+
+	return (temp >> 2) - 273;
+}
+
+/*
+ * Return the battery voltage in millivolts
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_voltage(struct bq27x00_device_info *di)
+{
+	int ret;
+	int volt = 0;
+
+	ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error reading voltage\n");
+		return ret;
+	}
+
+	return volt;
+}
+
+/*
+ * Return the battery average current
+ * Note that the current can also be negative
+ * Or 0 if something fails.
+ */
+static int bq27x00_battery_current(struct bq27x00_device_info *di)
+{
+	int ret;
+	int curr = 0;
+	int flags = 0;
+
+	ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error reading current\n");
+		return 0;
+	}
+	ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
+	if (ret < 0) {
+		dev_err(di->dev, "error reading flags\n");
+		return 0;
+	}
+	if ((flags & (1 << 7)) != 0) {
+		dev_dbg(di->dev, "negative current!\n");
+		return -curr;
+	}
+	return curr;
+}
+
+/*
+ * Return the battery Relative State-of-Charge
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_rsoc(struct bq27x00_device_info *di)
+{
+	int ret;
+	int rsoc = 0;
+
+	ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di);
+	if (ret) {
+		dev_err(di->dev, "error reading relative State-of-Charge\n");
+		return ret;
+	}
+
+	return rsoc >> 8;
+}
+
+#define to_bq27x00_device_info(x) container_of((x), \
+				struct bq27x00_device_info, bat);
+
+static int bq27x00_battery_get_property(struct power_supply *psy,
+					enum power_supply_property psp,
+					union power_supply_propval *val)
+{
+	struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = bq27x00_battery_voltage(di);
+		if (psp == POWER_SUPPLY_PROP_PRESENT)
+			val->intval = val->intval <= 0 ? 0 : 1;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = bq27x00_battery_current(di);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = bq27x00_battery_rsoc(di);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = bq27x00_battery_temperature(di);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
+{
+	di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+	di->bat.properties = bq27x00_battery_props;
+	di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props);
+	di->bat.get_property = bq27x00_battery_get_property;
+	di->bat.external_power_changed = NULL;
+}
+
+/*
+ * BQ27200 specific code
+ */
+
+static int bq27200_read(u8 reg, int *rt_value, int b_single,
+			struct bq27x00_device_info *di)
+{
+	struct i2c_client *client = di->client;
+	struct i2c_msg msg[1];
+	unsigned char data[2];
+	int err;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	msg->addr = client->addr;
+	msg->flags = 0;
+	msg->len = 1;
+	msg->buf = data;
+
+	data[0] = reg;
+	err = i2c_transfer(client->adapter, msg, 1);
+
+	if (err >= 0) {
+		if (!b_single)
+			msg->len = 2;
+		else
+			msg->len = 1;
+
+		msg->flags = I2C_M_RD;
+		err = i2c_transfer(client->adapter, msg, 1);
+		if (err >= 0) {
+			if (!b_single)
+				*rt_value = get_unaligned_be16(data);
+			else
+				*rt_value = data[0];
+
+			return 0;
+		}
+	}
+	return err;
+}
+
+static int bq27200_battery_probe(struct i2c_client *client,
+				 const struct i2c_device_id *id)
+{
+	char *name;
+	struct bq27x00_device_info *di;
+	struct bq27x00_access_methods *bus;
+	int num;
+	int retval = 0;
+
+	/* Get new ID for the new battery device */
+	retval = idr_pre_get(&battery_id, GFP_KERNEL);
+	if (retval == 0)
+		return -ENOMEM;
+	mutex_lock(&battery_mutex);
+	retval = idr_get_new(&battery_id, client, &num);
+	mutex_unlock(&battery_mutex);
+	if (retval < 0)
+		return retval;
+
+	name = kasprintf(GFP_KERNEL, "bq27200-%d", num);
+	if (!name) {
+		dev_err(&client->dev, "failed to allocate device name\n");
+		retval = -ENOMEM;
+		goto batt_failed_1;
+	}
+
+	di = kzalloc(sizeof(*di), GFP_KERNEL);
+	if (!di) {
+		dev_err(&client->dev, "failed to allocate device info data\n");
+		retval = -ENOMEM;
+		goto batt_failed_2;
+	}
+	di->id = num;
+
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+	if (!bus) {
+		dev_err(&client->dev, "failed to allocate access method "
+					"data\n");
+		retval = -ENOMEM;
+		goto batt_failed_3;
+	}
+
+	i2c_set_clientdata(client, di);
+	di->dev = &client->dev;
+	di->bat.name = name;
+	bus->read = &bq27200_read;
+	di->bus = bus;
+	di->client = client;
+
+	bq27x00_powersupply_init(di);
+
+	retval = power_supply_register(&client->dev, &di->bat);
+	if (retval) {
+		dev_err(&client->dev, "failed to register battery\n");
+		goto batt_failed_4;
+	}
+
+	dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+
+	return 0;
+
+batt_failed_4:
+	kfree(bus);
+batt_failed_3:
+	kfree(di);
+batt_failed_2:
+	kfree(name);
+batt_failed_1:
+	mutex_lock(&battery_mutex);
+	idr_remove(&battery_id, num);
+	mutex_unlock(&battery_mutex);
+
+	return retval;
+}
+
+static int bq27200_battery_remove(struct i2c_client *client)
+{
+	struct bq27x00_device_info *di = i2c_get_clientdata(client);
+
+	power_supply_unregister(&di->bat);
+
+	kfree(di->bat.name);
+
+	mutex_lock(&battery_mutex);
+	idr_remove(&battery_id, di->id);
+	mutex_unlock(&battery_mutex);
+
+	kfree(di);
+
+	return 0;
+}
+
+/*
+ * Module stuff
+ */
+
+static const struct i2c_device_id bq27200_id[] = {
+	{ "bq27200", 0 },
+	{},
+};
+
+static struct i2c_driver bq27200_battery_driver = {
+	.driver = {
+		.name = "bq27200-battery",
+	},
+	.probe = bq27200_battery_probe,
+	.remove = bq27200_battery_remove,
+	.id_table = bq27200_id,
+};
+
+static int __init bq27x00_battery_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&bq27200_battery_driver);
+	if (ret)
+		printk(KERN_ERR "Unable to register BQ27200 driver\n");
+
+	return ret;
+}
+module_init(bq27x00_battery_init);
+
+static void __exit bq27x00_battery_exit(void)
+{
+	i2c_del_driver(&bq27200_battery_driver);
+}
+module_exit(bq27x00_battery_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("BQ27x00 battery monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 0471ec7..d30bb76 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -334,13 +334,16 @@
 }
 
 #ifdef CONFIG_PM
+static int ac_wakeup_enabled;
+static int usb_wakeup_enabled;
+
 static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	if (device_may_wakeup(&pdev->dev)) {
 		if (ac_irq)
-			enable_irq_wake(ac_irq->start);
+			ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
 		if (usb_irq)
-			enable_irq_wake(usb_irq->start);
+			usb_wakeup_enabled = !enable_irq_wake(usb_irq->start);
 	}
 
 	return 0;
@@ -349,9 +352,9 @@
 static int pda_power_resume(struct platform_device *pdev)
 {
 	if (device_may_wakeup(&pdev->dev)) {
-		if (usb_irq)
+		if (usb_irq && usb_wakeup_enabled)
 			disable_irq_wake(usb_irq->start);
-		if (ac_irq)
+		if (ac_irq && ac_wakeup_enabled)
 			disable_irq_wake(ac_irq->start);
 	}
 
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 3007695..55200404 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -87,6 +87,30 @@
 	return error;
 }
 
+static int __power_supply_is_system_supplied(struct device *dev, void *data)
+{
+	union power_supply_propval ret = {0,};
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY) {
+		if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret))
+			return 0;
+		if (ret.intval)
+			return ret.intval;
+	}
+	return 0;
+}
+
+int power_supply_is_system_supplied(void)
+{
+	int error;
+
+	error = class_for_each_device(power_supply_class, NULL, NULL,
+				      __power_supply_is_system_supplied);
+
+	return error;
+}
+
 int power_supply_register(struct device *parent, struct power_supply *psy)
 {
 	int rc = 0;
@@ -148,6 +172,7 @@
 
 EXPORT_SYMBOL_GPL(power_supply_changed);
 EXPORT_SYMBOL_GPL(power_supply_am_i_supplied);
+EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
 EXPORT_SYMBOL_GPL(power_supply_register);
 EXPORT_SYMBOL_GPL(power_supply_unregister);
 
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index fe2aeb1..23ae846 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -30,7 +30,7 @@
 
 #define POWER_SUPPLY_ATTR(_name)					\
 {									\
-	.attr = { .name = #_name, .mode = 0444, .owner = THIS_MODULE },	\
+	.attr = { .name = #_name, .mode = 0444 },	\
 	.show = power_supply_show_property,				\
 	.store = NULL,							\
 }
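
A hypothetical consumer of the power_supply_is_system_supplied() helper
exported from power_supply_core.c above; it returns a non-zero value when any
non-battery supply reports itself online, and 0 when the system runs on
battery alone:

static bool example_on_external_power(void)
{
	/* > 0 when some AC/USB-class supply is online */
	return power_supply_is_system_supplied() > 0;
}
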
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 6f2f90e..06848b2 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -915,6 +915,22 @@
 
 EXPORT_SYMBOL_GPL(ps3av_video_mute);
 
+/* mute analog output only */
+int ps3av_audio_mute_analog(int mute)
+{
+	int i, res;
+
+	for (i = 0; i < ps3av->av_hw_conf.num_of_avmulti; i++) {
+		res = ps3av_cmd_av_audio_mute(1,
+			&ps3av->av_port[i + ps3av->av_hw_conf.num_of_hdmi],
+			mute);
+		if (res < 0)
+			return -1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ps3av_audio_mute_analog);
+
 int ps3av_audio_mute(int mute)
 {
 	return ps3av_set_audio_mute(mute ? PS3AV_CMD_MUTE_ON
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index 7f880c2..11eb503 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -660,9 +660,10 @@
 }
 
 /* default cs val */
-static const u8 ps3av_mode_cs_info[] = {
+u8 ps3av_mode_cs_info[] = {
 	0x00, 0x09, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00
 };
+EXPORT_SYMBOL_GPL(ps3av_mode_cs_info);
 
 #define CS_44	0x00
 #define CS_48	0x02
@@ -677,7 +678,7 @@
 			      u32 ch, u32 fs, u32 word_bits, u32 format,
 			      u32 source)
 {
-	int spdif_through, spdif_bitstream;
+	int spdif_through;
 	int i;
 
 	if (!(ch | fs | format | word_bits | source)) {
@@ -687,7 +688,6 @@
 		format = PS3AV_CMD_AUDIO_FORMAT_PCM;
 		source = PS3AV_CMD_AUDIO_SOURCE_SERIAL;
 	}
-	spdif_through = spdif_bitstream = 0;	/* XXX not supported */
 
 	/* audio mode */
 	memset(audio, 0, sizeof(*audio));
@@ -777,16 +777,17 @@
 		break;
 	}
 
+	/* non-audio bit */
+	spdif_through = audio->audio_cs_info[0] & 0x02;
+
 	/* pass through setting */
 	if (spdif_through &&
 	    (avport == PS3AV_CMD_AVPORT_SPDIF_0 ||
-	     avport == PS3AV_CMD_AVPORT_SPDIF_1)) {
+	     avport == PS3AV_CMD_AVPORT_SPDIF_1 ||
+	     avport == PS3AV_CMD_AVPORT_HDMI_0 ||
+	     avport == PS3AV_CMD_AVPORT_HDMI_1)) {
 		audio->audio_word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16;
-		audio->audio_source = PS3AV_CMD_AUDIO_SOURCE_SPDIF;
-		if (spdif_bitstream) {
-			audio->audio_format = PS3AV_CMD_AUDIO_FORMAT_BITSTREAM;
-			audio->audio_cs_info[0] |= CS_BIT;
-		}
+		audio->audio_format = PS3AV_CMD_AUDIO_FORMAT_BITSTREAM;
 	}
 }
 
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f660ef3..847481d 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -246,6 +246,16 @@
 	  platforms.  The support is integrated with the rest of
 	  the Menelaus driver; it's not separate module.
 
+config RTC_DRV_TWL4030
+	tristate "TI TWL4030/TWL5030/TPS659x0"
+	depends on RTC_CLASS && TWL4030_CORE
+	help
+	  If you say yes here you get support for the RTC on the
+	  TWL4030 family chips, used mostly with OMAP3 platforms.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-twl4030.
+
 config RTC_DRV_S35390A
 	tristate "Seiko Instruments S-35390A"
 	select BITREVERSE
@@ -610,6 +620,14 @@
 	help
 	  If you say yes here you get support for the Ricoh RS5C313 RTC chips.
 
+config RTC_DRV_PARISC
+	tristate "PA-RISC firmware RTC support"
+	depends on PARISC
+	help
+	  Say Y or M here to enable RTC support on PA-RISC systems using
+	  firmware calls. If you do not know what you are doing, you should
+	  just say Y.
+
 config RTC_DRV_PPC
        tristate "PowerPC machine dependent RTC support"
        depends on PPC
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d05928b..e9e8474 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -51,6 +51,7 @@
 obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
 obj-$(CONFIG_RTC_DRV_PL030)	+= rtc-pl030.o
 obj-$(CONFIG_RTC_DRV_PL031)	+= rtc-pl031.o
+obj-$(CONFIG_RTC_DRV_PARISC)	+= rtc-parisc.o
 obj-$(CONFIG_RTC_DRV_PPC)	+= rtc-ppc.o
 obj-$(CONFIG_RTC_DRV_R9701)	+= rtc-r9701.o
 obj-$(CONFIG_RTC_DRV_RS5C313)	+= rtc-rs5c313.o
@@ -62,6 +63,7 @@
 obj-$(CONFIG_RTC_DRV_SH)	+= rtc-sh.o
 obj-$(CONFIG_RTC_DRV_STK17TA8)	+= rtc-stk17ta8.o
 obj-$(CONFIG_RTC_DRV_TEST)	+= rtc-test.o
+obj-$(CONFIG_RTC_DRV_TWL4030)	+= rtc-twl4030.o
 obj-$(CONFIG_RTC_DRV_V3020)	+= rtc-v3020.o
 obj-$(CONFIG_RTC_DRV_VR41XX)	+= rtc-vr41xx.o
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 3708261..b5bf937 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -53,21 +53,21 @@
 	} while ((time != at91_sys_read(timereg)) ||
 			(date != at91_sys_read(calreg)));
 
-	tm->tm_sec  = BCD2BIN((time & AT91_RTC_SEC) >> 0);
-	tm->tm_min  = BCD2BIN((time & AT91_RTC_MIN) >> 8);
-	tm->tm_hour = BCD2BIN((time & AT91_RTC_HOUR) >> 16);
+	tm->tm_sec  = bcd2bin((time & AT91_RTC_SEC) >> 0);
+	tm->tm_min  = bcd2bin((time & AT91_RTC_MIN) >> 8);
+	tm->tm_hour = bcd2bin((time & AT91_RTC_HOUR) >> 16);
 
 	/*
 	 * The Calendar Alarm register does not have a field for
 	 * the year - so these will return an invalid value.  When an
 	 * alarm is set, at91_alarm_year will store the current year.
 	 */
-	tm->tm_year  = BCD2BIN(date & AT91_RTC_CENT) * 100;	/* century */
-	tm->tm_year += BCD2BIN((date & AT91_RTC_YEAR) >> 8);	/* year */
+	tm->tm_year  = bcd2bin(date & AT91_RTC_CENT) * 100;	/* century */
+	tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8);	/* year */
 
-	tm->tm_wday = BCD2BIN((date & AT91_RTC_DAY) >> 21) - 1;	/* day of the week [0-6], Sunday=0 */
-	tm->tm_mon  = BCD2BIN((date & AT91_RTC_MONTH) >> 16) - 1;
-	tm->tm_mday = BCD2BIN((date & AT91_RTC_DATE) >> 24);
+	tm->tm_wday = bcd2bin((date & AT91_RTC_DAY) >> 21) - 1;	/* day of the week [0-6], Sunday=0 */
+	tm->tm_mon  = bcd2bin((date & AT91_RTC_MONTH) >> 16) - 1;
+	tm->tm_mday = bcd2bin((date & AT91_RTC_DATE) >> 24);
 }
 
 /*
@@ -106,16 +106,16 @@
 	at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
 
 	at91_sys_write(AT91_RTC_TIMR,
-			  BIN2BCD(tm->tm_sec) << 0
-			| BIN2BCD(tm->tm_min) << 8
-			| BIN2BCD(tm->tm_hour) << 16);
+			  bin2bcd(tm->tm_sec) << 0
+			| bin2bcd(tm->tm_min) << 8
+			| bin2bcd(tm->tm_hour) << 16);
 
 	at91_sys_write(AT91_RTC_CALR,
-			  BIN2BCD((tm->tm_year + 1900) / 100)	/* century */
-			| BIN2BCD(tm->tm_year % 100) << 8	/* year */
-			| BIN2BCD(tm->tm_mon + 1) << 16		/* tm_mon starts at zero */
-			| BIN2BCD(tm->tm_wday + 1) << 21	/* day of the week [0-6], Sunday=0 */
-			| BIN2BCD(tm->tm_mday) << 24);
+			  bin2bcd((tm->tm_year + 1900) / 100)	/* century */
+			| bin2bcd(tm->tm_year % 100) << 8	/* year */
+			| bin2bcd(tm->tm_mon + 1) << 16		/* tm_mon starts at zero */
+			| bin2bcd(tm->tm_wday + 1) << 21	/* day of the week [0-6], Sunday=0 */
+			| bin2bcd(tm->tm_mday) << 24);
 
 	/* Restart Time/Calendar */
 	cr = at91_sys_read(AT91_RTC_CR);
@@ -162,13 +162,13 @@
 
 	at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
 	at91_sys_write(AT91_RTC_TIMALR,
-		  BIN2BCD(tm.tm_sec) << 0
-		| BIN2BCD(tm.tm_min) << 8
-		| BIN2BCD(tm.tm_hour) << 16
+		  bin2bcd(tm.tm_sec) << 0
+		| bin2bcd(tm.tm_min) << 8
+		| bin2bcd(tm.tm_hour) << 16
 		| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
 	at91_sys_write(AT91_RTC_CALALR,
-		  BIN2BCD(tm.tm_mon + 1) << 16		/* tm_mon starts at zero */
-		| BIN2BCD(tm.tm_mday) << 24
+		  bin2bcd(tm.tm_mon + 1) << 16		/* tm_mon starts at zero */
+		| bin2bcd(tm.tm_mday) << 24
 		| AT91_RTC_DATEEN | AT91_RTC_MTHEN);
 
 	if (alrm->enabled) {
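
The change above is the pattern repeated through the rest of the RTC drivers in this merge: the BCD2BIN()/BIN2BCD() macros are replaced by the bcd2bin()/bin2bcd() helpers. For reference, the helpers pack one decimal digit per nibble; the standalone sketch below mirrors the behaviour of the <linux/bcd.h> helpers (an illustration only, not the kernel header itself):

#include <stdio.h>

/* Packed BCD: 0x59 encodes decimal 59, one digit per nibble. */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	printf("bcd2bin(0x59) = %u\n", bcd2bin(0x59));   /* 59 */
	printf("bin2bcd(59)   = 0x%02x\n", bin2bcd(59)); /* 0x59 */
	return 0;
}
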
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index 189a018..d00a274 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -71,14 +71,14 @@
 
 	spin_unlock_irqrestore(&p->lock, flags);
 
-	BCD_TO_BIN(tm->tm_sec);
-	BCD_TO_BIN(tm->tm_min);
-	BCD_TO_BIN(tm->tm_hour);
-	BCD_TO_BIN(tm->tm_mday);
-	BCD_TO_BIN(tm->tm_mon);
-	BCD_TO_BIN(tm->tm_year);
-	BCD_TO_BIN(tm->tm_wday);
-	BCD_TO_BIN(century);
+	tm->tm_sec = bcd2bin(tm->tm_sec);
+	tm->tm_min = bcd2bin(tm->tm_min);
+	tm->tm_hour = bcd2bin(tm->tm_hour);
+	tm->tm_mday = bcd2bin(tm->tm_mday);
+	tm->tm_mon = bcd2bin(tm->tm_mon);
+	tm->tm_year = bcd2bin(tm->tm_year);
+	tm->tm_wday = bcd2bin(tm->tm_wday);
+	century = bcd2bin(century);
 
 	tm->tm_year += (century * 100);
 	tm->tm_year -= 1900;
@@ -106,13 +106,13 @@
 	min = tm->tm_min;
 	sec = tm->tm_sec;
 
-	BIN_TO_BCD(sec);
-	BIN_TO_BCD(min);
-	BIN_TO_BCD(hrs);
-	BIN_TO_BCD(day);
-	BIN_TO_BCD(mon);
-	BIN_TO_BCD(yrs);
-	BIN_TO_BCD(century);
+	sec = bin2bcd(sec);
+	min = bin2bcd(min);
+	hrs = bin2bcd(hrs);
+	day = bin2bcd(day);
+	mon = bin2bcd(mon);
+	yrs = bin2bcd(yrs);
+	century = bin2bcd(century);
 
 	spin_lock_irqsave(&p->lock, flags);
 
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 963ad0b..5549231 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -143,6 +143,43 @@
 
 /*----------------------------------------------------------------*/
 
+#ifdef RTC_PORT
+
+/* Most newer x86 systems have two register banks, the first used
+ * for RTC and NVRAM and the second only for NVRAM.  Caller must
+ * own rtc_lock ... and we won't worry about access during NMI.
+ */
+#define can_bank2	true
+
+static inline unsigned char cmos_read_bank2(unsigned char addr)
+{
+	outb(addr, RTC_PORT(2));
+	return inb(RTC_PORT(3));
+}
+
+static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
+{
+	outb(addr, RTC_PORT(2));
+	outb(val, RTC_PORT(3));
+}
+
+#else
+
+#define can_bank2	false
+
+static inline unsigned char cmos_read_bank2(unsigned char addr)
+{
+	return 0;
+}
+
+static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
+{
+}
+
+#endif
+
+/*----------------------------------------------------------------*/
+
 static int cmos_read_time(struct device *dev, struct rtc_time *t)
 {
 	/* REVISIT:  if the clock has a "century" register, use
@@ -203,26 +240,26 @@
 	/* REVISIT this assumes PC style usage:  always BCD */
 
 	if (((unsigned)t->time.tm_sec) < 0x60)
-		t->time.tm_sec = BCD2BIN(t->time.tm_sec);
+		t->time.tm_sec = bcd2bin(t->time.tm_sec);
 	else
 		t->time.tm_sec = -1;
 	if (((unsigned)t->time.tm_min) < 0x60)
-		t->time.tm_min = BCD2BIN(t->time.tm_min);
+		t->time.tm_min = bcd2bin(t->time.tm_min);
 	else
 		t->time.tm_min = -1;
 	if (((unsigned)t->time.tm_hour) < 0x24)
-		t->time.tm_hour = BCD2BIN(t->time.tm_hour);
+		t->time.tm_hour = bcd2bin(t->time.tm_hour);
 	else
 		t->time.tm_hour = -1;
 
 	if (cmos->day_alrm) {
 		if (((unsigned)t->time.tm_mday) <= 0x31)
-			t->time.tm_mday = BCD2BIN(t->time.tm_mday);
+			t->time.tm_mday = bcd2bin(t->time.tm_mday);
 		else
 			t->time.tm_mday = -1;
 		if (cmos->mon_alrm) {
 			if (((unsigned)t->time.tm_mon) <= 0x12)
-				t->time.tm_mon = BCD2BIN(t->time.tm_mon) - 1;
+				t->time.tm_mon = bcd2bin(t->time.tm_mon) - 1;
 			else
 				t->time.tm_mon = -1;
 		}
@@ -294,19 +331,19 @@
 	/* Writing 0xff means "don't care" or "match all".  */
 
 	mon = t->time.tm_mon + 1;
-	mon = (mon <= 12) ? BIN2BCD(mon) : 0xff;
+	mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
 
 	mday = t->time.tm_mday;
-	mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
+	mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
 
 	hrs = t->time.tm_hour;
-	hrs = (hrs < 24) ? BIN2BCD(hrs) : 0xff;
+	hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
 
 	min = t->time.tm_min;
-	min = (min < 60) ? BIN2BCD(min) : 0xff;
+	min = (min < 60) ? bin2bcd(min) : 0xff;
 
 	sec = t->time.tm_sec;
-	sec = (sec < 60) ? BIN2BCD(sec) : 0xff;
+	sec = (sec < 60) ? bin2bcd(sec) : 0xff;
 
 	spin_lock_irq(&rtc_lock);
 
@@ -491,12 +528,21 @@
 
 	if (unlikely(off >= attr->size))
 		return 0;
+	if (unlikely(off < 0))
+		return -EINVAL;
 	if ((off + count) > attr->size)
 		count = attr->size - off;
 
+	off += NVRAM_OFFSET;
 	spin_lock_irq(&rtc_lock);
-	for (retval = 0, off += NVRAM_OFFSET; count--; retval++, off++)
-		*buf++ = CMOS_READ(off);
+	for (retval = 0; count; count--, off++, retval++) {
+		if (off < 128)
+			*buf++ = CMOS_READ(off);
+		else if (can_bank2)
+			*buf++ = cmos_read_bank2(off);
+		else
+			break;
+	}
 	spin_unlock_irq(&rtc_lock);
 
 	return retval;
@@ -512,6 +558,8 @@
 	cmos = dev_get_drvdata(container_of(kobj, struct device, kobj));
 	if (unlikely(off >= attr->size))
 		return -EFBIG;
+	if (unlikely(off < 0))
+		return -EINVAL;
 	if ((off + count) > attr->size)
 		count = attr->size - off;
 
@@ -520,15 +568,20 @@
 	 * here.  If userspace is smart enough to know what fields of
 	 * NVRAM to update, updating checksums is also part of its job.
 	 */
+	off += NVRAM_OFFSET;
 	spin_lock_irq(&rtc_lock);
-	for (retval = 0, off += NVRAM_OFFSET; count--; retval++, off++) {
+	for (retval = 0; count; count--, off++, retval++) {
 		/* don't trash RTC registers */
 		if (off == cmos->day_alrm
 				|| off == cmos->mon_alrm
 				|| off == cmos->century)
 			buf++;
-		else
+		else if (off < 128)
 			CMOS_WRITE(*buf++, off);
+		else if (can_bank2)
+			cmos_write_bank2(*buf++, off);
+		else
+			break;
 	}
 	spin_unlock_irq(&rtc_lock);
 
@@ -539,7 +592,6 @@
 	.attr = {
 		.name	= "nvram",
 		.mode	= S_IRUGO | S_IWUSR,
-		.owner	= THIS_MODULE,
 	},
 
 	.read	= cmos_nvram_read,
@@ -631,8 +683,8 @@
 
 	/* Heuristic to deduce NVRAM size ... do what the legacy NVRAM
 	 * driver did, but don't reject unknown configs.   Old hardware
-	 * won't address 128 bytes, and for now we ignore the way newer
-	 * chips can address 256 bytes (using two more i/o ports).
+	 * won't address 128 bytes.  Newer chips have multiple banks,
+	 * though they may not be listed in one I/O resource.
 	 */
 #if	defined(CONFIG_ATARI)
 	address_space = 64;
@@ -642,6 +694,8 @@
 #warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
 	address_space = 128;
 #endif
+	if (can_bank2 && ports->end > (ports->start + 1))
+		address_space = 256;
 
 	/* For ACPI systems extension info comes from the FADT.  On others,
 	 * board specific setup provides it as appropriate.  Systems where
@@ -740,7 +794,7 @@
 		goto cleanup2;
 	}
 
-	pr_info("%s: alarms up to one %s%s%s\n",
+	pr_info("%s: alarms up to one %s%s, %zd bytes nvram, %s irqs\n",
 			cmos_rtc.rtc->dev.bus_id,
 			is_valid_irq(rtc_irq)
 				?  (cmos_rtc.mon_alrm
@@ -749,6 +803,7 @@
 						? "month" : "day"))
 				: "no",
 			cmos_rtc.century ? ", y3k" : "",
+			nvram.size,
 			is_hpet_enabled() ? ", hpet irqs" : "");
 
 	return 0;
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index 0b17770..9a234a4 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -86,19 +86,19 @@
 	ds1216_switch_ds_to_clock(priv->ioaddr);
 	ds1216_read(priv->ioaddr, (u8 *)&regs);
 
-	tm->tm_sec = BCD2BIN(regs.sec);
-	tm->tm_min = BCD2BIN(regs.min);
+	tm->tm_sec = bcd2bin(regs.sec);
+	tm->tm_min = bcd2bin(regs.min);
 	if (regs.hour & DS1216_HOUR_1224) {
 		/* AM/PM mode */
-		tm->tm_hour = BCD2BIN(regs.hour & 0x1f);
+		tm->tm_hour = bcd2bin(regs.hour & 0x1f);
 		if (regs.hour & DS1216_HOUR_AMPM)
 			tm->tm_hour += 12;
 	} else
-		tm->tm_hour = BCD2BIN(regs.hour & 0x3f);
+		tm->tm_hour = bcd2bin(regs.hour & 0x3f);
 	tm->tm_wday = (regs.wday & 7) - 1;
-	tm->tm_mday = BCD2BIN(regs.mday & 0x3f);
-	tm->tm_mon = BCD2BIN(regs.month & 0x1f);
-	tm->tm_year = BCD2BIN(regs.year);
+	tm->tm_mday = bcd2bin(regs.mday & 0x3f);
+	tm->tm_mon = bcd2bin(regs.month & 0x1f);
+	tm->tm_year = bcd2bin(regs.year);
 	if (tm->tm_year < 70)
 		tm->tm_year += 100;
 	return 0;
@@ -114,19 +114,19 @@
 	ds1216_read(priv->ioaddr, (u8 *)&regs);
 
 	regs.tsec = 0; /* clear 0.1 and 0.01 seconds */
-	regs.sec = BIN2BCD(tm->tm_sec);
-	regs.min = BIN2BCD(tm->tm_min);
+	regs.sec = bin2bcd(tm->tm_sec);
+	regs.min = bin2bcd(tm->tm_min);
 	regs.hour &= DS1216_HOUR_1224;
 	if (regs.hour && tm->tm_hour > 12) {
 		regs.hour |= DS1216_HOUR_AMPM;
 		tm->tm_hour -= 12;
 	}
-	regs.hour |= BIN2BCD(tm->tm_hour);
+	regs.hour |= bin2bcd(tm->tm_hour);
 	regs.wday &= ~7;
 	regs.wday |= tm->tm_wday;
-	regs.mday = BIN2BCD(tm->tm_mday);
-	regs.month = BIN2BCD(tm->tm_mon);
-	regs.year = BIN2BCD(tm->tm_year % 100);
+	regs.mday = bin2bcd(tm->tm_mday);
+	regs.month = bin2bcd(tm->tm_mon);
+	regs.year = bin2bcd(tm->tm_year % 100);
 
 	ds1216_switch_ds_to_clock(priv->ioaddr);
 	ds1216_write(priv->ioaddr, (u8 *)&regs);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index b939781..1845566 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -40,7 +40,7 @@
 #define	RTC_SCLK	0x0400
 
 #ifdef CONFIG_SH_SECUREEDGE5410
-#include <asm/snapgear.h>
+#include <mach/snapgear.h>
 #define set_dp(x)	SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
 #define get_dp()	SECUREEDGE_READ_IOPORT()
 #else
@@ -107,13 +107,13 @@
 
 	spin_lock_irq(&rtc->lock);
 
-	tm->tm_sec	= BCD2BIN(ds1302_readbyte(RTC_ADDR_SEC));
-	tm->tm_min	= BCD2BIN(ds1302_readbyte(RTC_ADDR_MIN));
-	tm->tm_hour	= BCD2BIN(ds1302_readbyte(RTC_ADDR_HOUR));
-	tm->tm_wday	= BCD2BIN(ds1302_readbyte(RTC_ADDR_DAY));
-	tm->tm_mday	= BCD2BIN(ds1302_readbyte(RTC_ADDR_DATE));
-	tm->tm_mon	= BCD2BIN(ds1302_readbyte(RTC_ADDR_MON)) - 1;
-	tm->tm_year	= BCD2BIN(ds1302_readbyte(RTC_ADDR_YEAR));
+	tm->tm_sec	= bcd2bin(ds1302_readbyte(RTC_ADDR_SEC));
+	tm->tm_min	= bcd2bin(ds1302_readbyte(RTC_ADDR_MIN));
+	tm->tm_hour	= bcd2bin(ds1302_readbyte(RTC_ADDR_HOUR));
+	tm->tm_wday	= bcd2bin(ds1302_readbyte(RTC_ADDR_DAY));
+	tm->tm_mday	= bcd2bin(ds1302_readbyte(RTC_ADDR_DATE));
+	tm->tm_mon	= bcd2bin(ds1302_readbyte(RTC_ADDR_MON)) - 1;
+	tm->tm_year	= bcd2bin(ds1302_readbyte(RTC_ADDR_YEAR));
 
 	if (tm->tm_year < 70)
 		tm->tm_year += 100;
@@ -141,13 +141,13 @@
 	/* Stop RTC */
 	ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) | 0x80);
 
-	ds1302_writebyte(RTC_ADDR_SEC, BIN2BCD(tm->tm_sec));
-	ds1302_writebyte(RTC_ADDR_MIN, BIN2BCD(tm->tm_min));
-	ds1302_writebyte(RTC_ADDR_HOUR, BIN2BCD(tm->tm_hour));
-	ds1302_writebyte(RTC_ADDR_DAY, BIN2BCD(tm->tm_wday));
-	ds1302_writebyte(RTC_ADDR_DATE, BIN2BCD(tm->tm_mday));
-	ds1302_writebyte(RTC_ADDR_MON, BIN2BCD(tm->tm_mon + 1));
-	ds1302_writebyte(RTC_ADDR_YEAR, BIN2BCD(tm->tm_year % 100));
+	ds1302_writebyte(RTC_ADDR_SEC, bin2bcd(tm->tm_sec));
+	ds1302_writebyte(RTC_ADDR_MIN, bin2bcd(tm->tm_min));
+	ds1302_writebyte(RTC_ADDR_HOUR, bin2bcd(tm->tm_hour));
+	ds1302_writebyte(RTC_ADDR_DAY, bin2bcd(tm->tm_wday));
+	ds1302_writebyte(RTC_ADDR_DATE, bin2bcd(tm->tm_mday));
+	ds1302_writebyte(RTC_ADDR_MON, bin2bcd(tm->tm_mon + 1));
+	ds1302_writebyte(RTC_ADDR_YEAR, bin2bcd(tm->tm_year % 100));
 
 	/* Start RTC */
 	ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) & ~0x80);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index b91d02a..fc372df 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -114,10 +114,10 @@
 			hour = 12;
 			bcd &= ~DS1305_HR_PM;
 		}
-		hour += BCD2BIN(bcd);
+		hour += bcd2bin(bcd);
 		return hour - 1;
 	}
-	return BCD2BIN(bcd);
+	return bcd2bin(bcd);
 }
 
 static u8 hour2bcd(bool hr12, int hour)
@@ -125,11 +125,11 @@
 	if (hr12) {
 		hour++;
 		if (hour <= 12)
-			return DS1305_HR_12 | BIN2BCD(hour);
+			return DS1305_HR_12 | bin2bcd(hour);
 		hour -= 12;
-		return DS1305_HR_12 | DS1305_HR_PM | BIN2BCD(hour);
+		return DS1305_HR_12 | DS1305_HR_PM | bin2bcd(hour);
 	}
-	return BIN2BCD(hour);
+	return bin2bcd(hour);
 }
 
 /*----------------------------------------------------------------------*/
@@ -206,13 +206,13 @@
 		buf[4], buf[5], buf[6]);
 
 	/* Decode the registers */
-	time->tm_sec = BCD2BIN(buf[DS1305_SEC]);
-	time->tm_min = BCD2BIN(buf[DS1305_MIN]);
+	time->tm_sec = bcd2bin(buf[DS1305_SEC]);
+	time->tm_min = bcd2bin(buf[DS1305_MIN]);
 	time->tm_hour = bcd2hour(buf[DS1305_HOUR]);
 	time->tm_wday = buf[DS1305_WDAY] - 1;
-	time->tm_mday = BCD2BIN(buf[DS1305_MDAY]);
-	time->tm_mon = BCD2BIN(buf[DS1305_MON]) - 1;
-	time->tm_year = BCD2BIN(buf[DS1305_YEAR]) + 100;
+	time->tm_mday = bcd2bin(buf[DS1305_MDAY]);
+	time->tm_mon = bcd2bin(buf[DS1305_MON]) - 1;
+	time->tm_year = bcd2bin(buf[DS1305_YEAR]) + 100;
 
 	dev_vdbg(dev, "%s secs=%d, mins=%d, "
 		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -239,13 +239,13 @@
 	/* Write registers starting at the first time/date address. */
 	*bp++ = DS1305_WRITE | DS1305_SEC;
 
-	*bp++ = BIN2BCD(time->tm_sec);
-	*bp++ = BIN2BCD(time->tm_min);
+	*bp++ = bin2bcd(time->tm_sec);
+	*bp++ = bin2bcd(time->tm_min);
 	*bp++ = hour2bcd(ds1305->hr12, time->tm_hour);
 	*bp++ = (time->tm_wday < 7) ? (time->tm_wday + 1) : 1;
-	*bp++ = BIN2BCD(time->tm_mday);
-	*bp++ = BIN2BCD(time->tm_mon + 1);
-	*bp++ = BIN2BCD(time->tm_year - 100);
+	*bp++ = bin2bcd(time->tm_mday);
+	*bp++ = bin2bcd(time->tm_mon + 1);
+	*bp++ = bin2bcd(time->tm_year - 100);
 
 	dev_dbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n",
 		"write", buf[1], buf[2], buf[3],
@@ -329,8 +329,8 @@
 	 * fill in the rest ... and also handle rollover to tomorrow when
 	 * that's needed.
 	 */
-	alm->time.tm_sec = BCD2BIN(buf[DS1305_SEC]);
-	alm->time.tm_min = BCD2BIN(buf[DS1305_MIN]);
+	alm->time.tm_sec = bcd2bin(buf[DS1305_SEC]);
+	alm->time.tm_min = bcd2bin(buf[DS1305_MIN]);
 	alm->time.tm_hour = bcd2hour(buf[DS1305_HOUR]);
 	alm->time.tm_mday = -1;
 	alm->time.tm_mon = -1;
@@ -387,8 +387,8 @@
 
 	/* write alarm */
 	buf[0] = DS1305_WRITE | DS1305_ALM0(DS1305_SEC);
-	buf[1 + DS1305_SEC] = BIN2BCD(alm->time.tm_sec);
-	buf[1 + DS1305_MIN] = BIN2BCD(alm->time.tm_min);
+	buf[1 + DS1305_SEC] = bin2bcd(alm->time.tm_sec);
+	buf[1 + DS1305_MIN] = bin2bcd(alm->time.tm_min);
 	buf[1 + DS1305_HOUR] = hour2bcd(ds1305->hr12, alm->time.tm_hour);
 	buf[1 + DS1305_WDAY] = DS1305_ALM_DISABLE;
 
@@ -606,7 +606,6 @@
 static struct bin_attribute nvram = {
 	.attr.name	= "nvram",
 	.attr.mode	= S_IRUGO | S_IWUSR,
-	.attr.owner	= THIS_MODULE,
 	.read		= ds1305_nvram_read,
 	.write		= ds1305_nvram_write,
 	.size		= DS1305_NVRAM_LEN,
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4fcf073..162330b 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -222,17 +222,17 @@
 			ds1307->regs[4], ds1307->regs[5],
 			ds1307->regs[6]);
 
-	t->tm_sec = BCD2BIN(ds1307->regs[DS1307_REG_SECS] & 0x7f);
-	t->tm_min = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
+	t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f);
+	t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
 	tmp = ds1307->regs[DS1307_REG_HOUR] & 0x3f;
-	t->tm_hour = BCD2BIN(tmp);
-	t->tm_wday = BCD2BIN(ds1307->regs[DS1307_REG_WDAY] & 0x07) - 1;
-	t->tm_mday = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
+	t->tm_hour = bcd2bin(tmp);
+	t->tm_wday = bcd2bin(ds1307->regs[DS1307_REG_WDAY] & 0x07) - 1;
+	t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
 	tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
-	t->tm_mon = BCD2BIN(tmp) - 1;
+	t->tm_mon = bcd2bin(tmp) - 1;
 
 	/* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
-	t->tm_year = BCD2BIN(ds1307->regs[DS1307_REG_YEAR]) + 100;
+	t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
 
 	dev_dbg(dev, "%s secs=%d, mins=%d, "
 		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -258,16 +258,16 @@
 		t->tm_mon, t->tm_year, t->tm_wday);
 
 	*buf++ = 0;		/* first register addr */
-	buf[DS1307_REG_SECS] = BIN2BCD(t->tm_sec);
-	buf[DS1307_REG_MIN] = BIN2BCD(t->tm_min);
-	buf[DS1307_REG_HOUR] = BIN2BCD(t->tm_hour);
-	buf[DS1307_REG_WDAY] = BIN2BCD(t->tm_wday + 1);
-	buf[DS1307_REG_MDAY] = BIN2BCD(t->tm_mday);
-	buf[DS1307_REG_MONTH] = BIN2BCD(t->tm_mon + 1);
+	buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
+	buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
+	buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
+	buf[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+	buf[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
+	buf[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
 
 	/* assume 20YY not 19YY */
 	tmp = t->tm_year - 100;
-	buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
+	buf[DS1307_REG_YEAR] = bin2bcd(tmp);
 
 	switch (ds1307->type) {
 	case ds_1337:
@@ -551,7 +551,6 @@
 	.attr = {
 		.name	= "nvram",
 		.mode	= S_IRUGO | S_IWUSR,
-		.owner	= THIS_MODULE,
 	},
 
 	.read	= ds1307_nvram_read,
@@ -709,18 +708,18 @@
 	}
 
 	tmp = ds1307->regs[DS1307_REG_SECS];
-	tmp = BCD2BIN(tmp & 0x7f);
+	tmp = bcd2bin(tmp & 0x7f);
 	if (tmp > 60)
 		goto exit_bad;
-	tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
+	tmp = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
 	if (tmp > 60)
 		goto exit_bad;
 
-	tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
+	tmp = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
 	if (tmp == 0 || tmp > 31)
 		goto exit_bad;
 
-	tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
+	tmp = bcd2bin(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
 	if (tmp == 0 || tmp > 12)
 		goto exit_bad;
 
@@ -739,14 +738,14 @@
 		/* Be sure we're in 24 hour mode.  Multi-master systems
 		 * take note...
 		 */
-		tmp = BCD2BIN(tmp & 0x1f);
+		tmp = bcd2bin(tmp & 0x1f);
 		if (tmp == 12)
 			tmp = 0;
 		if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
 			tmp += 12;
 		i2c_smbus_write_byte_data(client,
 				DS1307_REG_HOUR,
-				BIN2BCD(tmp));
+				bin2bcd(tmp));
 	}
 
 	ds1307->rtc = rtc_device_register(client->name, &client->dev,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 86981d3..25caada 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -153,8 +153,8 @@
 	/*
 	 * set the wdog values in the wdog registers
 	 */
-	rtc_write(BIN2BCD(deciseconds % 100), DS1511_WD_MSEC);
-	rtc_write(BIN2BCD(deciseconds / 100), DS1511_WD_SEC);
+	rtc_write(bin2bcd(deciseconds % 100), DS1511_WD_MSEC);
+	rtc_write(bin2bcd(deciseconds / 100), DS1511_WD_SEC);
 	/*
 	 * set wdog enable and wdog 'steering' bit to issue a reset
 	 */
@@ -220,13 +220,13 @@
 	/*
 	 * each register is a different number of valid bits
 	 */
-	sec = BIN2BCD(sec) & 0x7f;
-	min = BIN2BCD(min) & 0x7f;
-	hrs = BIN2BCD(hrs) & 0x3f;
-	day = BIN2BCD(day) & 0x3f;
-	mon = BIN2BCD(mon) & 0x1f;
-	yrs = BIN2BCD(yrs) & 0xff;
-	cen = BIN2BCD(cen) & 0xff;
+	sec = bin2bcd(sec) & 0x7f;
+	min = bin2bcd(min) & 0x7f;
+	hrs = bin2bcd(hrs) & 0x3f;
+	day = bin2bcd(day) & 0x3f;
+	mon = bin2bcd(mon) & 0x1f;
+	yrs = bin2bcd(yrs) & 0xff;
+	cen = bin2bcd(cen) & 0xff;
 
 	spin_lock_irqsave(&ds1511_lock, flags);
 	rtc_disable_update();
@@ -264,14 +264,14 @@
 	rtc_enable_update();
 	spin_unlock_irqrestore(&ds1511_lock, flags);
 
-	rtc_tm->tm_sec = BCD2BIN(rtc_tm->tm_sec);
-	rtc_tm->tm_min = BCD2BIN(rtc_tm->tm_min);
-	rtc_tm->tm_hour = BCD2BIN(rtc_tm->tm_hour);
-	rtc_tm->tm_mday = BCD2BIN(rtc_tm->tm_mday);
-	rtc_tm->tm_wday = BCD2BIN(rtc_tm->tm_wday);
-	rtc_tm->tm_mon = BCD2BIN(rtc_tm->tm_mon);
-	rtc_tm->tm_year = BCD2BIN(rtc_tm->tm_year);
-	century = BCD2BIN(century) * 100;
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+	century = bcd2bin(century) * 100;
 
 	/*
 	 * Account for differences between how the RTC uses the values
@@ -304,16 +304,16 @@
 
 	spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
 	rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_mday) & 0x3f,
+	       0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
 	       RTC_ALARM_DATE);
 	rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_hour) & 0x3f,
+	       0x80 : bin2bcd(pdata->alrm_hour) & 0x3f,
 	       RTC_ALARM_HOUR);
 	rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_min) & 0x7f,
+	       0x80 : bin2bcd(pdata->alrm_min) & 0x7f,
 	       RTC_ALARM_MIN);
 	rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_sec) & 0x7f,
+	       0x80 : bin2bcd(pdata->alrm_sec) & 0x7f,
 	       RTC_ALARM_SEC);
 	rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
 	rtc_read(RTC_CMD1);	/* clear interrupts */
@@ -481,7 +481,6 @@
 	.attr = {
 		.name = "nvram",
 		.mode = S_IRUGO | S_IWUGO,
-		.owner = THIS_MODULE,
 	},
 	.size = DS1511_RAM_MAX,
 	.read = ds1511_nvram_read,
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 4ef5928..b9475cd 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -78,17 +78,17 @@
 	void __iomem *ioaddr = pdata->ioaddr;
 	u8 century;
 
-	century = BIN2BCD((tm->tm_year + 1900) / 100);
+	century = bin2bcd((tm->tm_year + 1900) / 100);
 
 	writeb(RTC_WRITE, pdata->ioaddr + RTC_CONTROL);
 
-	writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
-	writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
-	writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
-	writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
-	writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
-	writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
-	writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
+	writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR);
+	writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH);
+	writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
+	writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE);
+	writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS);
+	writeb(bin2bcd(tm->tm_min), ioaddr + RTC_MINUTES);
+	writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
 
 	/* RTC_CENTURY and RTC_CONTROL share same register */
 	writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY);
@@ -118,14 +118,14 @@
 	year = readb(ioaddr + RTC_YEAR);
 	century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
 	writeb(0, ioaddr + RTC_CONTROL);
-	tm->tm_sec = BCD2BIN(second);
-	tm->tm_min = BCD2BIN(minute);
-	tm->tm_hour = BCD2BIN(hour);
-	tm->tm_mday = BCD2BIN(day);
-	tm->tm_wday = BCD2BIN(week);
-	tm->tm_mon = BCD2BIN(month) - 1;
+	tm->tm_sec = bcd2bin(second);
+	tm->tm_min = bcd2bin(minute);
+	tm->tm_hour = bcd2bin(hour);
+	tm->tm_mday = bcd2bin(day);
+	tm->tm_wday = bcd2bin(week);
+	tm->tm_mon = bcd2bin(month) - 1;
 	/* year is 1900 + tm->tm_year */
-	tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
+	tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
 
 	if (rtc_valid_tm(tm) < 0) {
 		dev_err(dev, "retrieved date/time is not valid.\n");
@@ -141,16 +141,16 @@
 
 	spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
 	writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_mday),
+	       0x80 : bin2bcd(pdata->alrm_mday),
 	       ioaddr + RTC_DATE_ALARM);
 	writeb(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_hour),
+	       0x80 : bin2bcd(pdata->alrm_hour),
 	       ioaddr + RTC_HOURS_ALARM);
 	writeb(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_min),
+	       0x80 : bin2bcd(pdata->alrm_min),
 	       ioaddr + RTC_MINUTES_ALARM);
 	writeb(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_sec),
+	       0x80 : bin2bcd(pdata->alrm_sec),
 	       ioaddr + RTC_SECONDS_ALARM);
 	writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
 	readb(ioaddr + RTC_FLAGS);	/* clear interrupts */
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 24d35ede..8bc8501 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -66,17 +66,17 @@
 	void __iomem *ioaddr = pdata->ioaddr_rtc;
 	u8 century;
 
-	century = BIN2BCD((tm->tm_year + 1900) / 100);
+	century = bin2bcd((tm->tm_year + 1900) / 100);
 
 	writeb(RTC_WRITE, ioaddr + RTC_CONTROL);
 
-	writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
-	writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
-	writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
-	writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
-	writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
-	writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
-	writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
+	writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR);
+	writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH);
+	writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
+	writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE);
+	writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS);
+	writeb(bin2bcd(tm->tm_min), ioaddr + RTC_MINUTES);
+	writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
 
 	/* RTC_CENTURY and RTC_CONTROL share same register */
 	writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY);
@@ -106,14 +106,14 @@
 	year = readb(ioaddr + RTC_YEAR);
 	century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
 	writeb(0, ioaddr + RTC_CONTROL);
-	tm->tm_sec = BCD2BIN(second);
-	tm->tm_min = BCD2BIN(minute);
-	tm->tm_hour = BCD2BIN(hour);
-	tm->tm_mday = BCD2BIN(day);
-	tm->tm_wday = BCD2BIN(week);
-	tm->tm_mon = BCD2BIN(month) - 1;
+	tm->tm_sec = bcd2bin(second);
+	tm->tm_min = bcd2bin(minute);
+	tm->tm_hour = bcd2bin(hour);
+	tm->tm_mday = bcd2bin(day);
+	tm->tm_wday = bcd2bin(week);
+	tm->tm_mon = bcd2bin(month) - 1;
 	/* year is 1900 + tm->tm_year */
-	tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
+	tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
 
 	if (rtc_valid_tm(tm) < 0) {
 		dev_err(dev, "retrieved date/time is not valid.\n");
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index abfdfcb..3a7be11 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -131,17 +131,17 @@
 			fm3130->regs[0xc], fm3130->regs[0xd],
 			fm3130->regs[0xe]);
 
-	t->tm_sec = BCD2BIN(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
-	t->tm_min = BCD2BIN(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
+	t->tm_sec = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
+	t->tm_min = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
 	tmp = fm3130->regs[FM3130_RTC_HOURS] & 0x3f;
-	t->tm_hour = BCD2BIN(tmp);
-	t->tm_wday = BCD2BIN(fm3130->regs[FM3130_RTC_DAY] & 0x07) - 1;
-	t->tm_mday = BCD2BIN(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
+	t->tm_hour = bcd2bin(tmp);
+	t->tm_wday = bcd2bin(fm3130->regs[FM3130_RTC_DAY] & 0x07) - 1;
+	t->tm_mday = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
 	tmp = fm3130->regs[FM3130_RTC_MONTHS] & 0x1f;
-	t->tm_mon = BCD2BIN(tmp) - 1;
+	t->tm_mon = bcd2bin(tmp) - 1;
 
 	/* assume 20YY not 19YY, and ignore CF bit */
-	t->tm_year = BCD2BIN(fm3130->regs[FM3130_RTC_YEARS]) + 100;
+	t->tm_year = bcd2bin(fm3130->regs[FM3130_RTC_YEARS]) + 100;
 
 	dev_dbg(dev, "%s secs=%d, mins=%d, "
 		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -167,16 +167,16 @@
 		t->tm_mon, t->tm_year, t->tm_wday);
 
 	/* first register addr */
-	buf[FM3130_RTC_SECONDS] = BIN2BCD(t->tm_sec);
-	buf[FM3130_RTC_MINUTES] = BIN2BCD(t->tm_min);
-	buf[FM3130_RTC_HOURS] = BIN2BCD(t->tm_hour);
-	buf[FM3130_RTC_DAY] = BIN2BCD(t->tm_wday + 1);
-	buf[FM3130_RTC_DATE] = BIN2BCD(t->tm_mday);
-	buf[FM3130_RTC_MONTHS] = BIN2BCD(t->tm_mon + 1);
+	buf[FM3130_RTC_SECONDS] = bin2bcd(t->tm_sec);
+	buf[FM3130_RTC_MINUTES] = bin2bcd(t->tm_min);
+	buf[FM3130_RTC_HOURS] = bin2bcd(t->tm_hour);
+	buf[FM3130_RTC_DAY] = bin2bcd(t->tm_wday + 1);
+	buf[FM3130_RTC_DATE] = bin2bcd(t->tm_mday);
+	buf[FM3130_RTC_MONTHS] = bin2bcd(t->tm_mon + 1);
 
 	/* assume 20YY not 19YY */
 	tmp = t->tm_year - 100;
-	buf[FM3130_RTC_YEARS] = BIN2BCD(tmp);
+	buf[FM3130_RTC_YEARS] = bin2bcd(tmp);
 
 	dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x"
 		"%02x %02x %02x %02x %02x %02x %02x %02x\n",
@@ -222,11 +222,11 @@
 			fm3130->regs[FM3130_ALARM_MONTHS]);
 
 
-	tm->tm_sec	= BCD2BIN(fm3130->regs[FM3130_ALARM_SECONDS] & 0x7F);
-	tm->tm_min	= BCD2BIN(fm3130->regs[FM3130_ALARM_MINUTES] & 0x7F);
-	tm->tm_hour	= BCD2BIN(fm3130->regs[FM3130_ALARM_HOURS] & 0x3F);
-	tm->tm_mday	= BCD2BIN(fm3130->regs[FM3130_ALARM_DATE] & 0x3F);
-	tm->tm_mon	= BCD2BIN(fm3130->regs[FM3130_ALARM_MONTHS] & 0x1F);
+	tm->tm_sec	= bcd2bin(fm3130->regs[FM3130_ALARM_SECONDS] & 0x7F);
+	tm->tm_min	= bcd2bin(fm3130->regs[FM3130_ALARM_MINUTES] & 0x7F);
+	tm->tm_hour	= bcd2bin(fm3130->regs[FM3130_ALARM_HOURS] & 0x3F);
+	tm->tm_mday	= bcd2bin(fm3130->regs[FM3130_ALARM_DATE] & 0x3F);
+	tm->tm_mon	= bcd2bin(fm3130->regs[FM3130_ALARM_MONTHS] & 0x1F);
 	if (tm->tm_mon > 0)
 		tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
 	dev_dbg(dev, "%s secs=%d, mins=%d, "
@@ -252,23 +252,23 @@
 
 	if (tm->tm_sec != -1)
 		fm3130->regs[FM3130_ALARM_SECONDS] =
-			BIN2BCD(tm->tm_sec) | 0x80;
+			bin2bcd(tm->tm_sec) | 0x80;
 
 	if (tm->tm_min != -1)
 		fm3130->regs[FM3130_ALARM_MINUTES] =
-			BIN2BCD(tm->tm_min) | 0x80;
+			bin2bcd(tm->tm_min) | 0x80;
 
 	if (tm->tm_hour != -1)
 		fm3130->regs[FM3130_ALARM_HOURS] =
-			BIN2BCD(tm->tm_hour) | 0x80;
+			bin2bcd(tm->tm_hour) | 0x80;
 
 	if (tm->tm_mday != -1)
 		fm3130->regs[FM3130_ALARM_DATE] =
-			BIN2BCD(tm->tm_mday) | 0x80;
+			bin2bcd(tm->tm_mday) | 0x80;
 
 	if (tm->tm_mon != -1)
 		fm3130->regs[FM3130_ALARM_MONTHS] =
-			BIN2BCD(tm->tm_mon + 1) | 0x80;
+			bin2bcd(tm->tm_mon + 1) | 0x80;
 
 	dev_dbg(dev, "alarm write %02x %02x %02x %02x %02x\n",
 			fm3130->regs[FM3130_ALARM_SECONDS],
@@ -414,18 +414,18 @@
 	/* TODO */
 	/* TODO need to sanity check alarm */
 	tmp = fm3130->regs[FM3130_RTC_SECONDS];
-	tmp = BCD2BIN(tmp & 0x7f);
+	tmp = bcd2bin(tmp & 0x7f);
 	if (tmp > 60)
 		goto exit_bad;
-	tmp = BCD2BIN(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
+	tmp = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
 	if (tmp > 60)
 		goto exit_bad;
 
-	tmp = BCD2BIN(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
+	tmp = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
 	if (tmp == 0 || tmp > 31)
 		goto exit_bad;
 
-	tmp = BCD2BIN(fm3130->regs[FM3130_RTC_MONTHS] & 0x1f);
+	tmp = bcd2bin(fm3130->regs[FM3130_RTC_MONTHS] & 0x1f);
 	if (tmp == 0 || tmp > 12)
 		goto exit_bad;
 
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index a81adab..2cd77ab 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -259,26 +259,26 @@
 		return sr;
 	}
 
-	tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SC]);
-	tm->tm_min = BCD2BIN(regs[ISL1208_REG_MN]);
+	tm->tm_sec = bcd2bin(regs[ISL1208_REG_SC]);
+	tm->tm_min = bcd2bin(regs[ISL1208_REG_MN]);
 
 	/* HR field has a more complex interpretation */
 	{
 		const u8 _hr = regs[ISL1208_REG_HR];
 		if (_hr & ISL1208_REG_HR_MIL)	/* 24h format */
-			tm->tm_hour = BCD2BIN(_hr & 0x3f);
+			tm->tm_hour = bcd2bin(_hr & 0x3f);
 		else {
 			/* 12h format */
-			tm->tm_hour = BCD2BIN(_hr & 0x1f);
+			tm->tm_hour = bcd2bin(_hr & 0x1f);
 			if (_hr & ISL1208_REG_HR_PM)	/* PM flag set */
 				tm->tm_hour += 12;
 		}
 	}
 
-	tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DT]);
-	tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1;	/* rtc starts at 1 */
-	tm->tm_year = BCD2BIN(regs[ISL1208_REG_YR]) + 100;
-	tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DW]);
+	tm->tm_mday = bcd2bin(regs[ISL1208_REG_DT]);
+	tm->tm_mon = bcd2bin(regs[ISL1208_REG_MO]) - 1;	/* rtc starts at 1 */
+	tm->tm_year = bcd2bin(regs[ISL1208_REG_YR]) + 100;
+	tm->tm_wday = bcd2bin(regs[ISL1208_REG_DW]);
 
 	return 0;
 }
@@ -305,13 +305,13 @@
 	}
 
 	/* MSB of each alarm register is an enable bit */
-	tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SCA - ISL1208_REG_SCA] & 0x7f);
-	tm->tm_min = BCD2BIN(regs[ISL1208_REG_MNA - ISL1208_REG_SCA] & 0x7f);
-	tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA - ISL1208_REG_SCA] & 0x3f);
-	tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA - ISL1208_REG_SCA] & 0x3f);
+	tm->tm_sec = bcd2bin(regs[ISL1208_REG_SCA - ISL1208_REG_SCA] & 0x7f);
+	tm->tm_min = bcd2bin(regs[ISL1208_REG_MNA - ISL1208_REG_SCA] & 0x7f);
+	tm->tm_hour = bcd2bin(regs[ISL1208_REG_HRA - ISL1208_REG_SCA] & 0x3f);
+	tm->tm_mday = bcd2bin(regs[ISL1208_REG_DTA - ISL1208_REG_SCA] & 0x3f);
 	tm->tm_mon =
-		BCD2BIN(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
-	tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
+		bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
+	tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
 
 	return 0;
 }
@@ -328,15 +328,15 @@
 	int sr;
 	u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
 
-	regs[ISL1208_REG_SC] = BIN2BCD(tm->tm_sec);
-	regs[ISL1208_REG_MN] = BIN2BCD(tm->tm_min);
-	regs[ISL1208_REG_HR] = BIN2BCD(tm->tm_hour) | ISL1208_REG_HR_MIL;
+	regs[ISL1208_REG_SC] = bin2bcd(tm->tm_sec);
+	regs[ISL1208_REG_MN] = bin2bcd(tm->tm_min);
+	regs[ISL1208_REG_HR] = bin2bcd(tm->tm_hour) | ISL1208_REG_HR_MIL;
 
-	regs[ISL1208_REG_DT] = BIN2BCD(tm->tm_mday);
-	regs[ISL1208_REG_MO] = BIN2BCD(tm->tm_mon + 1);
-	regs[ISL1208_REG_YR] = BIN2BCD(tm->tm_year - 100);
+	regs[ISL1208_REG_DT] = bin2bcd(tm->tm_mday);
+	regs[ISL1208_REG_MO] = bin2bcd(tm->tm_mon + 1);
+	regs[ISL1208_REG_YR] = bin2bcd(tm->tm_year - 100);
 
-	regs[ISL1208_REG_DW] = BIN2BCD(tm->tm_wday & 7);
+	regs[ISL1208_REG_DW] = bin2bcd(tm->tm_wday & 7);
 
 	sr = isl1208_i2c_get_sr(client);
 	if (sr < 0) {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 470fb2d..893f7de 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -110,15 +110,15 @@
 		return -EIO;
 	}
 
-	tm->tm_sec = BCD2BIN(buf[M41T80_REG_SEC] & 0x7f);
-	tm->tm_min = BCD2BIN(buf[M41T80_REG_MIN] & 0x7f);
-	tm->tm_hour = BCD2BIN(buf[M41T80_REG_HOUR] & 0x3f);
-	tm->tm_mday = BCD2BIN(buf[M41T80_REG_DAY] & 0x3f);
+	tm->tm_sec = bcd2bin(buf[M41T80_REG_SEC] & 0x7f);
+	tm->tm_min = bcd2bin(buf[M41T80_REG_MIN] & 0x7f);
+	tm->tm_hour = bcd2bin(buf[M41T80_REG_HOUR] & 0x3f);
+	tm->tm_mday = bcd2bin(buf[M41T80_REG_DAY] & 0x3f);
 	tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
-	tm->tm_mon = BCD2BIN(buf[M41T80_REG_MON] & 0x1f) - 1;
+	tm->tm_mon = bcd2bin(buf[M41T80_REG_MON] & 0x1f) - 1;
 
 	/* assume 20YY not 19YY, and ignore the Century Bit */
-	tm->tm_year = BCD2BIN(buf[M41T80_REG_YEAR]) + 100;
+	tm->tm_year = bcd2bin(buf[M41T80_REG_YEAR]) + 100;
 	return 0;
 }
 
@@ -161,19 +161,19 @@
 	/* Merge time-data and register flags into buf[0..7] */
 	buf[M41T80_REG_SSEC] = 0;
 	buf[M41T80_REG_SEC] =
-		BIN2BCD(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
+		bin2bcd(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
 	buf[M41T80_REG_MIN] =
-		BIN2BCD(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
+		bin2bcd(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
 	buf[M41T80_REG_HOUR] =
-		BIN2BCD(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ;
+		bin2bcd(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ;
 	buf[M41T80_REG_WDAY] =
 		(tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
 	buf[M41T80_REG_DAY] =
-		BIN2BCD(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
+		bin2bcd(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
 	buf[M41T80_REG_MON] =
-		BIN2BCD(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
+		bin2bcd(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
 	/* assume 20YY not 19YY */
-	buf[M41T80_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+	buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year % 100);
 
 	if (i2c_transfer(client->adapter, msgs, 1) != 1) {
 		dev_err(&client->dev, "write error\n");
@@ -288,15 +288,15 @@
 
 	wbuf[0] = M41T80_REG_ALARM_MON; /* offset into rtc's regs */
 	reg[M41T80_REG_ALARM_SEC] |= t->time.tm_sec >= 0 ?
-		BIN2BCD(t->time.tm_sec) : 0x80;
+		bin2bcd(t->time.tm_sec) : 0x80;
 	reg[M41T80_REG_ALARM_MIN] |= t->time.tm_min >= 0 ?
-		BIN2BCD(t->time.tm_min) : 0x80;
+		bin2bcd(t->time.tm_min) : 0x80;
 	reg[M41T80_REG_ALARM_HOUR] |= t->time.tm_hour >= 0 ?
-		BIN2BCD(t->time.tm_hour) : 0x80;
+		bin2bcd(t->time.tm_hour) : 0x80;
 	reg[M41T80_REG_ALARM_DAY] |= t->time.tm_mday >= 0 ?
-		BIN2BCD(t->time.tm_mday) : 0x80;
+		bin2bcd(t->time.tm_mday) : 0x80;
 	if (t->time.tm_mon >= 0)
-		reg[M41T80_REG_ALARM_MON] |= BIN2BCD(t->time.tm_mon + 1);
+		reg[M41T80_REG_ALARM_MON] |= bin2bcd(t->time.tm_mon + 1);
 	else
 		reg[M41T80_REG_ALARM_DAY] |= 0x40;
 
@@ -347,15 +347,15 @@
 	t->time.tm_mday = -1;
 	t->time.tm_mon = -1;
 	if (!(reg[M41T80_REG_ALARM_SEC] & 0x80))
-		t->time.tm_sec = BCD2BIN(reg[M41T80_REG_ALARM_SEC] & 0x7f);
+		t->time.tm_sec = bcd2bin(reg[M41T80_REG_ALARM_SEC] & 0x7f);
 	if (!(reg[M41T80_REG_ALARM_MIN] & 0x80))
-		t->time.tm_min = BCD2BIN(reg[M41T80_REG_ALARM_MIN] & 0x7f);
+		t->time.tm_min = bcd2bin(reg[M41T80_REG_ALARM_MIN] & 0x7f);
 	if (!(reg[M41T80_REG_ALARM_HOUR] & 0x80))
-		t->time.tm_hour = BCD2BIN(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
+		t->time.tm_hour = bcd2bin(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
 	if (!(reg[M41T80_REG_ALARM_DAY] & 0x80))
-		t->time.tm_mday = BCD2BIN(reg[M41T80_REG_ALARM_DAY] & 0x3f);
+		t->time.tm_mday = bcd2bin(reg[M41T80_REG_ALARM_DAY] & 0x3f);
 	if (!(reg[M41T80_REG_ALARM_DAY] & 0x40))
-		t->time.tm_mon = BCD2BIN(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
+		t->time.tm_mon = bcd2bin(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
 	t->time.tm_year = -1;
 	t->time.tm_wday = -1;
 	t->time.tm_yday = -1;
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index 9b19499..c3a18c5 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -41,17 +41,17 @@
 		tm->tm_mon, tm->tm_year, tm->tm_wday);
 
 	buf[0] = 0x80 | M41T94_REG_SECONDS; /* write time + date */
-	buf[M41T94_REG_SECONDS] = BIN2BCD(tm->tm_sec);
-	buf[M41T94_REG_MINUTES] = BIN2BCD(tm->tm_min);
-	buf[M41T94_REG_HOURS]   = BIN2BCD(tm->tm_hour);
-	buf[M41T94_REG_WDAY]    = BIN2BCD(tm->tm_wday + 1);
-	buf[M41T94_REG_DAY]     = BIN2BCD(tm->tm_mday);
-	buf[M41T94_REG_MONTH]   = BIN2BCD(tm->tm_mon + 1);
+	buf[M41T94_REG_SECONDS] = bin2bcd(tm->tm_sec);
+	buf[M41T94_REG_MINUTES] = bin2bcd(tm->tm_min);
+	buf[M41T94_REG_HOURS]   = bin2bcd(tm->tm_hour);
+	buf[M41T94_REG_WDAY]    = bin2bcd(tm->tm_wday + 1);
+	buf[M41T94_REG_DAY]     = bin2bcd(tm->tm_mday);
+	buf[M41T94_REG_MONTH]   = bin2bcd(tm->tm_mon + 1);
 
 	buf[M41T94_REG_HOURS] |= M41T94_BIT_CEB;
 	if (tm->tm_year >= 100)
 		buf[M41T94_REG_HOURS] |= M41T94_BIT_CB;
-	buf[M41T94_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+	buf[M41T94_REG_YEAR] = bin2bcd(tm->tm_year % 100);
 
 	return spi_write(spi, buf, 8);
 }
@@ -82,14 +82,14 @@
 		spi_write(spi, buf, 2);
 	}
 
-	tm->tm_sec  = BCD2BIN(spi_w8r8(spi, M41T94_REG_SECONDS));
-	tm->tm_min  = BCD2BIN(spi_w8r8(spi, M41T94_REG_MINUTES));
+	tm->tm_sec  = bcd2bin(spi_w8r8(spi, M41T94_REG_SECONDS));
+	tm->tm_min  = bcd2bin(spi_w8r8(spi, M41T94_REG_MINUTES));
 	hour = spi_w8r8(spi, M41T94_REG_HOURS);
-	tm->tm_hour = BCD2BIN(hour & 0x3f);
-	tm->tm_wday = BCD2BIN(spi_w8r8(spi, M41T94_REG_WDAY)) - 1;
-	tm->tm_mday = BCD2BIN(spi_w8r8(spi, M41T94_REG_DAY));
-	tm->tm_mon  = BCD2BIN(spi_w8r8(spi, M41T94_REG_MONTH)) - 1;
-	tm->tm_year = BCD2BIN(spi_w8r8(spi, M41T94_REG_YEAR));
+	tm->tm_hour = bcd2bin(hour & 0x3f);
+	tm->tm_wday = bcd2bin(spi_w8r8(spi, M41T94_REG_WDAY)) - 1;
+	tm->tm_mday = bcd2bin(spi_w8r8(spi, M41T94_REG_DAY));
+	tm->tm_mon  = bcd2bin(spi_w8r8(spi, M41T94_REG_MONTH)) - 1;
+	tm->tm_year = bcd2bin(spi_w8r8(spi, M41T94_REG_YEAR));
 	if ((hour & M41T94_BIT_CB) || !(hour & M41T94_BIT_CEB))
 		tm->tm_year += 100;
 
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index ce4eff6..04b63da 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -76,10 +76,10 @@
 	/* Issue the READ command */
 	M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
 
-	tm->tm_year	= BCD2BIN(M48T59_READ(M48T59_YEAR));
+	tm->tm_year	= bcd2bin(M48T59_READ(M48T59_YEAR));
 	/* tm_mon is 0-11 */
-	tm->tm_mon	= BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
-	tm->tm_mday	= BCD2BIN(M48T59_READ(M48T59_MDAY));
+	tm->tm_mon	= bcd2bin(M48T59_READ(M48T59_MONTH)) - 1;
+	tm->tm_mday	= bcd2bin(M48T59_READ(M48T59_MDAY));
 
 	val = M48T59_READ(M48T59_WDAY);
 	if ((pdata->type == M48T59RTC_TYPE_M48T59) &&
@@ -88,10 +88,10 @@
 		tm->tm_year += 100;	/* one century */
 	}
 
-	tm->tm_wday	= BCD2BIN(val & 0x07);
-	tm->tm_hour	= BCD2BIN(M48T59_READ(M48T59_HOUR) & 0x3F);
-	tm->tm_min	= BCD2BIN(M48T59_READ(M48T59_MIN) & 0x7F);
-	tm->tm_sec	= BCD2BIN(M48T59_READ(M48T59_SEC) & 0x7F);
+	tm->tm_wday	= bcd2bin(val & 0x07);
+	tm->tm_hour	= bcd2bin(M48T59_READ(M48T59_HOUR) & 0x3F);
+	tm->tm_min	= bcd2bin(M48T59_READ(M48T59_MIN) & 0x7F);
+	tm->tm_sec	= bcd2bin(M48T59_READ(M48T59_SEC) & 0x7F);
 
 	/* Clear the READ bit */
 	M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
@@ -119,17 +119,17 @@
 	/* Issue the WRITE command */
 	M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
 
-	M48T59_WRITE((BIN2BCD(tm->tm_sec) & 0x7F), M48T59_SEC);
-	M48T59_WRITE((BIN2BCD(tm->tm_min) & 0x7F), M48T59_MIN);
-	M48T59_WRITE((BIN2BCD(tm->tm_hour) & 0x3F), M48T59_HOUR);
-	M48T59_WRITE((BIN2BCD(tm->tm_mday) & 0x3F), M48T59_MDAY);
+	M48T59_WRITE((bin2bcd(tm->tm_sec) & 0x7F), M48T59_SEC);
+	M48T59_WRITE((bin2bcd(tm->tm_min) & 0x7F), M48T59_MIN);
+	M48T59_WRITE((bin2bcd(tm->tm_hour) & 0x3F), M48T59_HOUR);
+	M48T59_WRITE((bin2bcd(tm->tm_mday) & 0x3F), M48T59_MDAY);
 	/* tm_mon is 0-11 */
-	M48T59_WRITE((BIN2BCD(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
-	M48T59_WRITE(BIN2BCD(tm->tm_year % 100), M48T59_YEAR);
+	M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
+	M48T59_WRITE(bin2bcd(tm->tm_year % 100), M48T59_YEAR);
 
 	if (pdata->type == M48T59RTC_TYPE_M48T59 && (tm->tm_year / 100))
 		val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
-	val |= (BIN2BCD(tm->tm_wday) & 0x07);
+	val |= (bin2bcd(tm->tm_wday) & 0x07);
 	M48T59_WRITE(val, M48T59_WDAY);
 
 	/* Clear the WRITE bit */
@@ -158,18 +158,18 @@
 	/* Issue the READ command */
 	M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
 
-	tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
+	tm->tm_year = bcd2bin(M48T59_READ(M48T59_YEAR));
 	/* tm_mon is 0-11 */
-	tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+	tm->tm_mon = bcd2bin(M48T59_READ(M48T59_MONTH)) - 1;
 
 	val = M48T59_READ(M48T59_WDAY);
 	if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB))
 		tm->tm_year += 100;	/* one century */
 
-	tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_ALARM_DATE));
-	tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_ALARM_HOUR));
-	tm->tm_min = BCD2BIN(M48T59_READ(M48T59_ALARM_MIN));
-	tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_ALARM_SEC));
+	tm->tm_mday = bcd2bin(M48T59_READ(M48T59_ALARM_DATE));
+	tm->tm_hour = bcd2bin(M48T59_READ(M48T59_ALARM_HOUR));
+	tm->tm_min = bcd2bin(M48T59_READ(M48T59_ALARM_MIN));
+	tm->tm_sec = bcd2bin(M48T59_READ(M48T59_ALARM_SEC));
 
 	/* Clear the READ bit */
 	M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
@@ -201,18 +201,18 @@
 	 * 0xff means "always match"
 	 */
 	mday = tm->tm_mday;
-	mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
+	mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
 	if (mday == 0xff)
 		mday = M48T59_READ(M48T59_MDAY);
 
 	hour = tm->tm_hour;
-	hour = (hour < 24) ? BIN2BCD(hour) : 0x00;
+	hour = (hour < 24) ? bin2bcd(hour) : 0x00;
 
 	min = tm->tm_min;
-	min = (min < 60) ? BIN2BCD(min) : 0x00;
+	min = (min < 60) ? bin2bcd(min) : 0x00;
 
 	sec = tm->tm_sec;
-	sec = (sec < 60) ? BIN2BCD(sec) : 0x00;
+	sec = (sec < 60) ? bin2bcd(sec) : 0x00;
 
 	spin_lock_irqsave(&m48t59->lock, flags);
 	/* Issue the WRITE command */
@@ -360,7 +360,6 @@
 	.attr = {
 		.name = "nvram",
 		.mode = S_IRUGO | S_IWUSR,
-		.owner = THIS_MODULE,
 	},
 	.read = m48t59_nvram_read,
 	.write = m48t59_nvram_write,
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 3f7f99a..7c045cf 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -62,14 +62,14 @@
 		tm->tm_wday	= ops->readbyte(M48T86_REG_DOW);
 	} else {
 		/* bcd mode */
-		tm->tm_sec	= BCD2BIN(ops->readbyte(M48T86_REG_SEC));
-		tm->tm_min	= BCD2BIN(ops->readbyte(M48T86_REG_MIN));
-		tm->tm_hour	= BCD2BIN(ops->readbyte(M48T86_REG_HOUR) & 0x3F);
-		tm->tm_mday	= BCD2BIN(ops->readbyte(M48T86_REG_DOM));
+		tm->tm_sec	= bcd2bin(ops->readbyte(M48T86_REG_SEC));
+		tm->tm_min	= bcd2bin(ops->readbyte(M48T86_REG_MIN));
+		tm->tm_hour	= bcd2bin(ops->readbyte(M48T86_REG_HOUR) & 0x3F);
+		tm->tm_mday	= bcd2bin(ops->readbyte(M48T86_REG_DOM));
 		/* tm_mon is 0-11 */
-		tm->tm_mon	= BCD2BIN(ops->readbyte(M48T86_REG_MONTH)) - 1;
-		tm->tm_year	= BCD2BIN(ops->readbyte(M48T86_REG_YEAR)) + 100;
-		tm->tm_wday	= BCD2BIN(ops->readbyte(M48T86_REG_DOW));
+		tm->tm_mon	= bcd2bin(ops->readbyte(M48T86_REG_MONTH)) - 1;
+		tm->tm_year	= bcd2bin(ops->readbyte(M48T86_REG_YEAR)) + 100;
+		tm->tm_wday	= bcd2bin(ops->readbyte(M48T86_REG_DOW));
 	}
 
 	/* correct the hour if the clock is in 12h mode */
@@ -103,13 +103,13 @@
 		ops->writebyte(tm->tm_wday, M48T86_REG_DOW);
 	} else {
 		/* bcd mode */
-		ops->writebyte(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
-		ops->writebyte(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
-		ops->writebyte(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
-		ops->writebyte(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
-		ops->writebyte(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
-		ops->writebyte(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
-		ops->writebyte(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
+		ops->writebyte(bin2bcd(tm->tm_sec), M48T86_REG_SEC);
+		ops->writebyte(bin2bcd(tm->tm_min), M48T86_REG_MIN);
+		ops->writebyte(bin2bcd(tm->tm_hour), M48T86_REG_HOUR);
+		ops->writebyte(bin2bcd(tm->tm_mday), M48T86_REG_DOM);
+		ops->writebyte(bin2bcd(tm->tm_mon + 1), M48T86_REG_MONTH);
+		ops->writebyte(bin2bcd(tm->tm_year % 100), M48T86_REG_YEAR);
+		ops->writebyte(bin2bcd(tm->tm_wday), M48T86_REG_DOW);
 	}
 
 	/* update ended */
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 12c9cd2..8078279 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -150,14 +150,14 @@
 	if (rc < 0)
 		return rc;
 
-	tm->tm_sec = BCD2BIN(regs[MAX6900_REG_SC]);
-	tm->tm_min = BCD2BIN(regs[MAX6900_REG_MN]);
-	tm->tm_hour = BCD2BIN(regs[MAX6900_REG_HR] & 0x3f);
-	tm->tm_mday = BCD2BIN(regs[MAX6900_REG_DT]);
-	tm->tm_mon = BCD2BIN(regs[MAX6900_REG_MO]) - 1;
-	tm->tm_year = BCD2BIN(regs[MAX6900_REG_YR]) +
-	    BCD2BIN(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
-	tm->tm_wday = BCD2BIN(regs[MAX6900_REG_DW]);
+	tm->tm_sec = bcd2bin(regs[MAX6900_REG_SC]);
+	tm->tm_min = bcd2bin(regs[MAX6900_REG_MN]);
+	tm->tm_hour = bcd2bin(regs[MAX6900_REG_HR] & 0x3f);
+	tm->tm_mday = bcd2bin(regs[MAX6900_REG_DT]);
+	tm->tm_mon = bcd2bin(regs[MAX6900_REG_MO]) - 1;
+	tm->tm_year = bcd2bin(regs[MAX6900_REG_YR]) +
+		      bcd2bin(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
+	tm->tm_wday = bcd2bin(regs[MAX6900_REG_DW]);
 
 	return 0;
 }
@@ -184,14 +184,14 @@
 	if (rc < 0)
 		return rc;
 
-	regs[MAX6900_REG_SC] = BIN2BCD(tm->tm_sec);
-	regs[MAX6900_REG_MN] = BIN2BCD(tm->tm_min);
-	regs[MAX6900_REG_HR] = BIN2BCD(tm->tm_hour);
-	regs[MAX6900_REG_DT] = BIN2BCD(tm->tm_mday);
-	regs[MAX6900_REG_MO] = BIN2BCD(tm->tm_mon + 1);
-	regs[MAX6900_REG_DW] = BIN2BCD(tm->tm_wday);
-	regs[MAX6900_REG_YR] = BIN2BCD(tm->tm_year % 100);
-	regs[MAX6900_REG_CENTURY] = BIN2BCD((tm->tm_year + 1900) / 100);
+	regs[MAX6900_REG_SC] = bin2bcd(tm->tm_sec);
+	regs[MAX6900_REG_MN] = bin2bcd(tm->tm_min);
+	regs[MAX6900_REG_HR] = bin2bcd(tm->tm_hour);
+	regs[MAX6900_REG_DT] = bin2bcd(tm->tm_mday);
+	regs[MAX6900_REG_MO] = bin2bcd(tm->tm_mon + 1);
+	regs[MAX6900_REG_DW] = bin2bcd(tm->tm_wday);
+	regs[MAX6900_REG_YR] = bin2bcd(tm->tm_year % 100);
+	regs[MAX6900_REG_CENTURY] = bin2bcd((tm->tm_year + 1900) / 100);
 	/* set write protect */
 	regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP;
 
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 78b2551..2f6507d 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -124,15 +124,15 @@
 
 	/* The chip sends data in this order:
 	 * Seconds, Minutes, Hours, Date, Month, Day, Year */
-	dt->tm_sec	= BCD2BIN(chip->buf[1]);
-	dt->tm_min	= BCD2BIN(chip->buf[2]);
-	dt->tm_hour	= BCD2BIN(chip->buf[3]);
-	dt->tm_mday	= BCD2BIN(chip->buf[4]);
-	dt->tm_mon	= BCD2BIN(chip->buf[5]) - 1;
-	dt->tm_wday	= BCD2BIN(chip->buf[6]);
-	dt->tm_year = BCD2BIN(chip->buf[7]);
+	dt->tm_sec	= bcd2bin(chip->buf[1]);
+	dt->tm_min	= bcd2bin(chip->buf[2]);
+	dt->tm_hour	= bcd2bin(chip->buf[3]);
+	dt->tm_mday	= bcd2bin(chip->buf[4]);
+	dt->tm_mon	= bcd2bin(chip->buf[5]) - 1;
+	dt->tm_wday	= bcd2bin(chip->buf[6]);
+	dt->tm_year = bcd2bin(chip->buf[7]);
 
-	century = BCD2BIN(tmp) * 100;
+	century = bcd2bin(tmp) * 100;
 
 	dt->tm_year += century;
 	dt->tm_year -= 1900;
@@ -168,15 +168,15 @@
 	/* Remove write protection */
 	max6902_set_reg(dev, 0xF, 0);
 
-	max6902_set_reg(dev, 0x01, BIN2BCD(dt->tm_sec));
-	max6902_set_reg(dev, 0x03, BIN2BCD(dt->tm_min));
-	max6902_set_reg(dev, 0x05, BIN2BCD(dt->tm_hour));
+	max6902_set_reg(dev, 0x01, bin2bcd(dt->tm_sec));
+	max6902_set_reg(dev, 0x03, bin2bcd(dt->tm_min));
+	max6902_set_reg(dev, 0x05, bin2bcd(dt->tm_hour));
 
-	max6902_set_reg(dev, 0x07, BIN2BCD(dt->tm_mday));
-	max6902_set_reg(dev, 0x09, BIN2BCD(dt->tm_mon+1));
-	max6902_set_reg(dev, 0x0B, BIN2BCD(dt->tm_wday));
-	max6902_set_reg(dev, 0x0D, BIN2BCD(dt->tm_year%100));
-	max6902_set_reg(dev, 0x13, BIN2BCD(dt->tm_year/100));
+	max6902_set_reg(dev, 0x07, bin2bcd(dt->tm_mday));
+	max6902_set_reg(dev, 0x09, bin2bcd(dt->tm_mon+1));
+	max6902_set_reg(dev, 0x0B, bin2bcd(dt->tm_wday));
+	max6902_set_reg(dev, 0x0D, bin2bcd(dt->tm_year%100));
+	max6902_set_reg(dev, 0x13, bin2bcd(dt->tm_year/100));
 
 	/* Compulab used a delay here. However, the datasheet
 	 * does not mention a delay being required anywhere... */
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8876605d..2cbeb07 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -186,30 +186,30 @@
 	if (rtc_valid_tm(tm) != 0)
 		return -EINVAL;
 
-	tm->tm_sec = BIN2BCD(tm->tm_sec);
-	tm->tm_min = BIN2BCD(tm->tm_min);
-	tm->tm_hour = BIN2BCD(tm->tm_hour);
-	tm->tm_mday = BIN2BCD(tm->tm_mday);
+	tm->tm_sec = bin2bcd(tm->tm_sec);
+	tm->tm_min = bin2bcd(tm->tm_min);
+	tm->tm_hour = bin2bcd(tm->tm_hour);
+	tm->tm_mday = bin2bcd(tm->tm_mday);
 
-	tm->tm_mon = BIN2BCD(tm->tm_mon + 1);
+	tm->tm_mon = bin2bcd(tm->tm_mon + 1);
 
 	/* epoch == 1900 */
 	if (tm->tm_year < 100 || tm->tm_year > 199)
 		return -EINVAL;
-	tm->tm_year = BIN2BCD(tm->tm_year - 100);
+	tm->tm_year = bin2bcd(tm->tm_year - 100);
 
 	return 0;
 }
 
 static void bcd2tm(struct rtc_time *tm)
 {
-	tm->tm_sec = BCD2BIN(tm->tm_sec);
-	tm->tm_min = BCD2BIN(tm->tm_min);
-	tm->tm_hour = BCD2BIN(tm->tm_hour);
-	tm->tm_mday = BCD2BIN(tm->tm_mday);
-	tm->tm_mon = BCD2BIN(tm->tm_mon) - 1;
+	tm->tm_sec = bcd2bin(tm->tm_sec);
+	tm->tm_min = bcd2bin(tm->tm_min);
+	tm->tm_hour = bcd2bin(tm->tm_hour);
+	tm->tm_mday = bcd2bin(tm->tm_mday);
+	tm->tm_mon = bcd2bin(tm->tm_mon) - 1;
 	/* epoch == 1900 */
-	tm->tm_year = BCD2BIN(tm->tm_year) + 100;
+	tm->tm_year = bcd2bin(tm->tm_year) + 100;
 }
 
 
diff --git a/drivers/rtc/rtc-parisc.c b/drivers/rtc/rtc-parisc.c
new file mode 100644
index 0000000..346d633
--- /dev/null
+++ b/drivers/rtc/rtc-parisc.c
@@ -0,0 +1,111 @@
+/* rtc-parisc: RTC for HP PA-RISC firmware
+ *
+ * Copyright (C) 2008 Kyle McMartin <kyle@mcmartin.ca>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+
+#include <asm/rtc.h>
+
+/* as simple as can be, and no simpler. */
+struct parisc_rtc {
+	struct rtc_device *rtc;
+	spinlock_t lock;
+};
+
+static int parisc_get_time(struct device *dev, struct rtc_time *tm)
+{
+	struct parisc_rtc *p = dev_get_drvdata(dev);
+	unsigned long flags, ret;
+
+	spin_lock_irqsave(&p->lock, flags);
+	ret = get_rtc_time(tm);
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (ret & RTC_BATT_BAD)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static int parisc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct parisc_rtc *p = dev_get_drvdata(dev);
+	unsigned long flags, ret;
+
+	spin_lock_irqsave(&p->lock, flags);
+	ret = set_rtc_time(tm);
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (ret)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static const struct rtc_class_ops parisc_rtc_ops = {
+	.read_time = parisc_get_time,
+	.set_time = parisc_set_time,
+};
+
+static int __devinit parisc_rtc_probe(struct platform_device *dev)
+{
+	struct parisc_rtc *p;
+
+	p = kzalloc(sizeof (*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	spin_lock_init(&p->lock);
+
+	p->rtc = rtc_device_register("rtc-parisc", &dev->dev, &parisc_rtc_ops,
+					THIS_MODULE);
+	if (IS_ERR(p->rtc)) {
+		int err = PTR_ERR(p->rtc);
+		kfree(p);
+		return err;
+	}
+
+	platform_set_drvdata(dev, p);
+
+	return 0;
+}
+
+static int __devexit parisc_rtc_remove(struct platform_device *dev)
+{
+	struct parisc_rtc *p = platform_get_drvdata(dev);
+
+	rtc_device_unregister(p->rtc);
+	kfree(p);
+
+	return 0;
+}
+
+static struct platform_driver parisc_rtc_driver = {
+	.driver = {
+		.name = "rtc-parisc",
+		.owner = THIS_MODULE,
+	},
+	.probe = parisc_rtc_probe,
+	.remove = __devexit_p(parisc_rtc_remove),
+};
+
+static int __init parisc_rtc_init(void)
+{
+	return platform_driver_register(&parisc_rtc_driver);
+}
+
+static void __exit parisc_rtc_fini(void)
+{
+	platform_driver_unregister(&parisc_rtc_driver);
+}
+
+module_init(parisc_rtc_init);
+module_exit(parisc_rtc_fini);
+
+MODULE_AUTHOR("Kyle McMartin <kyle@mcmartin.ca>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HP PA-RISC RTC driver");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index a829f20..b725913 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -97,13 +97,13 @@
 		buf[8]);
 
 
-	tm->tm_sec = BCD2BIN(buf[PCF8563_REG_SC] & 0x7F);
-	tm->tm_min = BCD2BIN(buf[PCF8563_REG_MN] & 0x7F);
-	tm->tm_hour = BCD2BIN(buf[PCF8563_REG_HR] & 0x3F); /* rtc hr 0-23 */
-	tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F);
+	tm->tm_sec = bcd2bin(buf[PCF8563_REG_SC] & 0x7F);
+	tm->tm_min = bcd2bin(buf[PCF8563_REG_MN] & 0x7F);
+	tm->tm_hour = bcd2bin(buf[PCF8563_REG_HR] & 0x3F); /* rtc hr 0-23 */
+	tm->tm_mday = bcd2bin(buf[PCF8563_REG_DM] & 0x3F);
 	tm->tm_wday = buf[PCF8563_REG_DW] & 0x07;
-	tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
-	tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR]);
+	tm->tm_mon = bcd2bin(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
+	tm->tm_year = bcd2bin(buf[PCF8563_REG_YR]);
 	if (tm->tm_year < 70)
 		tm->tm_year += 100;	/* assume we are in 1970...2069 */
 	/* detect the polarity heuristically. see note above. */
@@ -138,17 +138,17 @@
 		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
 	/* hours, minutes and seconds */
-	buf[PCF8563_REG_SC] = BIN2BCD(tm->tm_sec);
-	buf[PCF8563_REG_MN] = BIN2BCD(tm->tm_min);
-	buf[PCF8563_REG_HR] = BIN2BCD(tm->tm_hour);
+	buf[PCF8563_REG_SC] = bin2bcd(tm->tm_sec);
+	buf[PCF8563_REG_MN] = bin2bcd(tm->tm_min);
+	buf[PCF8563_REG_HR] = bin2bcd(tm->tm_hour);
 
-	buf[PCF8563_REG_DM] = BIN2BCD(tm->tm_mday);
+	buf[PCF8563_REG_DM] = bin2bcd(tm->tm_mday);
 
 	/* month, 1 - 12 */
-	buf[PCF8563_REG_MO] = BIN2BCD(tm->tm_mon + 1);
+	buf[PCF8563_REG_MO] = bin2bcd(tm->tm_mon + 1);
 
 	/* year and century */
-	buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100);
+	buf[PCF8563_REG_YR] = bin2bcd(tm->tm_year % 100);
 	if (pcf8563->c_polarity ? (tm->tm_year >= 100) : (tm->tm_year < 100))
 		buf[PCF8563_REG_MO] |= PCF8563_MO_C;
 
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index d388c66..7d33cda 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -76,11 +76,11 @@
 		buf[4] &= 0x3f;
 		buf[5] &= 0x1f;
 
-		dt->tm_sec = BCD2BIN(buf[1]);
-		dt->tm_min = BCD2BIN(buf[2]);
-		dt->tm_hour = BCD2BIN(buf[3]);
-		dt->tm_mday = BCD2BIN(buf[4]);
-		dt->tm_mon = BCD2BIN(buf[5]) - 1;
+		dt->tm_sec = bcd2bin(buf[1]);
+		dt->tm_min = bcd2bin(buf[2]);
+		dt->tm_hour = bcd2bin(buf[3]);
+		dt->tm_mday = bcd2bin(buf[4]);
+		dt->tm_mon = bcd2bin(buf[5]) - 1;
 	}
 
 	return ret == 2 ? 0 : -EIO;
@@ -94,14 +94,14 @@
 	buf[0] = 0;
 	buf[1] = get_ctrl(client) | 0x80;
 	buf[2] = 0;
-	buf[3] = BIN2BCD(dt->tm_sec);
-	buf[4] = BIN2BCD(dt->tm_min);
-	buf[5] = BIN2BCD(dt->tm_hour);
+	buf[3] = bin2bcd(dt->tm_sec);
+	buf[4] = bin2bcd(dt->tm_min);
+	buf[5] = bin2bcd(dt->tm_hour);
 
 	if (datetoo) {
 		len = 8;
-		buf[6] = BIN2BCD(dt->tm_mday) | (dt->tm_year << 6);
-		buf[7] = BIN2BCD(dt->tm_mon + 1)  | (dt->tm_wday << 5);
+		buf[6] = bin2bcd(dt->tm_mday) | (dt->tm_year << 6);
+		buf[7] = bin2bcd(dt->tm_mon + 1)  | (dt->tm_wday << 5);
 	}
 
 	ret = i2c_master_send(client, (char *)buf, len);
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 395985b..42028f2 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -80,13 +80,13 @@
 
 	memset(dt, 0, sizeof(*dt));
 
-	dt->tm_sec = BCD2BIN(buf[0]); /* RSECCNT */
-	dt->tm_min = BCD2BIN(buf[1]); /* RMINCNT */
-	dt->tm_hour = BCD2BIN(buf[2]); /* RHRCNT */
+	dt->tm_sec = bcd2bin(buf[0]); /* RSECCNT */
+	dt->tm_min = bcd2bin(buf[1]); /* RMINCNT */
+	dt->tm_hour = bcd2bin(buf[2]); /* RHRCNT */
 
-	dt->tm_mday = BCD2BIN(buf[3]); /* RDAYCNT */
-	dt->tm_mon = BCD2BIN(buf[4]) - 1; /* RMONCNT */
-	dt->tm_year = BCD2BIN(buf[5]) + 100; /* RYRCNT */
+	dt->tm_mday = bcd2bin(buf[3]); /* RDAYCNT */
+	dt->tm_mon = bcd2bin(buf[4]) - 1; /* RMONCNT */
+	dt->tm_year = bcd2bin(buf[5]) + 100; /* RYRCNT */
 
 	/* the rtc device may contain illegal values on power up
 	 * according to the data sheet. make sure they are valid.
@@ -103,12 +103,12 @@
 	if (year >= 2100 || year < 2000)
 		return -EINVAL;
 
-	ret = write_reg(dev, RHRCNT, BIN2BCD(dt->tm_hour));
-	ret = ret ? ret : write_reg(dev, RMINCNT, BIN2BCD(dt->tm_min));
-	ret = ret ? ret : write_reg(dev, RSECCNT, BIN2BCD(dt->tm_sec));
-	ret = ret ? ret : write_reg(dev, RDAYCNT, BIN2BCD(dt->tm_mday));
-	ret = ret ? ret : write_reg(dev, RMONCNT, BIN2BCD(dt->tm_mon + 1));
-	ret = ret ? ret : write_reg(dev, RYRCNT, BIN2BCD(dt->tm_year - 100));
+	ret = write_reg(dev, RHRCNT, bin2bcd(dt->tm_hour));
+	ret = ret ? ret : write_reg(dev, RMINCNT, bin2bcd(dt->tm_min));
+	ret = ret ? ret : write_reg(dev, RSECCNT, bin2bcd(dt->tm_sec));
+	ret = ret ? ret : write_reg(dev, RDAYCNT, bin2bcd(dt->tm_mday));
+	ret = ret ? ret : write_reg(dev, RMONCNT, bin2bcd(dt->tm_mon + 1));
+	ret = ret ? ret : write_reg(dev, RYRCNT, bin2bcd(dt->tm_year - 100));
 	ret = ret ? ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday);
 
 	return ret;
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index 1c14d44..e6ea3f5 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -235,33 +235,33 @@
 
 	data = rs5c313_read_reg(RS5C313_ADDR_SEC);
 	data |= (rs5c313_read_reg(RS5C313_ADDR_SEC10) << 4);
-	tm->tm_sec = BCD2BIN(data);
+	tm->tm_sec = bcd2bin(data);
 
 	data = rs5c313_read_reg(RS5C313_ADDR_MIN);
 	data |= (rs5c313_read_reg(RS5C313_ADDR_MIN10) << 4);
-	tm->tm_min = BCD2BIN(data);
+	tm->tm_min = bcd2bin(data);
 
 	data = rs5c313_read_reg(RS5C313_ADDR_HOUR);
 	data |= (rs5c313_read_reg(RS5C313_ADDR_HOUR10) << 4);
-	tm->tm_hour = BCD2BIN(data);
+	tm->tm_hour = bcd2bin(data);
 
 	data = rs5c313_read_reg(RS5C313_ADDR_DAY);
 	data |= (rs5c313_read_reg(RS5C313_ADDR_DAY10) << 4);
-	tm->tm_mday = BCD2BIN(data);
+	tm->tm_mday = bcd2bin(data);
 
 	data = rs5c313_read_reg(RS5C313_ADDR_MON);
 	data |= (rs5c313_read_reg(RS5C313_ADDR_MON10) << 4);
-	tm->tm_mon = BCD2BIN(data) - 1;
+	tm->tm_mon = bcd2bin(data) - 1;
 
 	data = rs5c313_read_reg(RS5C313_ADDR_YEAR);
 	data |= (rs5c313_read_reg(RS5C313_ADDR_YEAR10) << 4);
-	tm->tm_year = BCD2BIN(data);
+	tm->tm_year = bcd2bin(data);
 
 	if (tm->tm_year < 70)
 		tm->tm_year += 100;
 
 	data = rs5c313_read_reg(RS5C313_ADDR_WEEK);
-	tm->tm_wday = BCD2BIN(data);
+	tm->tm_wday = bcd2bin(data);
 
 	RS5C313_CEDISABLE;
 	ndelay(700);		/* CE:L */
@@ -294,31 +294,31 @@
 		}
 	}
 
-	data = BIN2BCD(tm->tm_sec);
+	data = bin2bcd(tm->tm_sec);
 	rs5c313_write_reg(RS5C313_ADDR_SEC, data);
 	rs5c313_write_reg(RS5C313_ADDR_SEC10, (data >> 4));
 
-	data = BIN2BCD(tm->tm_min);
+	data = bin2bcd(tm->tm_min);
 	rs5c313_write_reg(RS5C313_ADDR_MIN, data );
 	rs5c313_write_reg(RS5C313_ADDR_MIN10, (data >> 4));
 
-	data = BIN2BCD(tm->tm_hour);
+	data = bin2bcd(tm->tm_hour);
 	rs5c313_write_reg(RS5C313_ADDR_HOUR, data);
 	rs5c313_write_reg(RS5C313_ADDR_HOUR10, (data >> 4));
 
-	data = BIN2BCD(tm->tm_mday);
+	data = bin2bcd(tm->tm_mday);
 	rs5c313_write_reg(RS5C313_ADDR_DAY, data);
 	rs5c313_write_reg(RS5C313_ADDR_DAY10, (data>> 4));
 
-	data = BIN2BCD(tm->tm_mon + 1);
+	data = bin2bcd(tm->tm_mon + 1);
 	rs5c313_write_reg(RS5C313_ADDR_MON, data);
 	rs5c313_write_reg(RS5C313_ADDR_MON10, (data >> 4));
 
-	data = BIN2BCD(tm->tm_year % 100);
+	data = bin2bcd(tm->tm_year % 100);
 	rs5c313_write_reg(RS5C313_ADDR_YEAR, data);
 	rs5c313_write_reg(RS5C313_ADDR_YEAR10, (data >> 4));
 
-	data = BIN2BCD(tm->tm_wday);
+	data = bin2bcd(tm->tm_wday);
 	rs5c313_write_reg(RS5C313_ADDR_WEEK, data);
 
 	RS5C313_CEDISABLE;	/* CE:H */
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 8394626..dd1e2bc 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -74,20 +74,20 @@
 	txbuf[3] = 0;	/* dummy */
 	txbuf[4] = RS5C348_CMD_MW(RS5C348_REG_SECS); /* cmd, sec, ... */
 	txp = &txbuf[5];
-	txp[RS5C348_REG_SECS] = BIN2BCD(tm->tm_sec);
-	txp[RS5C348_REG_MINS] = BIN2BCD(tm->tm_min);
+	txp[RS5C348_REG_SECS] = bin2bcd(tm->tm_sec);
+	txp[RS5C348_REG_MINS] = bin2bcd(tm->tm_min);
 	if (pdata->rtc_24h) {
-		txp[RS5C348_REG_HOURS] = BIN2BCD(tm->tm_hour);
+		txp[RS5C348_REG_HOURS] = bin2bcd(tm->tm_hour);
 	} else {
 		/* hour 0 is AM12, noon is PM12 */
-		txp[RS5C348_REG_HOURS] = BIN2BCD((tm->tm_hour + 11) % 12 + 1) |
+		txp[RS5C348_REG_HOURS] = bin2bcd((tm->tm_hour + 11) % 12 + 1) |
 			(tm->tm_hour >= 12 ? RS5C348_BIT_PM : 0);
 	}
-	txp[RS5C348_REG_WDAY] = BIN2BCD(tm->tm_wday);
-	txp[RS5C348_REG_DAY] = BIN2BCD(tm->tm_mday);
-	txp[RS5C348_REG_MONTH] = BIN2BCD(tm->tm_mon + 1) |
+	txp[RS5C348_REG_WDAY] = bin2bcd(tm->tm_wday);
+	txp[RS5C348_REG_DAY] = bin2bcd(tm->tm_mday);
+	txp[RS5C348_REG_MONTH] = bin2bcd(tm->tm_mon + 1) |
 		(tm->tm_year >= 100 ? RS5C348_BIT_Y2K : 0);
-	txp[RS5C348_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+	txp[RS5C348_REG_YEAR] = bin2bcd(tm->tm_year % 100);
 	/* write in one transfer to avoid data inconsistency */
 	ret = spi_write_then_read(spi, txbuf, sizeof(txbuf), NULL, 0);
 	udelay(62);	/* Tcsr 62us */
@@ -116,20 +116,20 @@
 	if (ret < 0)
 		return ret;
 
-	tm->tm_sec = BCD2BIN(rxbuf[RS5C348_REG_SECS] & RS5C348_SECS_MASK);
-	tm->tm_min = BCD2BIN(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
-	tm->tm_hour = BCD2BIN(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
+	tm->tm_sec = bcd2bin(rxbuf[RS5C348_REG_SECS] & RS5C348_SECS_MASK);
+	tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
+	tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
 	if (!pdata->rtc_24h) {
 		tm->tm_hour %= 12;
 		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
 			tm->tm_hour += 12;
 	}
-	tm->tm_wday = BCD2BIN(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
-	tm->tm_mday = BCD2BIN(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
+	tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
+	tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
 	tm->tm_mon =
-		BCD2BIN(rxbuf[RS5C348_REG_MONTH] & RS5C348_MONTH_MASK) - 1;
+		bcd2bin(rxbuf[RS5C348_REG_MONTH] & RS5C348_MONTH_MASK) - 1;
 	/* year is 1900 + tm->tm_year */
-	tm->tm_year = BCD2BIN(rxbuf[RS5C348_REG_YEAR]) +
+	tm->tm_year = bcd2bin(rxbuf[RS5C348_REG_YEAR]) +
 		((rxbuf[RS5C348_REG_MONTH] & RS5C348_BIT_Y2K) ? 100 : 0);
 
 	if (rtc_valid_tm(tm) < 0) {
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 8b56195..2f2c68d 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -148,9 +148,9 @@
 	unsigned	hour;
 
 	if (rs5c->time24)
-		return BCD2BIN(reg & 0x3f);
+		return bcd2bin(reg & 0x3f);
 
-	hour = BCD2BIN(reg & 0x1f);
+	hour = bcd2bin(reg & 0x1f);
 	if (hour == 12)
 		hour = 0;
 	if (reg & 0x20)
@@ -161,15 +161,15 @@
 static unsigned rs5c_hr2reg(struct rs5c372 *rs5c, unsigned hour)
 {
 	if (rs5c->time24)
-		return BIN2BCD(hour);
+		return bin2bcd(hour);
 
 	if (hour > 12)
-		return 0x20 | BIN2BCD(hour - 12);
+		return 0x20 | bin2bcd(hour - 12);
 	if (hour == 12)
-		return 0x20 | BIN2BCD(12);
+		return 0x20 | bin2bcd(12);
 	if (hour == 0)
-		return BIN2BCD(12);
-	return BIN2BCD(hour);
+		return bin2bcd(12);
+	return bin2bcd(hour);
 }
 
 static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
@@ -180,18 +180,18 @@
 	if (status < 0)
 		return status;
 
-	tm->tm_sec = BCD2BIN(rs5c->regs[RS5C372_REG_SECS] & 0x7f);
-	tm->tm_min = BCD2BIN(rs5c->regs[RS5C372_REG_MINS] & 0x7f);
+	tm->tm_sec = bcd2bin(rs5c->regs[RS5C372_REG_SECS] & 0x7f);
+	tm->tm_min = bcd2bin(rs5c->regs[RS5C372_REG_MINS] & 0x7f);
 	tm->tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C372_REG_HOURS]);
 
-	tm->tm_wday = BCD2BIN(rs5c->regs[RS5C372_REG_WDAY] & 0x07);
-	tm->tm_mday = BCD2BIN(rs5c->regs[RS5C372_REG_DAY] & 0x3f);
+	tm->tm_wday = bcd2bin(rs5c->regs[RS5C372_REG_WDAY] & 0x07);
+	tm->tm_mday = bcd2bin(rs5c->regs[RS5C372_REG_DAY] & 0x3f);
 
 	/* tm->tm_mon is zero-based */
-	tm->tm_mon = BCD2BIN(rs5c->regs[RS5C372_REG_MONTH] & 0x1f) - 1;
+	tm->tm_mon = bcd2bin(rs5c->regs[RS5C372_REG_MONTH] & 0x1f) - 1;
 
 	/* year is 1900 + tm->tm_year */
-	tm->tm_year = BCD2BIN(rs5c->regs[RS5C372_REG_YEAR]) + 100;
+	tm->tm_year = bcd2bin(rs5c->regs[RS5C372_REG_YEAR]) + 100;
 
 	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
 		"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -216,13 +216,13 @@
 		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
 	addr   = RS5C_ADDR(RS5C372_REG_SECS);
-	buf[0] = BIN2BCD(tm->tm_sec);
-	buf[1] = BIN2BCD(tm->tm_min);
+	buf[0] = bin2bcd(tm->tm_sec);
+	buf[1] = bin2bcd(tm->tm_min);
 	buf[2] = rs5c_hr2reg(rs5c, tm->tm_hour);
-	buf[3] = BIN2BCD(tm->tm_wday);
-	buf[4] = BIN2BCD(tm->tm_mday);
-	buf[5] = BIN2BCD(tm->tm_mon + 1);
-	buf[6] = BIN2BCD(tm->tm_year - 100);
+	buf[3] = bin2bcd(tm->tm_wday);
+	buf[4] = bin2bcd(tm->tm_mday);
+	buf[5] = bin2bcd(tm->tm_mon + 1);
+	buf[6] = bin2bcd(tm->tm_year - 100);
 
 	if (i2c_smbus_write_i2c_block_data(client, addr, sizeof(buf), buf) < 0) {
 		dev_err(&client->dev, "%s: write error\n", __func__);
@@ -367,7 +367,7 @@
 
 	/* report alarm time */
 	t->time.tm_sec = 0;
-	t->time.tm_min = BCD2BIN(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
+	t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
 	t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
 	t->time.tm_mday = -1;
 	t->time.tm_mon = -1;
@@ -413,7 +413,7 @@
 	}
 
 	/* set alarm */
-	buf[0] = BIN2BCD(t->time.tm_min);
+	buf[0] = bin2bcd(t->time.tm_min);
 	buf[1] = rs5c_hr2reg(rs5c, t->time.tm_hour);
 	buf[2] = 0x7f;	/* any/all days */
 
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index a6fa1f2..def4d39 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -104,12 +104,12 @@
 static char s35390a_hr2reg(struct s35390a *s35390a, int hour)
 {
 	if (s35390a->twentyfourhour)
-		return BIN2BCD(hour);
+		return bin2bcd(hour);
 
 	if (hour < 12)
-		return BIN2BCD(hour);
+		return bin2bcd(hour);
 
-	return 0x40 | BIN2BCD(hour - 12);
+	return 0x40 | bin2bcd(hour - 12);
 }
 
 static int s35390a_reg2hr(struct s35390a *s35390a, char reg)
@@ -117,9 +117,9 @@
 	unsigned hour;
 
 	if (s35390a->twentyfourhour)
-		return BCD2BIN(reg & 0x3f);
+		return bcd2bin(reg & 0x3f);
 
-	hour = BCD2BIN(reg & 0x3f);
+	hour = bcd2bin(reg & 0x3f);
 	if (reg & 0x40)
 		hour += 12;
 
@@ -137,13 +137,13 @@
 		tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year,
 		tm->tm_wday);
 
-	buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100);
-	buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1);
-	buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday);
-	buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday);
+	buf[S35390A_BYTE_YEAR] = bin2bcd(tm->tm_year - 100);
+	buf[S35390A_BYTE_MONTH] = bin2bcd(tm->tm_mon + 1);
+	buf[S35390A_BYTE_DAY] = bin2bcd(tm->tm_mday);
+	buf[S35390A_BYTE_WDAY] = bin2bcd(tm->tm_wday);
 	buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour);
-	buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min);
-	buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec);
+	buf[S35390A_BYTE_MINS] = bin2bcd(tm->tm_min);
+	buf[S35390A_BYTE_SECS] = bin2bcd(tm->tm_sec);
 
 	/* This chip expects the bits of each byte to be in reverse order */
 	for (i = 0; i < 7; ++i)
@@ -168,13 +168,13 @@
 	for (i = 0; i < 7; ++i)
 		buf[i] = bitrev8(buf[i]);
 
-	tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]);
-	tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]);
+	tm->tm_sec = bcd2bin(buf[S35390A_BYTE_SECS]);
+	tm->tm_min = bcd2bin(buf[S35390A_BYTE_MINS]);
 	tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]);
-	tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]);
-	tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]);
-	tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1;
-	tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100;
+	tm->tm_wday = bcd2bin(buf[S35390A_BYTE_WDAY]);
+	tm->tm_mday = bcd2bin(buf[S35390A_BYTE_DAY]);
+	tm->tm_mon = bcd2bin(buf[S35390A_BYTE_MONTH]) - 1;
+	tm->tm_year = bcd2bin(buf[S35390A_BYTE_YEAR]) + 100;
 
 	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, "
 		"mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec,
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e7d19b6..910bc70 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -134,12 +134,12 @@
 		 rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
 		 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
 
-	BCD_TO_BIN(rtc_tm->tm_sec);
-	BCD_TO_BIN(rtc_tm->tm_min);
-	BCD_TO_BIN(rtc_tm->tm_hour);
-	BCD_TO_BIN(rtc_tm->tm_mday);
-	BCD_TO_BIN(rtc_tm->tm_mon);
-	BCD_TO_BIN(rtc_tm->tm_year);
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
 
 	rtc_tm->tm_year += 100;
 	rtc_tm->tm_mon -= 1;
@@ -163,12 +163,12 @@
 		return -EINVAL;
 	}
 
-	writeb(BIN2BCD(tm->tm_sec),  base + S3C2410_RTCSEC);
-	writeb(BIN2BCD(tm->tm_min),  base + S3C2410_RTCMIN);
-	writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR);
-	writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE);
-	writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON);
-	writeb(BIN2BCD(year), base + S3C2410_RTCYEAR);
+	writeb(bin2bcd(tm->tm_sec),  base + S3C2410_RTCSEC);
+	writeb(bin2bcd(tm->tm_min),  base + S3C2410_RTCMIN);
+	writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
+	writeb(bin2bcd(tm->tm_mday), base + S3C2410_RTCDATE);
+	writeb(bin2bcd(tm->tm_mon + 1), base + S3C2410_RTCMON);
+	writeb(bin2bcd(year), base + S3C2410_RTCYEAR);
 
 	return 0;
 }
@@ -199,34 +199,34 @@
 	/* decode the alarm enable field */
 
 	if (alm_en & S3C2410_RTCALM_SECEN)
-		BCD_TO_BIN(alm_tm->tm_sec);
+		alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
 	else
 		alm_tm->tm_sec = 0xff;
 
 	if (alm_en & S3C2410_RTCALM_MINEN)
-		BCD_TO_BIN(alm_tm->tm_min);
+		alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
 	else
 		alm_tm->tm_min = 0xff;
 
 	if (alm_en & S3C2410_RTCALM_HOUREN)
-		BCD_TO_BIN(alm_tm->tm_hour);
+		alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
 	else
 		alm_tm->tm_hour = 0xff;
 
 	if (alm_en & S3C2410_RTCALM_DAYEN)
-		BCD_TO_BIN(alm_tm->tm_mday);
+		alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
 	else
 		alm_tm->tm_mday = 0xff;
 
 	if (alm_en & S3C2410_RTCALM_MONEN) {
-		BCD_TO_BIN(alm_tm->tm_mon);
+		alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
 		alm_tm->tm_mon -= 1;
 	} else {
 		alm_tm->tm_mon = 0xff;
 	}
 
 	if (alm_en & S3C2410_RTCALM_YEAREN)
-		BCD_TO_BIN(alm_tm->tm_year);
+		alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
 	else
 		alm_tm->tm_year = 0xffff;
 
@@ -250,17 +250,17 @@
 
 	if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
 		alrm_en |= S3C2410_RTCALM_SECEN;
-		writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC);
+		writeb(bin2bcd(tm->tm_sec), base + S3C2410_ALMSEC);
 	}
 
 	if (tm->tm_min < 60 && tm->tm_min >= 0) {
 		alrm_en |= S3C2410_RTCALM_MINEN;
-		writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN);
+		writeb(bin2bcd(tm->tm_min), base + S3C2410_ALMMIN);
 	}
 
 	if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
 		alrm_en |= S3C2410_RTCALM_HOUREN;
-		writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR);
+		writeb(bin2bcd(tm->tm_hour), base + S3C2410_ALMHOUR);
 	}
 
 	pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index fcead4c..aaf9d6a 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -324,23 +324,23 @@
 
 		sec128 = readb(rtc->regbase + R64CNT);
 
-		tm->tm_sec	= BCD2BIN(readb(rtc->regbase + RSECCNT));
-		tm->tm_min	= BCD2BIN(readb(rtc->regbase + RMINCNT));
-		tm->tm_hour	= BCD2BIN(readb(rtc->regbase + RHRCNT));
-		tm->tm_wday	= BCD2BIN(readb(rtc->regbase + RWKCNT));
-		tm->tm_mday	= BCD2BIN(readb(rtc->regbase + RDAYCNT));
-		tm->tm_mon	= BCD2BIN(readb(rtc->regbase + RMONCNT)) - 1;
+		tm->tm_sec	= bcd2bin(readb(rtc->regbase + RSECCNT));
+		tm->tm_min	= bcd2bin(readb(rtc->regbase + RMINCNT));
+		tm->tm_hour	= bcd2bin(readb(rtc->regbase + RHRCNT));
+		tm->tm_wday	= bcd2bin(readb(rtc->regbase + RWKCNT));
+		tm->tm_mday	= bcd2bin(readb(rtc->regbase + RDAYCNT));
+		tm->tm_mon	= bcd2bin(readb(rtc->regbase + RMONCNT)) - 1;
 
 		if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
 			yr  = readw(rtc->regbase + RYRCNT);
-			yr100 = BCD2BIN(yr >> 8);
+			yr100 = bcd2bin(yr >> 8);
 			yr &= 0xff;
 		} else {
 			yr  = readb(rtc->regbase + RYRCNT);
-			yr100 = BCD2BIN((yr == 0x99) ? 0x19 : 0x20);
+			yr100 = bcd2bin((yr == 0x99) ? 0x19 : 0x20);
 		}
 
-		tm->tm_year = (yr100 * 100 + BCD2BIN(yr)) - 1900;
+		tm->tm_year = (yr100 * 100 + bcd2bin(yr)) - 1900;
 
 		sec2 = readb(rtc->regbase + R64CNT);
 		cf_bit = readb(rtc->regbase + RCR1) & RCR1_CF;
@@ -382,20 +382,20 @@
 	tmp &= ~RCR2_START;
 	writeb(tmp, rtc->regbase + RCR2);
 
-	writeb(BIN2BCD(tm->tm_sec),  rtc->regbase + RSECCNT);
-	writeb(BIN2BCD(tm->tm_min),  rtc->regbase + RMINCNT);
-	writeb(BIN2BCD(tm->tm_hour), rtc->regbase + RHRCNT);
-	writeb(BIN2BCD(tm->tm_wday), rtc->regbase + RWKCNT);
-	writeb(BIN2BCD(tm->tm_mday), rtc->regbase + RDAYCNT);
-	writeb(BIN2BCD(tm->tm_mon + 1), rtc->regbase + RMONCNT);
+	writeb(bin2bcd(tm->tm_sec),  rtc->regbase + RSECCNT);
+	writeb(bin2bcd(tm->tm_min),  rtc->regbase + RMINCNT);
+	writeb(bin2bcd(tm->tm_hour), rtc->regbase + RHRCNT);
+	writeb(bin2bcd(tm->tm_wday), rtc->regbase + RWKCNT);
+	writeb(bin2bcd(tm->tm_mday), rtc->regbase + RDAYCNT);
+	writeb(bin2bcd(tm->tm_mon + 1), rtc->regbase + RMONCNT);
 
 	if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
-		year = (BIN2BCD((tm->tm_year + 1900) / 100) << 8) |
-			BIN2BCD(tm->tm_year % 100);
+		year = (bin2bcd((tm->tm_year + 1900) / 100) << 8) |
+			bin2bcd(tm->tm_year % 100);
 		writew(year, rtc->regbase + RYRCNT);
 	} else {
 		year = tm->tm_year % 100;
-		writeb(BIN2BCD(year), rtc->regbase + RYRCNT);
+		writeb(bin2bcd(year), rtc->regbase + RYRCNT);
 	}
 
 	/* Start RTC */
@@ -417,7 +417,7 @@
 	byte = readb(rtc->regbase + reg_off);
 	if (byte & AR_ENB) {
 		byte &= ~AR_ENB;	/* strip the enable bit */
-		value = BCD2BIN(byte);
+		value = bcd2bin(byte);
 	}
 
 	return value;
@@ -455,7 +455,7 @@
 	if (value < 0)
 		writeb(0, rtc->regbase + reg_off);
 	else
-		writeb(BIN2BCD(value) | AR_ENB,  rtc->regbase + reg_off);
+		writeb(bin2bcd(value) | AR_ENB,  rtc->regbase + reg_off);
 }
 
 static int sh_rtc_check_alarm(struct rtc_time *tm)
@@ -568,7 +568,7 @@
 	struct sh_rtc *rtc;
 	struct resource *res;
 	unsigned int tmp;
-	int ret = -ENOENT;
+	int ret;
 
 	rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL);
 	if (unlikely(!rtc))
@@ -577,26 +577,33 @@
 	spin_lock_init(&rtc->lock);
 
 	/* get periodic/carry/alarm irqs */
-	rtc->periodic_irq = platform_get_irq(pdev, 0);
-	if (unlikely(rtc->periodic_irq < 0)) {
+	ret = platform_get_irq(pdev, 0);
+	if (unlikely(ret < 0)) {
+		ret = -ENOENT;
 		dev_err(&pdev->dev, "No IRQ for period\n");
 		goto err_badres;
 	}
+	rtc->periodic_irq = ret;
 
-	rtc->carry_irq = platform_get_irq(pdev, 1);
-	if (unlikely(rtc->carry_irq < 0)) {
+	ret = platform_get_irq(pdev, 1);
+	if (unlikely(ret < 0)) {
+		ret = -ENOENT;
 		dev_err(&pdev->dev, "No IRQ for carry\n");
 		goto err_badres;
 	}
+	rtc->carry_irq = ret;
 
-	rtc->alarm_irq = platform_get_irq(pdev, 2);
-	if (unlikely(rtc->alarm_irq < 0)) {
+	ret = platform_get_irq(pdev, 2);
+	if (unlikely(ret < 0)) {
+		ret = -ENOENT;
 		dev_err(&pdev->dev, "No IRQ for alarm\n");
 		goto err_badres;
 	}
+	rtc->alarm_irq = ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 	if (unlikely(res == NULL)) {
+		ret = -ENOENT;
 		dev_err(&pdev->dev, "No IO resource\n");
 		goto err_badres;
 	}
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 9a7e920..f4cd46e 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -82,14 +82,14 @@
 	flags = readb(pdata->ioaddr + RTC_FLAGS);
 	writeb(flags | RTC_WRITE, pdata->ioaddr + RTC_FLAGS);
 
-	writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
-	writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
-	writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
-	writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
-	writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
-	writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
-	writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
-	writeb(BIN2BCD((tm->tm_year + 1900) / 100), ioaddr + RTC_CENTURY);
+	writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR);
+	writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH);
+	writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
+	writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE);
+	writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS);
+	writeb(bin2bcd(tm->tm_min), ioaddr + RTC_MINUTES);
+	writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
+	writeb(bin2bcd((tm->tm_year + 1900) / 100), ioaddr + RTC_CENTURY);
 
 	writeb(flags & ~RTC_WRITE, pdata->ioaddr + RTC_FLAGS);
 	return 0;
@@ -120,14 +120,14 @@
 	year = readb(ioaddr + RTC_YEAR);
 	century = readb(ioaddr + RTC_CENTURY);
 	writeb(flags & ~RTC_READ, ioaddr + RTC_FLAGS);
-	tm->tm_sec = BCD2BIN(second);
-	tm->tm_min = BCD2BIN(minute);
-	tm->tm_hour = BCD2BIN(hour);
-	tm->tm_mday = BCD2BIN(day);
-	tm->tm_wday = BCD2BIN(week);
-	tm->tm_mon = BCD2BIN(month) - 1;
+	tm->tm_sec = bcd2bin(second);
+	tm->tm_min = bcd2bin(minute);
+	tm->tm_hour = bcd2bin(hour);
+	tm->tm_mday = bcd2bin(day);
+	tm->tm_wday = bcd2bin(week);
+	tm->tm_mon = bcd2bin(month) - 1;
 	/* year is 1900 + tm->tm_year */
-	tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
+	tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
 
 	if (rtc_valid_tm(tm) < 0) {
 		dev_err(dev, "retrieved date/time is not valid.\n");
@@ -148,16 +148,16 @@
 	writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
 
 	writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_mday),
+	       0x80 : bin2bcd(pdata->alrm_mday),
 	       ioaddr + RTC_DATE_ALARM);
 	writeb(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_hour),
+	       0x80 : bin2bcd(pdata->alrm_hour),
 	       ioaddr + RTC_HOURS_ALARM);
 	writeb(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_min),
+	       0x80 : bin2bcd(pdata->alrm_min),
 	       ioaddr + RTC_MINUTES_ALARM);
 	writeb(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
-	       0x80 : BIN2BCD(pdata->alrm_sec),
+	       0x80 : bin2bcd(pdata->alrm_sec),
 	       ioaddr + RTC_SECONDS_ALARM);
 	writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
 	readb(ioaddr + RTC_FLAGS);	/* clear interrupts */
@@ -280,7 +280,6 @@
 	.attr = {
 		.name = "nvram",
 		.mode = S_IRUGO | S_IWUSR,
-		.owner = THIS_MODULE,
 	},
 	.size = RTC_OFFSET,
 	.read = stk17ta8_nvram_read,
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
new file mode 100644
index 0000000..abe87a4
--- /dev/null
+++ b/drivers/rtc/rtc-twl4030.c
@@ -0,0 +1,564 @@
+/*
+ * rtc-twl4030.c -- TWL4030 Real Time Clock interface
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc
+ * Author: Alexandre Rusev <source@mvista.com>
+ *
+ * Based on original TI driver twl4030-rtc.c
+ *   Copyright (C) 2006 Texas Instruments, Inc.
+ *
+ * Based on rtc-omap.c
+ *   Copyright (C) 2003 MontaVista Software, Inc.
+ *   Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
+ *   Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * RTC block register offsets (use TWL_MODULE_RTC)
+ */
+#define REG_SECONDS_REG                          0x00
+#define REG_MINUTES_REG                          0x01
+#define REG_HOURS_REG                            0x02
+#define REG_DAYS_REG                             0x03
+#define REG_MONTHS_REG                           0x04
+#define REG_YEARS_REG                            0x05
+#define REG_WEEKS_REG                            0x06
+
+#define REG_ALARM_SECONDS_REG                    0x07
+#define REG_ALARM_MINUTES_REG                    0x08
+#define REG_ALARM_HOURS_REG                      0x09
+#define REG_ALARM_DAYS_REG                       0x0A
+#define REG_ALARM_MONTHS_REG                     0x0B
+#define REG_ALARM_YEARS_REG                      0x0C
+
+#define REG_RTC_CTRL_REG                         0x0D
+#define REG_RTC_STATUS_REG                       0x0E
+#define REG_RTC_INTERRUPTS_REG                   0x0F
+
+#define REG_RTC_COMP_LSB_REG                     0x10
+#define REG_RTC_COMP_MSB_REG                     0x11
+
+/* RTC_CTRL_REG bitfields */
+#define BIT_RTC_CTRL_REG_STOP_RTC_M              0x01
+#define BIT_RTC_CTRL_REG_ROUND_30S_M             0x02
+#define BIT_RTC_CTRL_REG_AUTO_COMP_M             0x04
+#define BIT_RTC_CTRL_REG_MODE_12_24_M            0x08
+#define BIT_RTC_CTRL_REG_TEST_MODE_M             0x10
+#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M        0x20
+#define BIT_RTC_CTRL_REG_GET_TIME_M              0x40
+
+/* RTC_STATUS_REG bitfields */
+#define BIT_RTC_STATUS_REG_RUN_M                 0x02
+#define BIT_RTC_STATUS_REG_1S_EVENT_M            0x04
+#define BIT_RTC_STATUS_REG_1M_EVENT_M            0x08
+#define BIT_RTC_STATUS_REG_1H_EVENT_M            0x10
+#define BIT_RTC_STATUS_REG_1D_EVENT_M            0x20
+#define BIT_RTC_STATUS_REG_ALARM_M               0x40
+#define BIT_RTC_STATUS_REG_POWER_UP_M            0x80
+
+/* RTC_INTERRUPTS_REG bitfields */
+#define BIT_RTC_INTERRUPTS_REG_EVERY_M           0x03
+#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M        0x04
+#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M        0x08
+
+
+/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */
+#define ALL_TIME_REGS		6
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Supports 1 byte read from TWL4030 RTC register.
+ */
+static int twl4030_rtc_read_u8(u8 *data, u8 reg)
+{
+	int ret;
+
+	ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
+	if (ret < 0)
+		pr_err("twl4030_rtc: Could not read TWL4030 "
+		       "register %X - error %d\n", reg, ret);
+	return ret;
+}
+
+/*
+ * Supports 1 byte write to TWL4030 RTC registers.
+ */
+static int twl4030_rtc_write_u8(u8 data, u8 reg)
+{
+	int ret;
+
+	ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
+	if (ret < 0)
+		pr_err("twl4030_rtc: Could not write TWL4030 "
+		       "register %X - error %d\n", reg, ret);
+	return ret;
+}
+
+/*
+ * Cache the value for timer/alarm interrupts register; this is
+ * only changed by callers holding rtc ops lock (or resume).
+ */
+static unsigned char rtc_irq_bits;
+
+/*
+ * Enable timer and/or alarm interrupts.
+ */
+static int set_rtc_irq_bit(unsigned char bit)
+{
+	unsigned char val;
+	int ret;
+
+	val = rtc_irq_bits | bit;
+	ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+	if (ret == 0)
+		rtc_irq_bits = val;
+
+	return ret;
+}
+
+/*
+ * Disable timer and/or alarm interrupts.
+ */
+static int mask_rtc_irq_bit(unsigned char bit)
+{
+	unsigned char val;
+	int ret;
+
+	val = rtc_irq_bits & ~bit;
+	ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+	if (ret == 0)
+		rtc_irq_bits = val;
+
+	return ret;
+}
+
+static inline int twl4030_rtc_alarm_irq_set_state(int enabled)
+{
+	int ret;
+
+	if (enabled)
+		ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+	else
+		ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+
+	return ret;
+}
+
+static inline int twl4030_rtc_irq_set_state(int enabled)
+{
+	int ret;
+
+	if (enabled)
+		ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+	else
+		ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+
+	return ret;
+}
+
+/*
+ * Gets current TWL4030 RTC time and date parameters.
+ *
+ * The RTC's time/alarm representation is not what gmtime(3) requires
+ * Linux to use:
+ *
+ *  - Months are 1..12 vs Linux 0-11
+ *  - Years are 0..99 vs Linux 1900..N (we assume 21st century)
+ */
+static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	unsigned char rtc_data[ALL_TIME_REGS + 1];
+	int ret;
+	u8 save_control;
+
+	ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+	if (ret < 0)
+		return ret;
+
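+	/* Setting GET_TIME latches the running counters into static shadow
+	 * registers, so the block read below returns a coherent snapshot. */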
+	save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
+
+	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+	if (ret < 0)
+		return ret;
+
+	ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
+			       REG_SECONDS_REG, ALL_TIME_REGS);
+
+	if (ret < 0) {
+		dev_err(dev, "rtc_read_time error %d\n", ret);
+		return ret;
+	}
+
+	tm->tm_sec = bcd2bin(rtc_data[0]);
+	tm->tm_min = bcd2bin(rtc_data[1]);
+	tm->tm_hour = bcd2bin(rtc_data[2]);
+	tm->tm_mday = bcd2bin(rtc_data[3]);
+	tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+	tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+
+	return ret;
+}
+
+static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	unsigned char save_control;
+	unsigned char rtc_data[ALL_TIME_REGS + 1];
+	int ret;
+
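+	/* rtc_data[0] is left unused: twl4030_i2c_write() reserves the first
+	 * byte of the buffer for the register address, so data starts at 1. */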
+	rtc_data[1] = bin2bcd(tm->tm_sec);
+	rtc_data[2] = bin2bcd(tm->tm_min);
+	rtc_data[3] = bin2bcd(tm->tm_hour);
+	rtc_data[4] = bin2bcd(tm->tm_mday);
+	rtc_data[5] = bin2bcd(tm->tm_mon + 1);
+	rtc_data[6] = bin2bcd(tm->tm_year - 100);
+
+	/* Stop the RTC while updating the time/calendar (TC) registers */
+	ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+	if (ret < 0)
+		goto out;
+
+	save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
+	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+	if (ret < 0)
+		goto out;
+
+	/* update all the time registers in one shot */
+	ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
+			REG_SECONDS_REG, ALL_TIME_REGS);
+	if (ret < 0) {
+		dev_err(dev, "rtc_set_time error %d\n", ret);
+		goto out;
+	}
+
+	/* Restart the RTC */
+	save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
+	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+
+out:
+	return ret;
+}
+
+/*
+ * Gets current TWL4030 RTC alarm time.
+ */
+static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+	unsigned char rtc_data[ALL_TIME_REGS + 1];
+	int ret;
+
+	ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
+			       REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+	if (ret < 0) {
+		dev_err(dev, "rtc_read_alarm error %d\n", ret);
+		return ret;
+	}
+
+	/* some of these fields may be wildcard/"match all" */
+	alm->time.tm_sec = bcd2bin(rtc_data[0]);
+	alm->time.tm_min = bcd2bin(rtc_data[1]);
+	alm->time.tm_hour = bcd2bin(rtc_data[2]);
+	alm->time.tm_mday = bcd2bin(rtc_data[3]);
+	alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1;
+	alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;
+
+	/* report cached alarm enable state */
+	if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
+		alm->enabled = 1;
+
+	return ret;
+}
+
+static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+	unsigned char alarm_data[ALL_TIME_REGS + 1];
+	int ret;
+
+	ret = twl4030_rtc_alarm_irq_set_state(0);
+	if (ret)
+		goto out;
+
+	alarm_data[1] = bin2bcd(alm->time.tm_sec);
+	alarm_data[2] = bin2bcd(alm->time.tm_min);
+	alarm_data[3] = bin2bcd(alm->time.tm_hour);
+	alarm_data[4] = bin2bcd(alm->time.tm_mday);
+	alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
+	alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
+
+	/* update all the alarm registers in one shot */
+	ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
+			REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+	if (ret) {
+		dev_err(dev, "rtc_set_alarm error %d\n", ret);
+		goto out;
+	}
+
+	if (alm->enabled)
+		ret = twl4030_rtc_alarm_irq_set_state(1);
+out:
+	return ret;
+}
+
+#ifdef	CONFIG_RTC_INTF_DEV
+
+static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
+			     unsigned long arg)
+{
+	switch (cmd) {
+	case RTC_AIE_OFF:
+		return twl4030_rtc_alarm_irq_set_state(0);
+	case RTC_AIE_ON:
+		return twl4030_rtc_alarm_irq_set_state(1);
+	case RTC_UIE_OFF:
+		return twl4030_rtc_irq_set_state(0);
+	case RTC_UIE_ON:
+		return twl4030_rtc_irq_set_state(1);
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+#else
+#define	twl4030_rtc_ioctl	NULL
+#endif
+
+static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
+{
+	unsigned long events = 0;
+	int ret = IRQ_NONE;
+	int res;
+	u8 rd_reg;
+
+#ifdef CONFIG_LOCKDEP
+	/* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
+	 * we don't want and can't tolerate.  Although it might be
+	 * friendlier not to borrow this thread context...
+	 */
+	local_irq_enable();
+#endif
+
+	res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+	if (res)
+		goto out;
+	/*
+	 * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG.
+	 * Only one (ALARM or TIMER) interrupt source may be enabled
+	 * at a time; we could also cross-check the result by reading
+	 * RTC_INTERRUPTS_REG[IT_TIMER,IT_ALARM].
+	 */
+	if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
+		events |= RTC_IRQF | RTC_AF;
+	else
+		events |= RTC_IRQF | RTC_UF;
+
+	res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
+				   REG_RTC_STATUS_REG);
+	if (res)
+		goto out;
+
+	/* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
+	 * needs 2 reads to clear the interrupt. One read is done in
+	 * do_twl4030_pwrirq(). Doing the second read, to clear
+	 * the bit.
+	 *
+	 * FIXME the reason PWR_ISR1 needs an extra read is that
+	 * RTC_IF kept retriggering until we cleared REG_ALARM_M above.
+	 * But re-reading like this is a bad hack; by doing so we
+	 * risk wrongly clearing status for some other IRQ (losing
+	 * the interrupt).  Be smarter about handling RTC_UF ...
+	 */
+	res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
+			&rd_reg, TWL4030_INT_PWR_ISR1);
+	if (res)
+		goto out;
+
+	/* Notify RTC core on event */
+	rtc_update_irq(rtc, 1, events);
+
+	ret = IRQ_HANDLED;
+out:
+	return ret;
+}
+
+static struct rtc_class_ops twl4030_rtc_ops = {
+	.ioctl		= twl4030_rtc_ioctl,
+	.read_time	= twl4030_rtc_read_time,
+	.set_time	= twl4030_rtc_set_time,
+	.read_alarm	= twl4030_rtc_read_alarm,
+	.set_alarm	= twl4030_rtc_set_alarm,
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
+{
+	struct rtc_device *rtc;
+	int ret = 0;
+	int irq = platform_get_irq(pdev, 0);
+	u8 rd_reg;
+
+	if (irq < 0)
+		return irq;
+
+	rtc = rtc_device_register(pdev->name,
+				  &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		ret = -EINVAL;
+		dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+			PTR_ERR(rtc));
+		goto out0;
+
+	}
+
+	platform_set_drvdata(pdev, rtc);
+
+	ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+
+	if (ret < 0)
+		goto out1;
+
+	if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
+		dev_warn(&pdev->dev, "Power up reset detected.\n");
+
+	if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
+		dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
+
+	/* Clear RTC Power up reset and pending alarm interrupts */
+	ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
+	if (ret < 0)
+		goto out1;
+
+	ret = request_irq(irq, twl4030_rtc_interrupt,
+				IRQF_TRIGGER_RISING,
+				rtc->dev.bus_id, rtc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "IRQ is not free.\n");
+		goto out1;
+	}
+
+	/* Check RTC module status, Enable if it is off */
+	ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
+	if (ret < 0)
+		goto out2;
+
+	if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
+		dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
+		rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
+		ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
+		if (ret < 0)
+			goto out2;
+	}
+
+	/* init cached IRQ enable bits */
+	ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+	if (ret < 0)
+		goto out2;
+
+	return ret;
+
+out2:
+	free_irq(irq, rtc);
+out1:
+	rtc_device_unregister(rtc);
+out0:
+	return ret;
+}
+
+/*
+ * Disable all TWL4030 RTC module interrupts.
+ * The RTC itself is left running; the IRQ is freed and the device unregistered.
+ */
+static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
+{
+	/* leave rtc running, but disable irqs */
+	struct rtc_device *rtc = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+
+	free_irq(irq, rtc);
+
+	rtc_device_unregister(rtc);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void twl4030_rtc_shutdown(struct platform_device *pdev)
+{
+	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
+			 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+}
+
+#ifdef CONFIG_PM
+
+static unsigned char irqstat;
+
+static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	irqstat = rtc_irq_bits;
+
+	/* REVISIT alarm may need to wake us from sleep */
+	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
+			 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+	return 0;
+}
+
+static int twl4030_rtc_resume(struct platform_device *pdev)
+{
+	set_rtc_irq_bit(irqstat);
+	return 0;
+}
+
+#else
+#define twl4030_rtc_suspend NULL
+#define twl4030_rtc_resume  NULL
+#endif
+
+MODULE_ALIAS("platform:twl4030_rtc");
+
+static struct platform_driver twl4030rtc_driver = {
+	.probe		= twl4030_rtc_probe,
+	.remove		= __devexit_p(twl4030_rtc_remove),
+	.shutdown	= twl4030_rtc_shutdown,
+	.suspend	= twl4030_rtc_suspend,
+	.resume		= twl4030_rtc_resume,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "twl4030_rtc",
+	},
+};
+
+static int __init twl4030_rtc_init(void)
+{
+	return platform_driver_register(&twl4030rtc_driver);
+}
+module_init(twl4030_rtc_init);
+
+static void __exit twl4030_rtc_exit(void)
+{
+	platform_driver_unregister(&twl4030rtc_driver);
+}
+module_exit(twl4030_rtc_exit);
+
+MODULE_AUTHOR("Texas Instruments, MontaVista Software");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 10025d8..14d4f03 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -92,19 +92,19 @@
 
 	/* ...and then read constant values. */
 	tmp = v3020_get_reg(chip, V3020_SECONDS);
-	dt->tm_sec	= BCD2BIN(tmp);
+	dt->tm_sec	= bcd2bin(tmp);
 	tmp = v3020_get_reg(chip, V3020_MINUTES);
-	dt->tm_min	= BCD2BIN(tmp);
+	dt->tm_min	= bcd2bin(tmp);
 	tmp = v3020_get_reg(chip, V3020_HOURS);
-	dt->tm_hour	= BCD2BIN(tmp);
+	dt->tm_hour	= bcd2bin(tmp);
 	tmp = v3020_get_reg(chip, V3020_MONTH_DAY);
-	dt->tm_mday	= BCD2BIN(tmp);
+	dt->tm_mday	= bcd2bin(tmp);
 	tmp = v3020_get_reg(chip, V3020_MONTH);
-	dt->tm_mon    = BCD2BIN(tmp) - 1;
+	dt->tm_mon    = bcd2bin(tmp) - 1;
 	tmp = v3020_get_reg(chip, V3020_WEEK_DAY);
-	dt->tm_wday	= BCD2BIN(tmp);
+	dt->tm_wday	= bcd2bin(tmp);
 	tmp = v3020_get_reg(chip, V3020_YEAR);
-	dt->tm_year = BCD2BIN(tmp)+100;
+	dt->tm_year = bcd2bin(tmp)+100;
 
 #ifdef DEBUG
 	printk("\n%s : Read RTC values\n",__func__);
@@ -136,13 +136,13 @@
 #endif
 
 	/* Write all the values to ram... */
-	v3020_set_reg(chip, V3020_SECONDS, 	BIN2BCD(dt->tm_sec));
-	v3020_set_reg(chip, V3020_MINUTES, 	BIN2BCD(dt->tm_min));
-	v3020_set_reg(chip, V3020_HOURS, 	BIN2BCD(dt->tm_hour));
-	v3020_set_reg(chip, V3020_MONTH_DAY,	BIN2BCD(dt->tm_mday));
-	v3020_set_reg(chip, V3020_MONTH,     BIN2BCD(dt->tm_mon + 1));
-	v3020_set_reg(chip, V3020_WEEK_DAY, 	BIN2BCD(dt->tm_wday));
-	v3020_set_reg(chip, V3020_YEAR, 	BIN2BCD(dt->tm_year % 100));
+	v3020_set_reg(chip, V3020_SECONDS, 	bin2bcd(dt->tm_sec));
+	v3020_set_reg(chip, V3020_MINUTES, 	bin2bcd(dt->tm_min));
+	v3020_set_reg(chip, V3020_HOURS, 	bin2bcd(dt->tm_hour));
+	v3020_set_reg(chip, V3020_MONTH_DAY,	bin2bcd(dt->tm_mday));
+	v3020_set_reg(chip, V3020_MONTH,     bin2bcd(dt->tm_mon + 1));
+	v3020_set_reg(chip, V3020_WEEK_DAY, 	bin2bcd(dt->tm_wday));
+	v3020_set_reg(chip, V3020_YEAR, 	bin2bcd(dt->tm_year % 100));
 
 	/* ...and set the clock. */
 	v3020_set_reg(chip, V3020_CMD_RAM2CLOCK, 0);
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 884b635..834dcc6 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -360,7 +360,7 @@
 	spin_unlock_irq(&rtc_lock);
 
 	aie_irq = platform_get_irq(pdev, 0);
-	if (aie_irq < 0 || aie_irq >= NR_IRQS) {
+	if (aie_irq < 0 || aie_irq >= nr_irqs) {
 		retval = -EBUSY;
 		goto err_device_unregister;
 	}
@@ -371,7 +371,7 @@
 		goto err_device_unregister;
 
 	pie_irq = platform_get_irq(pdev, 1);
-	if (pie_irq < 0 || pie_irq >= NR_IRQS)
+	if (pie_irq < 0 || pie_irq >= nr_irqs)
 		goto err_free_irq;
 
 	retval = request_irq(pie_irq, rtclong1_interrupt, IRQF_DISABLED,
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 7dcfba1..310c107 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -118,13 +118,13 @@
 		for (i = 0; i <= 4; i++)
 			buf[i] &= 0x7F;
 
-	tm->tm_sec = BCD2BIN(buf[CCR_SEC]);
-	tm->tm_min = BCD2BIN(buf[CCR_MIN]);
-	tm->tm_hour = BCD2BIN(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */
-	tm->tm_mday = BCD2BIN(buf[CCR_MDAY]);
-	tm->tm_mon = BCD2BIN(buf[CCR_MONTH]) - 1; /* mon is 0-11 */
-	tm->tm_year = BCD2BIN(buf[CCR_YEAR])
-			+ (BCD2BIN(buf[CCR_Y2K]) * 100) - 1900;
+	tm->tm_sec = bcd2bin(buf[CCR_SEC]);
+	tm->tm_min = bcd2bin(buf[CCR_MIN]);
+	tm->tm_hour = bcd2bin(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */
+	tm->tm_mday = bcd2bin(buf[CCR_MDAY]);
+	tm->tm_mon = bcd2bin(buf[CCR_MONTH]) - 1; /* mon is 0-11 */
+	tm->tm_year = bcd2bin(buf[CCR_YEAR])
+			+ (bcd2bin(buf[CCR_Y2K]) * 100) - 1900;
 	tm->tm_wday = buf[CCR_WDAY];
 
 	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
@@ -174,11 +174,11 @@
 		__func__,
 		tm->tm_sec, tm->tm_min, tm->tm_hour);
 
-	buf[CCR_SEC] = BIN2BCD(tm->tm_sec);
-	buf[CCR_MIN] = BIN2BCD(tm->tm_min);
+	buf[CCR_SEC] = bin2bcd(tm->tm_sec);
+	buf[CCR_MIN] = bin2bcd(tm->tm_min);
 
 	/* set hour and 24hr bit */
-	buf[CCR_HOUR] = BIN2BCD(tm->tm_hour) | X1205_HR_MIL;
+	buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;
 
 	/* should we also set the date? */
 	if (datetoo) {
@@ -187,15 +187,15 @@
 			__func__,
 			tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
-		buf[CCR_MDAY] = BIN2BCD(tm->tm_mday);
+		buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
 
 		/* month, 1 - 12 */
-		buf[CCR_MONTH] = BIN2BCD(tm->tm_mon + 1);
+		buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
 
 		/* year, since the rtc epoch*/
-		buf[CCR_YEAR] = BIN2BCD(tm->tm_year % 100);
+		buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
 		buf[CCR_WDAY] = tm->tm_wday & 0x07;
-		buf[CCR_Y2K] = BIN2BCD(tm->tm_year / 100);
+		buf[CCR_Y2K] = bin2bcd(tm->tm_year / 100);
 	}
 
 	/* If writing alarm registers, set compare bits on registers 0-4 */
@@ -437,7 +437,7 @@
 			return -EIO;
 		}
 
-		value = BCD2BIN(reg & probe_limits_pattern[i].mask);
+		value = bcd2bin(reg & probe_limits_pattern[i].mask);
 
 		if (value > probe_limits_pattern[i].max ||
 			value < probe_limits_pattern[i].min) {
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index b5a868d..1e5478a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -337,7 +337,7 @@
 #else
 #define IRQ_MIN 9
 #if defined(__PPC)
-#define IRQ_MAX (NR_IRQS-1)
+#define IRQ_MAX (nr_irqs-1)
 #else
 #define IRQ_MAX 12
 #endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 69f8346..5877f29 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -189,7 +189,6 @@
 	.attr = {
 		.name = "mu_read",
 		.mode = S_IRUSR ,
-		.owner = THIS_MODULE,
 	},
 	.size = 1032,
 	.read = arcmsr_sysfs_iop_message_read,
@@ -199,7 +198,6 @@
 	.attr = {
 		.name = "mu_write",
 		.mode = S_IWUSR,
-		.owner = THIS_MODULE,
 	},
 	.size = 1032,
 	.write = arcmsr_sysfs_iop_message_write,
@@ -209,7 +207,6 @@
 	.attr = {
 		.name = "mu_clear",
 		.mode = S_IWUSR,
-		.owner = THIS_MODULE,
 	},
 	.size = 1,
 	.write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 740bad4..afc96e8 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -343,6 +343,11 @@
 }
 
 #ifdef CONFIG_IDE_PROC_FS
+static ide_proc_entry_t idescsi_proc[] = {
+	{ "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
+	{ NULL, 0, NULL, NULL }
+};
+
 #define ide_scsi_devset_get(name, field) \
 static int get_##name(ide_drive_t *drive) \
 { \
@@ -378,6 +383,16 @@
 	IDE_PROC_DEVSET(transform, 0,	 3),
 	{ 0 },
 };
+
+static ide_proc_entry_t *ide_scsi_proc_entries(ide_drive_t *drive)
+{
+	return idescsi_proc;
+}
+
+static const struct ide_proc_devset *ide_scsi_proc_devsets(ide_drive_t *drive)
+{
+	return idescsi_settings;
+}
 #endif
 
 /*
@@ -419,13 +434,6 @@
 
 static int ide_scsi_probe(ide_drive_t *);
 
-#ifdef CONFIG_IDE_PROC_FS
-static ide_proc_entry_t idescsi_proc[] = {
-	{ "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
-	{ NULL, 0, NULL, NULL }
-};
-#endif
-
 static ide_driver_t idescsi_driver = {
 	.gen_driver = {
 		.owner		= THIS_MODULE,
@@ -439,8 +447,8 @@
 	.end_request		= idescsi_end_request,
 	.error                  = idescsi_atapi_error,
 #ifdef CONFIG_IDE_PROC_FS
-	.proc			= idescsi_proc,
-	.settings		= idescsi_settings,
+	.proc_entries		= ide_scsi_proc_entries,
+	.proc_devsets		= ide_scsi_proc_devsets,
 #endif
 };
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d30eb7b..098739d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7859,7 +7859,6 @@
 	.remove = ipr_remove,
 	.shutdown = ipr_shutdown,
 	.err_handler = &ipr_err_handler,
-	.dynids.use_driver_data = 1
 };
 
 /**
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 83c8192..f25f41a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2108,7 +2108,7 @@
 
 struct qla_msix_entry {
 	int have_irq;
-	uint16_t msix_vector;
+	uint32_t msix_vector;
 	uint16_t msix_entry;
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2aed472..21dd182 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1566,9 +1566,8 @@
 			goto probe_out;
 	}
 
-	if (pci_find_aer_capability(pdev))
-		if (pci_enable_pcie_error_reporting(pdev))
-			goto probe_out;
+	/* This may fail but that's ok */
+	pci_enable_pcie_error_reporting(pdev);
 
 	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
 	if (host == NULL) {
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 4eb3da9..4ad3e01 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -223,9 +223,9 @@
 				no_multi = 1;
 				break;
 			}
-			min = BCD2BIN(buffer[15]);
-			sec = BCD2BIN(buffer[16]);
-			frame = BCD2BIN(buffer[17]);
+			min = bcd2bin(buffer[15]);
+			sec = bcd2bin(buffer[16]);
+			frame = bcd2bin(buffer[17]);
 			sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
 			break;
 		}
@@ -252,9 +252,9 @@
 			}
 			if (rc != 0)
 				break;
-			min = BCD2BIN(buffer[1]);
-			sec = BCD2BIN(buffer[2]);
-			frame = BCD2BIN(buffer[3]);
+			min = bcd2bin(buffer[1]);
+			sec = bcd2bin(buffer[2]);
+			frame = bcd2bin(buffer[3]);
 			sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
 			if (sector)
 				sector -= CD_MSF_OFFSET;
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 381b12a..d935b2d 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -66,7 +66,6 @@
 #endif
 
 static struct m68k_serial m68k_soft[NR_PORTS];
-struct m68k_serial *IRQ_ports[NR_IRQS];
 
 static unsigned int uart_irqs[NR_PORTS] = UART_IRQ_DEFNS;
 
@@ -375,15 +374,11 @@
  */
 irqreturn_t rs_interrupt(int irq, void *dev_id)
 {
-	struct m68k_serial * info;
+	struct m68k_serial *info = dev_id;
 	m68328_uart *uart;
 	unsigned short rx;
 	unsigned short tx;
 
-	info = IRQ_ports[irq];
-	if(!info)
-	    return IRQ_NONE;
-
 	uart = &uart_addr[info->line];
 	rx = uart->urx.w;
 
@@ -1383,8 +1378,6 @@
 		   info->port, info->irq);
 	    printk(" is a builtin MC68328 UART\n");
 	    
-	    IRQ_ports[info->irq] = info;	/* waste of space */
-
 #ifdef CONFIG_M68VZ328
 		if (i > 0 )
 			PJSEL &= 0xCF;  /* PSW enable second port output */
@@ -1393,7 +1386,7 @@
 	    if (request_irq(uart_irqs[i],
 			    rs_interrupt,
 			    IRQF_DISABLED,
-			    "M68328_UART", NULL))
+			    "M68328_UART", info))
                 panic("Unable to attach 68328 serial interrupt\n");
 	}
 	local_irq_restore(flags);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 1528de2..303272a 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -156,11 +156,15 @@
 };
 
 struct irq_info {
-	spinlock_t		lock;
+	struct			hlist_node node;
+	int			irq;
+	spinlock_t		lock;	/* Protects list not the hash */
 	struct list_head	*head;
 };
 
-static struct irq_info irq_lists[NR_IRQS];
+#define NR_IRQ_HASH		32	/* Can be adjusted later */
+static struct hlist_head irq_lists[NR_IRQ_HASH];
+static DEFINE_MUTEX(hash_mutex);	/* Used to walk the hash */
 
 /*
  * Here we define the default xmit fifo size used for each type of UART.
@@ -1545,15 +1549,43 @@
 		BUG_ON(i->head != &up->list);
 		i->head = NULL;
 	}
-
 	spin_unlock_irq(&i->lock);
+	/* List empty so throw away the hash node */
+	if (i->head == NULL) {
+		hlist_del(&i->node);
+		kfree(i);
+	}
 }
 
 static int serial_link_irq_chain(struct uart_8250_port *up)
 {
-	struct irq_info *i = irq_lists + up->port.irq;
+	struct hlist_head *h;
+	struct hlist_node *n;
+	struct irq_info *i;
 	int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
 
+	mutex_lock(&hash_mutex);
+
+	h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+	hlist_for_each(n, h) {
+		i = hlist_entry(n, struct irq_info, node);
+		if (i->irq == up->port.irq)
+			break;
+	}
+
+	if (n == NULL) {
+		i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+		if (i == NULL) {
+			mutex_unlock(&hash_mutex);
+			return -ENOMEM;
+		}
+		spin_lock_init(&i->lock);
+		i->irq = up->port.irq;
+		hlist_add_head(&i->node, h);
+	}
+	mutex_unlock(&hash_mutex);
+
 	spin_lock_irq(&i->lock);
 
 	if (i->head) {
@@ -1577,14 +1609,28 @@
 
 static void serial_unlink_irq_chain(struct uart_8250_port *up)
 {
-	struct irq_info *i = irq_lists + up->port.irq;
+	struct irq_info *i;
+	struct hlist_node *n;
+	struct hlist_head *h;
 
+	mutex_lock(&hash_mutex);
+
+	h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+	hlist_for_each(n, h) {
+		i = hlist_entry(n, struct irq_info, node);
+		if (i->irq == up->port.irq)
+			break;
+	}
+
+	BUG_ON(n == NULL);
 	BUG_ON(i->head == NULL);
 
 	if (list_empty(i->head))
 		free_irq(up->port.irq, i);
 
 	serial_do_unlink(i, up);
+	mutex_unlock(&hash_mutex);
 }
 
 /* Base timer interval for polling */
@@ -2447,7 +2493,7 @@
 static int
 serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
-	if (ser->irq >= NR_IRQS || ser->irq < 0 ||
+	if (ser->irq >= nr_irqs || ser->irq < 0 ||
 	    ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
 	    ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
 	    ser->type == PORT_STARTECH)
@@ -2967,7 +3013,7 @@
 
 static int __init serial8250_init(void)
 {
-	int ret, i;
+	int ret;
 
 	if (nr_uarts > UART_NR)
 		nr_uarts = UART_NR;
@@ -2976,9 +3022,6 @@
 		"%d ports, IRQ sharing %sabled\n", nr_uarts,
 		share_irqs ? "en" : "dis");
 
-	for (i = 0; i < NR_IRQS; i++)
-		spin_lock_init(&irq_lists[i].lock);
-
 #ifdef CONFIG_SPARC
 	ret = sunserial_register_minors(&serial8250_reg, UART_NR);
 #else
@@ -3006,15 +3049,15 @@
 		goto out;
 
 	platform_device_del(serial8250_isa_devs);
- put_dev:
+put_dev:
 	platform_device_put(serial8250_isa_devs);
- unreg_uart_drv:
+unreg_uart_drv:
 #ifdef CONFIG_SPARC
 	sunserial_unregister_minors(&serial8250_reg, UART_NR);
 #else
 	uart_unregister_driver(&serial8250_reg);
 #endif
- out:
+out:
 	return ret;
 }
 
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 0416ad3..418b4fe 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -111,7 +111,7 @@
 	.probe		= serial_init_chip,
 };
 
-int __init probe_serial_gsc(void)
+static int __init probe_serial_gsc(void)
 {
 	register_parisc_driver(&lasi_driver);
 	register_parisc_driver(&serial_driver);
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index c014ffb1..5450a0e 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -1100,6 +1100,8 @@
 	pbn_b0_4_1843200_200,
 	pbn_b0_8_1843200_200,
 
+	pbn_b0_1_4000000,
+
 	pbn_b0_bt_1_115200,
 	pbn_b0_bt_2_115200,
 	pbn_b0_bt_8_115200,
@@ -1167,6 +1169,10 @@
 	pbn_exsys_4055,
 	pbn_plx_romulus,
 	pbn_oxsemi,
+	pbn_oxsemi_1_4000000,
+	pbn_oxsemi_2_4000000,
+	pbn_oxsemi_4_4000000,
+	pbn_oxsemi_8_4000000,
 	pbn_intel_i960,
 	pbn_sgi_ioc3,
 	pbn_computone_4,
@@ -1290,6 +1296,12 @@
 		.base_baud	= 1843200,
 		.uart_offset	= 0x200,
 	},
+	[pbn_b0_1_4000000] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 4000000,
+		.uart_offset	= 8,
+	},
 
 	[pbn_b0_bt_1_115200] = {
 		.flags		= FL_BASE0|FL_BASE_BARS,
@@ -1625,6 +1637,35 @@
 		.base_baud	= 115200,
 		.uart_offset	= 8,
 	},
+	[pbn_oxsemi_1_4000000] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 4000000,
+		.uart_offset	= 0x200,
+		.first_offset	= 0x1000,
+	},
+	[pbn_oxsemi_2_4000000] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 2,
+		.base_baud	= 4000000,
+		.uart_offset	= 0x200,
+		.first_offset	= 0x1000,
+	},
+	[pbn_oxsemi_4_4000000] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 4,
+		.base_baud	= 4000000,
+		.uart_offset	= 0x200,
+		.first_offset	= 0x1000,
+	},
+	[pbn_oxsemi_8_4000000] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 8,
+		.base_baud	= 4000000,
+		.uart_offset	= 0x200,
+		.first_offset	= 0x1000,
+	},
+
 
 	/*
 	 * EKF addition for i960 Boards form EKF with serial port.
@@ -1813,6 +1854,39 @@
 	    board->first_offset == guessed->first_offset;
 }
 
+/*
+ * Oxford Semiconductor Inc.
+ * Check that device is part of the Tornado range of devices, then determine
+ * the number of ports available on the device.
+ */
+static int pci_oxsemi_tornado_init(struct pci_dev *dev, struct pciserial_board *board)
+{
+	u8 __iomem *p;
+	unsigned long deviceID;
+	unsigned int  number_uarts;
+
+	/* OxSemi Tornado devices are all 0xCxxx */
+	if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+	    (dev->device & 0xF000) != 0xC000)
+		return 0;
+
+	p = pci_iomap(dev, 0, 5);
+	if (p == NULL)
+		return -ENOMEM;
+
+	deviceID = ioread32(p);
+	/* Tornado device */
+	if (deviceID == 0x07000200) {
+		number_uarts = ioread8(p + 4);
+		board->num_ports = number_uarts;
+		printk(KERN_DEBUG
+			"%d ports detected on Oxford PCI Express device\n",
+								number_uarts);
+	}
+	pci_iounmap(dev, p);
+	return 0;
+}
+
 struct serial_private *
 pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
 {
@@ -1821,6 +1895,13 @@
 	struct pci_serial_quirk *quirk;
 	int rc, nr_ports, i;
 
+	/*
+	 * Find number of ports on board
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_OXSEMI ||
+	    dev->vendor == PCI_VENDOR_ID_MAINPINE)
+		pci_oxsemi_tornado_init(dev, board);
+
 	nr_ports = board->num_ports;
 
 	/*
@@ -2301,6 +2382,156 @@
 		pbn_b0_bt_2_921600 },
 
 	/*
+	 * Oxford Semiconductor Inc. Tornado PCI express device range.
+	 */
+	{	PCI_VENDOR_ID_OXSEMI, 0xc101,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc105,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc11b,    /* OXPCIe952 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc11f,    /* OXPCIe952 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc120,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc124,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc138,    /* OXPCIe952 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc13d,    /* OXPCIe952 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc140,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc141,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc144,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc145,    /* OXPCIe952 1 Legacy UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc158,    /* OXPCIe952 2 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_2_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc15d,    /* OXPCIe952 2 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_2_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc208,    /* OXPCIe954 4 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_4_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc20d,    /* OXPCIe954 4 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_4_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc308,    /* OXPCIe958 8 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_8_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc30d,    /* OXPCIe958 8 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_8_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc40b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc40f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc41b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc41f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc42b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc42f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc43b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc43f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc44b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc44f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc45b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc45f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc46b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc46f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc47b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc47f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc48b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc48f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc49b,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc49f,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc4ab,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc4af,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc4bb,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc4bf,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc4cb,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0xc4cf,    /* OXPCIe200 1 Native UART */
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	/*
+	 * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
+	 */
+	{	PCI_VENDOR_ID_MAINPINE, 0x4000,	/* IQ Express 1 Port V.34 Super-G3 Fax */
+		PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
+		pbn_oxsemi_1_4000000 },
+	{	PCI_VENDOR_ID_MAINPINE, 0x4000,	/* IQ Express 2 Port V.34 Super-G3 Fax */
+		PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
+		pbn_oxsemi_2_4000000 },
+	{	PCI_VENDOR_ID_MAINPINE, 0x4000,	/* IQ Express 4 Port V.34 Super-G3 Fax */
+		PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
+		pbn_oxsemi_4_4000000 },
+	{	PCI_VENDOR_ID_MAINPINE, 0x4000,	/* IQ Express 8 Port V.34 Super-G3 Fax */
+		PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
+		pbn_oxsemi_8_4000000 },
+	/*
 	 * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
 	 * from skokodyn@yahoo.com
 	 */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index db783b7..c94d3c4 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -457,7 +457,7 @@
 
 config SERIAL_SAMSUNG_DEBUG
 	bool "Samsung SoC serial debug"
-	depends on SERIAL_SAMSUNG
+	depends on SERIAL_SAMSUNG && DEBUG_LL
 	help
 	  Add support for debugging the serial driver. Since this is
 	  generally being used as a console, we use our own output
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 90b56c2..7156268 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -512,7 +512,7 @@
 	int ret = 0;
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600)
 		ret = -EINVAL;
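
The NR_IRQS -> nr_irqs change above recurs in amba-pl011, cpm_uart, m32r_sio,
serial_core, serial_lh7a40x, ucc_uart and sh-sci below: the compile-time table
size is replaced by the runtime IRQ count exported by the generic IRQ code.
The common pattern, as a minimal sketch:

	/* Bound-check a user-supplied IRQ against the runtime count rather
	 * than the compile-time NR_IRQS (nr_irqs comes from the genirq
	 * headers).
	 */
	static int serial_irq_valid(int irq)
	{
		return irq >= 0 && irq < nr_irqs;
	}
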
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 9d08f27..b718004 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -572,7 +572,7 @@
 	int ret = 0;
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600)
 		ret = -EINVAL;
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index a6c4d74..bde4b4b0 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -623,7 +623,7 @@
 
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600)
 		ret = -EINVAL;
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c
index 23d0305..611c97a 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/serial/m32r_sio.c
@@ -922,7 +922,7 @@
 static int
 m32r_sio_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
-	if (ser->irq >= NR_IRQS || ser->irq < 0 ||
+	if (ser->irq >= nr_irqs || ser->irq < 0 ||
 	    ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
 	    ser->type >= ARRAY_SIZE(uart_config))
 		return -EINVAL;
@@ -1162,7 +1162,7 @@
 
 	printk(KERN_INFO "Serial: M32R SIO driver\n");
 
-	for (i = 0; i < NR_IRQS; i++)
+	for (i = 0; i < nr_irqs; i++)
 		spin_lock_init(&irq_lists[i].lock);
 
 	ret = uart_register_driver(&m32r_sio_reg);
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 6bdf336..874786a 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -741,7 +741,7 @@
 	if (port->ops->verify_port)
 		retval = port->ops->verify_port(port, &new_serial);
 
-	if ((new_serial.irq >= NR_IRQS) || (new_serial.irq < 0) ||
+	if ((new_serial.irq >= nr_irqs) || (new_serial.irq < 0) ||
 	    (new_serial.baud_base < 9600))
 		retval = -EINVAL;
 
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index cb49a5a..61dc8b3 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -460,7 +460,7 @@
 
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_LH7A40X)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600) /* *** FIXME: is this true? */
 		ret = -EINVAL;
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 8fcb4c5..7313c2e 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -1039,7 +1039,7 @@
 		ret = serial_txx9_register_port(&port);
 		if (ret < 0) {
 			dev_err(&dev->dev, "unable to register port at index %d "
-				"(IO%x MEM%llx IRQ%d): %d\n", i,
+				"(IO%lx MEM%llx IRQ%d): %d\n", i,
 				p->iobase, (unsigned long long)p->mapbase,
 				p->irq, ret);
 		}
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 3df2aae..f0658d2 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -3,7 +3,7 @@
  *
  * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
  *
- *  Copyright (C) 2002 - 2006  Paul Mundt
+ *  Copyright (C) 2002 - 2008  Paul Mundt
  *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
  *
  * based off of the old drivers/char/sh-sci.c by:
@@ -46,6 +46,7 @@
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/ctype.h>
+#include <linux/err.h>
 
 #ifdef CONFIG_SUPERH
 #include <asm/clock.h>
@@ -78,7 +79,7 @@
 	struct timer_list	break_timer;
 	int			break_flag;
 
-#ifdef CONFIG_SUPERH
+#ifdef CONFIG_HAVE_CLK
 	/* Port clock */
 	struct clk		*clk;
 #endif
@@ -831,7 +832,7 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK)
 /*
  * Here we define a transition notifier so that we can update all of our
  * ports' baud rate when the peripheral clock changes.
@@ -860,7 +861,7 @@
 			 * Clean this up later..
 			 */
 			clk = clk_get(NULL, "module_clk");
-			port->uartclk = clk_get_rate(clk) * 16;
+			port->uartclk = clk_get_rate(clk);
 			clk_put(clk);
 		}
 
@@ -873,7 +874,7 @@
 }
 
 static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 };
-#endif /* CONFIG_CPU_FREQ */
+#endif /* CONFIG_CPU_FREQ && CONFIG_HAVE_CLK */
 
 static int sci_request_irq(struct sci_port *port)
 {
@@ -1008,7 +1009,7 @@
 	if (s->enable)
 		s->enable(port);
 
-#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
+#ifdef CONFIG_HAVE_CLK
 	s->clk = clk_get(NULL, "module_clk");
 #endif
 
@@ -1030,7 +1031,7 @@
 	if (s->disable)
 		s->disable(port);
 
-#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
+#ifdef CONFIG_HAVE_CLK
 	clk_put(s->clk);
 	s->clk = NULL;
 #endif
@@ -1041,24 +1042,11 @@
 {
 	struct sci_port *s = &sci_ports[port->line];
 	unsigned int status, baud, smr_val;
-	int t;
+	int t = -1;
 
 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
-
-	switch (baud) {
-		case 0:
-			t = -1;
-			break;
-		default:
-		{
-#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
-			t = SCBRR_VALUE(baud, clk_get_rate(s->clk));
-#else
-			t = SCBRR_VALUE(baud);
-#endif
-			break;
-		}
-	}
+	if (likely(baud))
+		t = SCBRR_VALUE(baud, port->uartclk);
 
 	do {
 		status = sci_in(port, SCxSR);
@@ -1113,7 +1101,7 @@
 		case PORT_IRDA: return "irda";
 	}
 
-	return 0;
+	return NULL;
 }
 
 static void sci_release_port(struct uart_port *port)
@@ -1145,19 +1133,23 @@
 		break;
 	}
 
-#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-	if (port->mapbase == 0)
+	if (port->flags & UPF_IOREMAP && !port->membase) {
+#if defined(CONFIG_SUPERH64)
 		port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF");
-
-	port->membase = (void __iomem *)port->mapbase;
+		port->membase = (void __iomem *)port->mapbase;
+#else
+		port->membase = ioremap_nocache(port->mapbase, 0x40);
 #endif
+
+		if (!port->membase)
+			printk(KERN_ERR "sci: can't remap port#%d\n",
+			       port->line);
+	}
 }
 
 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
 	struct sci_port *s = &sci_ports[port->line];
 
-	if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > NR_IRQS)
+	if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
 		return -EINVAL;
 	if (ser->baud_base < 2400)
 		/* No paper tape reader for Mitch.. */
@@ -1207,17 +1199,17 @@
 		sci_ports[i].disable	= h8300_sci_disable;
 #endif
 		sci_ports[i].port.uartclk = CONFIG_CPU_CLOCK;
-#elif defined(CONFIG_SUPERH64)
-		sci_ports[i].port.uartclk = current_cpu_data.module_clock * 16;
-#else
+#elif defined(CONFIG_HAVE_CLK)
 		/*
 		 * XXX: We should use a proper SCI/SCIF clock
 		 */
 		{
 			struct clk *clk = clk_get(NULL, "module_clk");
-			sci_ports[i].port.uartclk = clk_get_rate(clk) * 16;
+			sci_ports[i].port.uartclk = clk_get_rate(clk);
 			clk_put(clk);
 		}
+#else
+#error "Need a valid uartclk"
 #endif
 
 		sci_ports[i].break_timer.data = (unsigned long)&sci_ports[i];
@@ -1285,7 +1277,7 @@
 
 	port->type = serial_console_port->type;
 
-#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
+#ifdef CONFIG_HAVE_CLK
 	if (!serial_console_port->clk)
 		serial_console_port->clk = clk_get(NULL, "module_clk");
 #endif
@@ -1436,7 +1428,7 @@
 static int __devinit sci_probe(struct platform_device *dev)
 {
 	struct plat_sci_port *p = dev->dev.platform_data;
-	int i;
+	int i, ret = -EINVAL;
 
 	for (i = 0; p && p->flags != 0; p++, i++) {
 		struct sci_port *sciport = &sci_ports[i];
@@ -1453,12 +1445,22 @@
 
 		sciport->port.mapbase	= p->mapbase;
 
-		/*
-		 * For the simple (and majority of) cases where we don't need
-		 * to do any remapping, just cast the cookie directly.
-		 */
-		if (p->mapbase && !p->membase && !(p->flags & UPF_IOREMAP))
-			p->membase = (void __iomem *)p->mapbase;
+		if (p->mapbase && !p->membase) {
+			if (p->flags & UPF_IOREMAP) {
+				p->membase = ioremap_nocache(p->mapbase, 0x40);
+				if (!p->membase) {
+					ret = -ENOMEM;
+					goto err_unreg;
+				}
+			} else {
+				/*
+				 * For the simple (and majority of) cases
+				 * where we don't need to do any remapping,
+				 * just cast the cookie directly.
+				 */
+				p->membase = (void __iomem *)p->mapbase;
+			}
+		}
 
 		sciport->port.membase	= p->membase;
 
@@ -1479,7 +1481,7 @@
 	kgdb_putchar	= kgdb_sci_putchar;
 #endif
 
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK)
 	cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
 	dev_info(&dev->dev, "CPU frequency notifier registered\n");
 #endif
@@ -1489,6 +1491,12 @@
 #endif
 
 	return 0;
+
+err_unreg:
+	for (i = i - 1; i >= 0; i--)
+		uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
+
+	return ret;
 }
 
 static int __devexit sci_remove(struct platform_device *dev)
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 8a0749e..7cd28b2 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -320,18 +320,16 @@
 #define SCI_EVENT_WRITE_WAKEUP	0
 
 #define SCI_IN(size, offset)					\
-  unsigned int addr = port->mapbase + (offset);			\
   if ((size) == 8) {						\
-    return ctrl_inb(addr);					\
+    return ioread8(port->membase + (offset));			\
   } else {							\
-    return ctrl_inw(addr);					\
+    return ioread16(port->membase + (offset));			\
   }
 #define SCI_OUT(size, offset, value)				\
-  unsigned int addr = port->mapbase + (offset);			\
   if ((size) == 8) {						\
-    ctrl_outb(value, addr);					\
+    iowrite8(value, port->membase + (offset));			\
   } else if ((size) == 16) {					\
-    ctrl_outw(value, addr);					\
+    iowrite16(value, port->membase + (offset));			\
   }
 
 #define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
@@ -791,11 +789,16 @@
       defined(CONFIG_CPU_SUBTYPE_SH7721)
 #define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
 #elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(16*bps)-1)
+static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
+{
+	if (port->type == PORT_SCIF)
+		return (clk+16*bps)/(32*bps)-1;
+	else
+		return ((clk*2)+16*bps)/(16*bps)-1;
+}
+#define SCBRR_VALUE(bps, clk) scbrr_calc(port, bps, clk)
 #elif defined(__H8300H__) || defined(__H8300S__)
-#define SCBRR_VALUE(bps) (((CONFIG_CPU_CLOCK*1000/32)/bps)-1)
-#elif defined(CONFIG_SUPERH64)
-#define SCBRR_VALUE(bps) ((current_cpu_data.module_clock+16*bps)/(32*bps)-1)
+#define SCBRR_VALUE(bps, clk) (((clk*1000/32)/bps)-1)
 #else /* Generic SH */
 #define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(32*bps)-1)
 #endif
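
A worked example of the generic SCBRR_VALUE() form above (the 33.33 MHz module
clock is only an assumed figure):

	/* Same arithmetic as the generic SCBRR_VALUE(bps, clk) above:
	 *   scbrr(115200, 33333333)
	 *     = (33333333 + 16*115200) / (32*115200) - 1
	 *     = 35176533 / 3686400 - 1 = 9 - 1 = 8
	 * which programs about 33333333 / (32 * (8 + 1)) ~= 115740 bps,
	 * i.e. within roughly 0.5% of the requested rate.
	 */
	static inline unsigned long scbrr(unsigned long bps, unsigned long clk)
	{
		return (clk + 16 * bps) / (32 * bps) - 1;
	}
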
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index b73e3c0..d5276c0 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -61,7 +61,7 @@
 #define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
 
 #define SN_SAL_UART_FIFO_DEPTH 16
-#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10
+#define SN_SAL_UART_FIFO_SPEED_CPS (9600/10)
 
 /* sn_transmit_chars() calling args */
 #define TRANSMIT_BUFFERED	0
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
index 539c933..315a933 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/serial/ucc_uart.c
@@ -1066,7 +1066,7 @@
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
 		return -EINVAL;
 
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		return -EINVAL;
 
 	if (ser->baud_base < 9600)
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index a96f4a8..6a025ce 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,6 +1,6 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-
 obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
 obj-$(CONFIG_MAPLE)		+= maple/
+obj-y				+= intc.o
diff --git a/arch/sh/kernel/cpu/irq/intc.c b/drivers/sh/intc.c
similarity index 95%
rename from arch/sh/kernel/cpu/irq/intc.c
rename to drivers/sh/intc.c
index 8c70e20..58d24c5 100644
--- a/arch/sh/kernel/cpu/irq/intc.c
+++ b/drivers/sh/intc.c
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/bootmem.h>
+#include <linux/sh_intc.h>
 
 #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
 	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -86,24 +87,24 @@
 
 static void write_8(unsigned long addr, unsigned long h, unsigned long data)
 {
-	ctrl_outb(set_field(0, data, h), addr);
+	__raw_writeb(set_field(0, data, h), addr);
 }
 
 static void write_16(unsigned long addr, unsigned long h, unsigned long data)
 {
-	ctrl_outw(set_field(0, data, h), addr);
+	__raw_writew(set_field(0, data, h), addr);
 }
 
 static void write_32(unsigned long addr, unsigned long h, unsigned long data)
 {
-	ctrl_outl(set_field(0, data, h), addr);
+	__raw_writel(set_field(0, data, h), addr);
 }
 
 static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	ctrl_outb(set_field(ctrl_inb(addr), data, h), addr);
+	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
 	local_irq_restore(flags);
 }
 
@@ -111,7 +112,7 @@
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	ctrl_outw(set_field(ctrl_inw(addr), data, h), addr);
+	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
 	local_irq_restore(flags);
 }
 
@@ -119,7 +120,7 @@
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	ctrl_outl(set_field(ctrl_inl(addr), data, h), addr);
+	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
 	local_irq_restore(flags);
 }
 
@@ -246,16 +247,16 @@
 		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
 		switch (_INTC_FN(handle)) {
 		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
-			ctrl_inb(addr);
-			ctrl_outb(0xff ^ set_field(0, 1, handle), addr);
+			__raw_readb(addr);
+			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
 			break;
 		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
-			ctrl_inw(addr);
-			ctrl_outw(0xffff ^ set_field(0, 1, handle), addr);
+			__raw_readw(addr);
+			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
 			break;
 		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
-			ctrl_inl(addr);
-			ctrl_outl(0xffffffff ^ set_field(0, 1, handle), addr);
+			__raw_readl(addr);
+			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
 			break;
 		default:
 			BUG();
@@ -464,9 +465,10 @@
 			}
 
 			fn += (pr->reg_width >> 3) - 1;
-			bit = pr->reg_width - ((j + 1) * pr->field_width);
 
-			BUG_ON(bit < 0);
+			BUG_ON((j + 1) * pr->field_width > pr->reg_width);
+
+			bit = pr->reg_width - ((j + 1) * pr->field_width);
 
 			return _INTC_MK(fn, mode,
 					intc_get_reg(d, reg_e),
@@ -531,9 +533,10 @@
 
 			fn = REG_FN_MODIFY_BASE;
 			fn += (sr->reg_width >> 3) - 1;
-			bit = sr->reg_width - ((j + 1) * sr->field_width);
 
-			BUG_ON(bit < 0);
+			BUG_ON((j + 1) * sr->field_width > sr->reg_width);
+
+			bit = sr->reg_width - ((j + 1) * sr->field_width);
 
 			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
 					0, sr->field_width, bit);
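
The reordered BUG_ON()s above validate the operands before the subtraction can
wrap.  A worked example for an 8-bit register split into 4-bit fields:

	/*   j = 0: (0 + 1) * 4 =  4 <= 8, bit = 8 - 4 = 4
	 *   j = 1: (1 + 1) * 4 =  8 <= 8, bit = 8 - 8 = 0
	 *   j = 2: (2 + 1) * 4 = 12 >  8  -> BUG() before "bit" is computed
	 *
	 * The old "BUG_ON(bit < 0)" form relied on the result going negative,
	 * which cannot happen if "bit" is unsigned, so the check is now done
	 * on the widths themselves.
	 */
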
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2a79dec..c4eff44 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -43,4 +43,8 @@
 
 source "drivers/staging/at76_usb/Kconfig"
 
+source "drivers/staging/pcc-acpi/Kconfig"
+
+source "drivers/staging/poch/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 325bca4..7cb8701 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -13,3 +13,5 @@
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_USB_ATMEL)		+= at76_usb/
+obj-$(CONFIG_PCC_ACPI)		+= pcc-acpi/
+obj-$(CONFIG_POCH)		+= poch/
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index 52df0c6..174e2be 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -2319,9 +2319,11 @@
 	if (!iwe)
 		return -ENOMEM;
 
-	if (priv->scan_state != SCAN_COMPLETED)
+	if (priv->scan_state != SCAN_COMPLETED) {
 		/* scan not yet finished */
+		kfree(iwe);
 		return -EAGAIN;
+	}
 
 	spin_lock_irqsave(&priv->bss_list_spinlock, flags);
 
diff --git a/drivers/staging/echo/bit_operations.h b/drivers/staging/echo/bit_operations.h
index b32f4bf..cecdcf3 100644
--- a/drivers/staging/echo/bit_operations.h
+++ b/drivers/staging/echo/bit_operations.h
@@ -30,114 +30,98 @@
 #if !defined(_BIT_OPERATIONS_H_)
 #define _BIT_OPERATIONS_H_
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #if defined(__i386__)  ||  defined(__x86_64__)
 /*! \brief Find the bit position of the highest set bit in a word
     \param bits The word to be searched
     \return The bit number of the highest set bit, or -1 if the word is zero. */
 static __inline__ int top_bit(unsigned int bits)
 {
-    int res;
+	int res;
 
-    __asm__ (" xorl %[res],%[res];\n"
-             " decl %[res];\n"
-             " bsrl %[bits],%[res]\n"
-             : [res] "=&r" (res)
-             : [bits] "rm" (bits));
-    return res;
+	__asm__(" xorl %[res],%[res];\n"
+		" decl %[res];\n"
+		" bsrl %[bits],%[res]\n"
+		:[res] "=&r" (res)
+		:[bits] "rm"(bits)
+	);
+	return res;
 }
-/*- End of function --------------------------------------------------------*/
 
 /*! \brief Find the bit position of the lowest set bit in a word
     \param bits The word to be searched
     \return The bit number of the lowest set bit, or -1 if the word is zero. */
 static __inline__ int bottom_bit(unsigned int bits)
 {
-    int res;
+	int res;
 
-    __asm__ (" xorl %[res],%[res];\n"
-             " decl %[res];\n"
-             " bsfl %[bits],%[res]\n"
-             : [res] "=&r" (res)
-             : [bits] "rm" (bits));
-    return res;
+	__asm__(" xorl %[res],%[res];\n"
+		" decl %[res];\n"
+		" bsfl %[bits],%[res]\n"
+		:[res] "=&r" (res)
+		:[bits] "rm"(bits)
+	);
+	return res;
 }
-/*- End of function --------------------------------------------------------*/
 #else
 static __inline__ int top_bit(unsigned int bits)
 {
-    int i;
+	int i;
 
-    if (bits == 0)
-        return -1;
-    i = 0;
-    if (bits & 0xFFFF0000)
-    {
-        bits &= 0xFFFF0000;
-        i += 16;
-    }
-    if (bits & 0xFF00FF00)
-    {
-        bits &= 0xFF00FF00;
-        i += 8;
-    }
-    if (bits & 0xF0F0F0F0)
-    {
-        bits &= 0xF0F0F0F0;
-        i += 4;
-    }
-    if (bits & 0xCCCCCCCC)
-    {
-        bits &= 0xCCCCCCCC;
-        i += 2;
-    }
-    if (bits & 0xAAAAAAAA)
-    {
-        bits &= 0xAAAAAAAA;
-        i += 1;
-    }
-    return i;
+	if (bits == 0)
+		return -1;
+	i = 0;
+	if (bits & 0xFFFF0000) {
+		bits &= 0xFFFF0000;
+		i += 16;
+	}
+	if (bits & 0xFF00FF00) {
+		bits &= 0xFF00FF00;
+		i += 8;
+	}
+	if (bits & 0xF0F0F0F0) {
+		bits &= 0xF0F0F0F0;
+		i += 4;
+	}
+	if (bits & 0xCCCCCCCC) {
+		bits &= 0xCCCCCCCC;
+		i += 2;
+	}
+	if (bits & 0xAAAAAAAA) {
+		bits &= 0xAAAAAAAA;
+		i += 1;
+	}
+	return i;
 }
-/*- End of function --------------------------------------------------------*/
 
 static __inline__ int bottom_bit(unsigned int bits)
 {
-    int i;
+	int i;
 
-    if (bits == 0)
-        return -1;
-    i = 32;
-    if (bits & 0x0000FFFF)
-    {
-        bits &= 0x0000FFFF;
-        i -= 16;
-    }
-    if (bits & 0x00FF00FF)
-    {
-        bits &= 0x00FF00FF;
-        i -= 8;
-    }
-    if (bits & 0x0F0F0F0F)
-    {
-        bits &= 0x0F0F0F0F;
-        i -= 4;
-    }
-    if (bits & 0x33333333)
-    {
-        bits &= 0x33333333;
-        i -= 2;
-    }
-    if (bits & 0x55555555)
-    {
-        bits &= 0x55555555;
-        i -= 1;
-    }
-    return i;
+	if (bits == 0)
+		return -1;
+	i = 32;
+	if (bits & 0x0000FFFF) {
+		bits &= 0x0000FFFF;
+		i -= 16;
+	}
+	if (bits & 0x00FF00FF) {
+		bits &= 0x00FF00FF;
+		i -= 8;
+	}
+	if (bits & 0x0F0F0F0F) {
+		bits &= 0x0F0F0F0F;
+		i -= 4;
+	}
+	if (bits & 0x33333333) {
+		bits &= 0x33333333;
+		i -= 2;
+	}
+	if (bits & 0x55555555) {
+		bits &= 0x55555555;
+		i -= 1;
+	}
+	return i;
 }
-/*- End of function --------------------------------------------------------*/
 #endif
 
 /*! \brief Bit reverse a byte.
@@ -146,16 +130,16 @@
 static __inline__ uint8_t bit_reverse8(uint8_t x)
 {
 #if defined(__i386__)  ||  defined(__x86_64__)
-    /* If multiply is fast */
-    return ((x*0x0802U & 0x22110U) | (x*0x8020U & 0x88440U))*0x10101U >> 16;
+	/* If multiply is fast */
+	return ((x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) *
+	    0x10101U >> 16;
 #else
-    /* If multiply is slow, but we have a barrel shifter */
-    x = (x >> 4) | (x << 4);
-    x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2);
-    return ((x & 0xAA) >> 1) | ((x & 0x55) << 1);
+	/* If multiply is slow, but we have a barrel shifter */
+	x = (x >> 4) | (x << 4);
+	x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2);
+	return ((x & 0xAA) >> 1) | ((x & 0x55) << 1);
 #endif
 }
-/*- End of function --------------------------------------------------------*/
 
 /*! \brief Bit reverse a 16 bit word.
     \param data The word to be reversed.
@@ -193,9 +177,8 @@
     \return The word with the single set bit. */
 static __inline__ uint32_t least_significant_one32(uint32_t x)
 {
-    return (x & (-(int32_t) x));
+	return (x & (-(int32_t) x));
 }
-/*- End of function --------------------------------------------------------*/
 
 /*! \brief Find the most significant one in a word, and return a word
            with just that bit set.
@@ -204,50 +187,42 @@
 static __inline__ uint32_t most_significant_one32(uint32_t x)
 {
 #if defined(__i386__)  ||  defined(__x86_64__)
-    return 1 << top_bit(x);
+	return 1 << top_bit(x);
 #else
-    x = make_mask32(x);
-    return (x ^ (x >> 1));
+	x = make_mask32(x);
+	return (x ^ (x >> 1));
 #endif
 }
-/*- End of function --------------------------------------------------------*/
 
 /*! \brief Find the parity of a byte.
     \param x The byte to be checked.
     \return 1 for odd, or 0 for even. */
 static __inline__ int parity8(uint8_t x)
 {
-    x = (x ^ (x >> 4)) & 0x0F;
-    return (0x6996 >> x) & 1;
+	x = (x ^ (x >> 4)) & 0x0F;
+	return (0x6996 >> x) & 1;
 }
-/*- End of function --------------------------------------------------------*/
 
 /*! \brief Find the parity of a 16 bit word.
     \param x The word to be checked.
     \return 1 for odd, or 0 for even. */
 static __inline__ int parity16(uint16_t x)
 {
-    x ^= (x >> 8);
-    x = (x ^ (x >> 4)) & 0x0F;
-    return (0x6996 >> x) & 1;
+	x ^= (x >> 8);
+	x = (x ^ (x >> 4)) & 0x0F;
+	return (0x6996 >> x) & 1;
 }
-/*- End of function --------------------------------------------------------*/
 
 /*! \brief Find the parity of a 32 bit word.
     \param x The word to be checked.
     \return 1 for odd, or 0 for even. */
 static __inline__ int parity32(uint32_t x)
 {
-    x ^= (x >> 16);
-    x ^= (x >> 8);
-    x = (x ^ (x >> 4)) & 0x0F;
-    return (0x6996 >> x) & 1;
+	x ^= (x >> 16);
+	x ^= (x >> 8);
+	x = (x ^ (x >> 4)) & 0x0F;
+	return (0x6996 >> x) & 1;
 }
-/*- End of function --------------------------------------------------------*/
-
-#ifdef __cplusplus
-}
-#endif
 
 #endif
 /*- End of file ------------------------------------------------------------*/
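
The parity helpers above all end in the same "(0x6996 >> x) & 1" step; the
constant is simply the sixteen nibble parities packed into one word.  A small
sketch plus a worked value (illustration only):

	/* 0x6996 = 0110 1001 1001 0110b: bit n holds the parity of n, so a
	 * value folded down to 4 bits just indexes this packed table.
	 */
	static __inline__ int parity4(unsigned int nibble)
	{
		return (0x6996 >> (nibble & 0x0F)) & 1;
	}

	/* e.g. parity8(0xb5): 0xb5 ^ (0xb5 >> 4) = 0xbe, masked to 0x0e,
	 * and (0x6996 >> 14) & 1 = 1 -- 0xb5 has five set bits, odd parity.
	 */
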
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index 4a281b1..b8f2c5e 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -74,7 +74,6 @@
 
    Steve also has some nice notes on echo cancellers in echo.h
 
-
    References:
 
    [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
@@ -105,20 +104,18 @@
    Mark, Pawel, and Pavel.
 */
 
-#include <linux/kernel.h>       /* We're doing kernel work */
+#include <linux/kernel.h>	/* We're doing kernel work */
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#define malloc(a) kmalloc((a), GFP_KERNEL)
-#define free(a) kfree(a)
 
 #include "bit_operations.h"
 #include "echo.h"
 
 #define MIN_TX_POWER_FOR_ADAPTION   64
 #define MIN_RX_POWER_FOR_ADAPTION   64
-#define DTD_HANGOVER               600     /* 600 samples, or 75ms     */
-#define DC_LOG2BETA                  3     /* log2() of DC filter Beta */
+#define DTD_HANGOVER               600	/* 600 samples, or 75ms     */
+#define DC_LOG2BETA                  3	/* log2() of DC filter Beta */
 
 /*-----------------------------------------------------------------------*\
                                FUNCTIONS
@@ -126,59 +123,58 @@
 
 /* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
 
-
-#ifdef __BLACKFIN_ASM__
-static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
+#ifdef __bfin__
+static void __inline__ lms_adapt_bg(struct oslec_state *ec, int clean,
+				    int shift)
 {
-    int i, j;
-    int offset1;
-    int offset2;
-    int factor;
-    int exp;
-    int16_t *phist;
-    int n;
+	int i, j;
+	int offset1;
+	int offset2;
+	int factor;
+	int exp;
+	int16_t *phist;
+	int n;
 
-    if (shift > 0)
-	factor = clean << shift;
-    else
-	factor = clean >> -shift;
+	if (shift > 0)
+		factor = clean << shift;
+	else
+		factor = clean >> -shift;
 
-    /* Update the FIR taps */
+	/* Update the FIR taps */
 
-    offset2 = ec->curr_pos;
-    offset1 = ec->taps - offset2;
-    phist = &ec->fir_state_bg.history[offset2];
+	offset2 = ec->curr_pos;
+	offset1 = ec->taps - offset2;
+	phist = &ec->fir_state_bg.history[offset2];
 
-    /* st: and en: help us locate the assembler in echo.s */
+	/* st: and en: help us locate the assembler in echo.s */
 
-    //asm("st:");
-    n = ec->taps;
-    for (i = 0, j = offset2;  i < n;  i++, j++)
-    {
-       exp = *phist++ * factor;
-       ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
-    }
-    //asm("en:");
+	//asm("st:");
+	n = ec->taps;
+	for (i = 0, j = offset2; i < n; i++, j++) {
+		exp = *phist++ * factor;
+		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+	}
+	//asm("en:");
 
-    /* Note the asm for the inner loop above generated by Blackfin gcc
-       4.1.1 is pretty good (note even parallel instructions used):
+	/* Note the asm for the inner loop above generated by Blackfin gcc
+	   4.1.1 is pretty good (note even parallel instructions used):
 
-    	R0 = W [P0++] (X);
-	R0 *= R2;
-	R0 = R0 + R3 (NS) ||
-	R1 = W [P1] (X) ||
-	nop;
-	R0 >>>= 15;
-	R0 = R0 + R1;
-	W [P1++] = R0;
+	   R0 = W [P0++] (X);
+	   R0 *= R2;
+	   R0 = R0 + R3 (NS) ||
+	   R1 = W [P1] (X) ||
+	   nop;
+	   R0 >>>= 15;
+	   R0 = R0 + R1;
+	   W [P1++] = R0;
 
-	A block based update algorithm would be much faster but the
-	above can't be improved on much.  Every instruction saved in
-	the loop above is 2 MIPs/ch!  The for loop above is where the
-	Blackfin spends most of it's time - about 17 MIPs/ch measured
-	with speedtest.c with 256 taps (32ms).  Write-back and
-	Write-through cache gave about the same performance.
-    */
+	   A block based update algorithm would be much faster but the
+	   above can't be improved on much.  Every instruction saved in
+	   the loop above is 2 MIPs/ch!  The for loop above is where the
+	   Blackfin spends most of its time - about 17 MIPs/ch measured
+	   with speedtest.c with 256 taps (32ms).  Write-back and
+	   Write-through cache gave about the same performance.
+	 */
 }
 
 /*
@@ -200,392 +196,393 @@
 */
 
 #else
-static __inline__ void lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
+static __inline__ void lms_adapt_bg(struct oslec_state *ec, int clean,
+				    int shift)
 {
-    int i;
+	int i;
 
-    int offset1;
-    int offset2;
-    int factor;
-    int exp;
+	int offset1;
+	int offset2;
+	int factor;
+	int exp;
 
-    if (shift > 0)
-	factor = clean << shift;
-    else
-	factor = clean >> -shift;
+	if (shift > 0)
+		factor = clean << shift;
+	else
+		factor = clean >> -shift;
 
-    /* Update the FIR taps */
+	/* Update the FIR taps */
 
-    offset2 = ec->curr_pos;
-    offset1 = ec->taps - offset2;
+	offset2 = ec->curr_pos;
+	offset1 = ec->taps - offset2;
 
-    for (i = ec->taps - 1;  i >= offset1;  i--)
-    {
-       exp = (ec->fir_state_bg.history[i - offset1]*factor);
-       ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
-    }
-    for (  ;  i >= 0;  i--)
-    {
-       exp = (ec->fir_state_bg.history[i + offset2]*factor);
-       ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
-    }
+	for (i = ec->taps - 1; i >= offset1; i--) {
+		exp = (ec->fir_state_bg.history[i - offset1] * factor);
+		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+	}
+	for (; i >= 0; i--) {
+		exp = (ec->fir_state_bg.history[i + offset2] * factor);
+		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+	}
 }
 #endif
 
-/*- End of function --------------------------------------------------------*/
-
-echo_can_state_t *echo_can_create(int len, int adaption_mode)
+struct oslec_state *oslec_create(int len, int adaption_mode)
 {
-    echo_can_state_t *ec;
-    int i;
-    int j;
+	struct oslec_state *ec;
+	int i;
 
-    ec = kmalloc(sizeof(*ec), GFP_KERNEL);
-    if (ec == NULL)
-        return  NULL;
-    memset(ec, 0, sizeof(*ec));
+	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
+	if (!ec)
+		return NULL;
 
-    ec->taps = len;
-    ec->log2taps = top_bit(len);
-    ec->curr_pos = ec->taps - 1;
+	ec->taps = len;
+	ec->log2taps = top_bit(len);
+	ec->curr_pos = ec->taps - 1;
 
-    for (i = 0;  i < 2;  i++)
-    {
-        if ((ec->fir_taps16[i] = (int16_t *) malloc((ec->taps)*sizeof(int16_t))) == NULL)
-        {
-            for (j = 0;  j < i;  j++)
-                kfree(ec->fir_taps16[j]);
-            kfree(ec);
-            return  NULL;
-        }
-        memset(ec->fir_taps16[i], 0, (ec->taps)*sizeof(int16_t));
-    }
+	for (i = 0; i < 2; i++) {
+		ec->fir_taps16[i] =
+		    kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
+		if (!ec->fir_taps16[i])
+			goto error_oom;
+	}
 
-    fir16_create(&ec->fir_state,
-                 ec->fir_taps16[0],
-                 ec->taps);
-    fir16_create(&ec->fir_state_bg,
-                 ec->fir_taps16[1],
-                 ec->taps);
+	fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
+	fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
 
-    for(i=0; i<5; i++) {
-      ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
-    }
+	for (i = 0; i < 5; i++) {
+		ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
+	}
 
-    ec->cng_level = 1000;
-    echo_can_adaption_mode(ec, adaption_mode);
+	ec->cng_level = 1000;
+	oslec_adaption_mode(ec, adaption_mode);
 
-    ec->snapshot = (int16_t*)malloc(ec->taps*sizeof(int16_t));
-    memset(ec->snapshot, 0, sizeof(int16_t)*ec->taps);
+	ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
+	if (!ec->snapshot)
+		goto error_oom;
 
-    ec->cond_met = 0;
-    ec->Pstates = 0;
-    ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
-    ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
-    ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
-    ec->Lbgn = ec->Lbgn_acc = 0;
-    ec->Lbgn_upper = 200;
-    ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
+	ec->cond_met = 0;
+	ec->Pstates = 0;
+	ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
+	ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
+	ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
+	ec->Lbgn = ec->Lbgn_acc = 0;
+	ec->Lbgn_upper = 200;
+	ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
 
-    return  ec;
+	return ec;
+
+error_oom:
+	for (i = 0; i < 2; i++)
+		kfree(ec->fir_taps16[i]);
+
+	kfree(ec);
+	return NULL;
 }
-/*- End of function --------------------------------------------------------*/
 
-void echo_can_free(echo_can_state_t *ec)
+EXPORT_SYMBOL_GPL(oslec_create);
+
+void oslec_free(struct oslec_state *ec)
 {
 	int i;
 
 	fir16_free(&ec->fir_state);
 	fir16_free(&ec->fir_state_bg);
-	for (i = 0;  i < 2;  i++)
+	for (i = 0; i < 2; i++)
 		kfree(ec->fir_taps16[i]);
 	kfree(ec->snapshot);
 	kfree(ec);
 }
-/*- End of function --------------------------------------------------------*/
 
-void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode)
+EXPORT_SYMBOL_GPL(oslec_free);
+
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
 {
-    ec->adaption_mode = adaption_mode;
+	ec->adaption_mode = adaption_mode;
 }
-/*- End of function --------------------------------------------------------*/
 
-void echo_can_flush(echo_can_state_t *ec)
+EXPORT_SYMBOL_GPL(oslec_adaption_mode);
+
+void oslec_flush(struct oslec_state *ec)
 {
-    int i;
+	int i;
 
-    ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
-    ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
-    ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
+	ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
+	ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
+	ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
 
-    ec->Lbgn = ec->Lbgn_acc = 0;
-    ec->Lbgn_upper = 200;
-    ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
+	ec->Lbgn = ec->Lbgn_acc = 0;
+	ec->Lbgn_upper = 200;
+	ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
 
-    ec->nonupdate_dwell = 0;
+	ec->nonupdate_dwell = 0;
 
-    fir16_flush(&ec->fir_state);
-    fir16_flush(&ec->fir_state_bg);
-    ec->fir_state.curr_pos = ec->taps - 1;
-    ec->fir_state_bg.curr_pos = ec->taps - 1;
-    for (i = 0;  i < 2;  i++)
-        memset(ec->fir_taps16[i], 0, ec->taps*sizeof(int16_t));
+	fir16_flush(&ec->fir_state);
+	fir16_flush(&ec->fir_state_bg);
+	ec->fir_state.curr_pos = ec->taps - 1;
+	ec->fir_state_bg.curr_pos = ec->taps - 1;
+	for (i = 0; i < 2; i++)
+		memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
 
-    ec->curr_pos = ec->taps - 1;
-    ec->Pstates = 0;
+	ec->curr_pos = ec->taps - 1;
+	ec->Pstates = 0;
 }
-/*- End of function --------------------------------------------------------*/
 
-void echo_can_snapshot(echo_can_state_t *ec) {
-    memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps*sizeof(int16_t));
+EXPORT_SYMBOL_GPL(oslec_flush);
+
+void oslec_snapshot(struct oslec_state *ec)
+{
+	memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
 }
-/*- End of function --------------------------------------------------------*/
+
+EXPORT_SYMBOL_GPL(oslec_snapshot);
 
 /* Dual Path Echo Canceller ------------------------------------------------*/
 
-int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx)
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
 {
-    int32_t echo_value;
-    int clean_bg;
-    int tmp, tmp1;
+	int32_t echo_value;
+	int clean_bg;
+	int tmp, tmp1;
 
-    /* Input scaling was found be required to prevent problems when tx
-       starts clipping.  Another possible way to handle this would be the
-       filter coefficent scaling. */
+	/* Input scaling was found to be required to prevent problems when tx
+	   starts clipping.  Another possible way to handle this would be the
+	   filter coefficient scaling. */
 
-    ec->tx = tx; ec->rx = rx;
-    tx >>=1;
-    rx >>=1;
+	ec->tx = tx;
+	ec->rx = rx;
+	tx >>= 1;
+	rx >>= 1;
 
-    /*
-       Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required
-       otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta)
-       only real axis.  Some chip sets (like Si labs) don't need
-       this, but something like a $10 X100P card does.  Any DC really slows
-       down convergence.
+	/*
+	   Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required
+	   otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta)
+	   only real axis.  Some chip sets (like Si labs) don't need
+	   this, but something like a $10 X100P card does.  Any DC really slows
+	   down convergence.
 
-       Note: removes some low frequency from the signal, this reduces
-       the speech quality when listening to samples through headphones
-       but may not be obvious through a telephone handset.
+	   Note: removes some low frequency from the signal, this reduces
+	   the speech quality when listening to samples through headphones
+	   but may not be obvious through a telephone handset.
 
-       Note that the 3dB frequency in radians is approx Beta, e.g. for
-       Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
-    */
+	   Note that the 3dB frequency in radians is approx Beta, e.g. for
+	   Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
+	 */
 
-    if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
-      tmp = rx << 15;
+	if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
+		tmp = rx << 15;
 #if 1
-        /* Make sure the gain of the HPF is 1.0. This can still saturate a little under
-           impulse conditions, and it might roll to 32768 and need clipping on sustained peak
-           level signals. However, the scale of such clipping is small, and the error due to
-           any saturation should not markedly affect the downstream processing. */
-        tmp -= (tmp >> 4);
+		/* Make sure the gain of the HPF is 1.0. This can still saturate a little under
+		   impulse conditions, and it might roll to 32768 and need clipping on sustained peak
+		   level signals. However, the scale of such clipping is small, and the error due to
+		   any saturation should not markedly affect the downstream processing. */
+		tmp -= (tmp >> 4);
 #endif
-      ec->rx_1 += -(ec->rx_1>>DC_LOG2BETA) + tmp - ec->rx_2;
+		ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
 
-      /* hard limit filter to prevent clipping.  Note that at this stage
-	 rx should be limited to +/- 16383 due to right shift above */
-      tmp1 = ec->rx_1 >> 15;
-      if (tmp1 > 16383) tmp1 = 16383;
-      if (tmp1 < -16383) tmp1 = -16383;
-      rx = tmp1;
-      ec->rx_2 = tmp;
-    }
-
-    /* Block average of power in the filter states.  Used for
-       adaption power calculation. */
-
-    {
-	int new, old;
-
-	/* efficient "out with the old and in with the new" algorithm so
-	   we don't have to recalculate over the whole block of
-	   samples. */
-	new = (int)tx * (int)tx;
-	old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
-              (int)ec->fir_state.history[ec->fir_state.curr_pos];
-	ec->Pstates += ((new - old) + (1<<ec->log2taps)) >> ec->log2taps;
-	if (ec->Pstates < 0) ec->Pstates = 0;
-    }
-
-    /* Calculate short term average levels using simple single pole IIRs */
-
-    ec->Ltxacc += abs(tx) - ec->Ltx;
-    ec->Ltx = (ec->Ltxacc + (1<<4)) >> 5;
-    ec->Lrxacc += abs(rx) - ec->Lrx;
-    ec->Lrx = (ec->Lrxacc + (1<<4)) >> 5;
-
-    /* Foreground filter ---------------------------------------------------*/
-
-    ec->fir_state.coeffs = ec->fir_taps16[0];
-    echo_value = fir16(&ec->fir_state, tx);
-    ec->clean = rx - echo_value;
-    ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
-    ec->Lclean = (ec->Lcleanacc + (1<<4)) >> 5;
-
-    /* Background filter ---------------------------------------------------*/
-
-    echo_value = fir16(&ec->fir_state_bg, tx);
-    clean_bg = rx - echo_value;
-    ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
-    ec->Lclean_bg = (ec->Lclean_bgacc + (1<<4)) >> 5;
-
-    /* Background Filter adaption -----------------------------------------*/
-
-    /* Almost always adap bg filter, just simple DT and energy
-       detection to minimise adaption in cases of strong double talk.
-       However this is not critical for the dual path algorithm.
-    */
-    ec->factor = 0;
-    ec->shift = 0;
-    if ((ec->nonupdate_dwell == 0)) {
-	int   P, logP, shift;
-
-	/* Determine:
-
-	   f = Beta * clean_bg_rx/P ------ (1)
-
-	   where P is the total power in the filter states.
-
-	   The Boffins have shown that if we obey (1) we converge
-	   quickly and avoid instability.
-
-	   The correct factor f must be in Q30, as this is the fixed
-	   point format required by the lms_adapt_bg() function,
-	   therefore the scaled version of (1) is:
-
-	   (2^30) * f  = (2^30) * Beta * clean_bg_rx/P
-	       factor  = (2^30) * Beta * clean_bg_rx/P         ----- (2)
-
-	   We have chosen Beta = 0.25 by experiment, so:
-
-	       factor  = (2^30) * (2^-2) * clean_bg_rx/P
-
-                                       (30 - 2 - log2(P))
-	       factor  = clean_bg_rx 2                         ----- (3)
-
-	   To avoid a divide we approximate log2(P) as top_bit(P),
-	   which returns the position of the highest non-zero bit in
-	   P.  This approximation introduces an error as large as a
-	   factor of 2, but the algorithm seems to handle it OK.
-
-	   Come to think of it a divide may not be a big deal on a
-	   modern DSP, so its probably worth checking out the cycles
-	   for a divide versus a top_bit() implementation.
-	*/
-
-	P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
-	logP = top_bit(P) + ec->log2taps;
-	shift = 30 - 2 - logP;
-	ec->shift = shift;
-
-	lms_adapt_bg(ec, clean_bg, shift);
-    }
-
-    /* very simple DTD to make sure we dont try and adapt with strong
-       near end speech */
-
-    ec->adapt = 0;
-    if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
-	ec->nonupdate_dwell = DTD_HANGOVER;
-    if (ec->nonupdate_dwell)
-	ec->nonupdate_dwell--;
-
-    /* Transfer logic ------------------------------------------------------*/
-
-    /* These conditions are from the dual path paper [1], I messed with
-       them a bit to improve performance. */
-
-    if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
-	(ec->nonupdate_dwell == 0) &&
-	(8*ec->Lclean_bg < 7*ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
-	(8*ec->Lclean_bg < ec->Ltx)      /* (ec->Lclean_bg < 0.125*ec->Ltx)    */ )
-    {
-	if (ec->cond_met == 6) {
-	    /* BG filter has had better results for 6 consecutive samples */
-	    ec->adapt = 1;
-	    memcpy(ec->fir_taps16[0], ec->fir_taps16[1], ec->taps*sizeof(int16_t));
+		/* hard limit filter to prevent clipping.  Note that at this stage
+		   rx should be limited to +/- 16383 due to right shift above */
+		tmp1 = ec->rx_1 >> 15;
+		if (tmp1 > 16383)
+			tmp1 = 16383;
+		if (tmp1 < -16383)
+			tmp1 = -16383;
+		rx = tmp1;
+		ec->rx_2 = tmp;
 	}
-	else
-	    ec->cond_met++;
-    }
-    else
-	ec->cond_met = 0;
 
-    /* Non-Linear Processing ---------------------------------------------------*/
+	/* Block average of power in the filter states.  Used for
+	   adaption power calculation. */
 
-    ec->clean_nlp = ec->clean;
-    if (ec->adaption_mode & ECHO_CAN_USE_NLP)
-    {
-        /* Non-linear processor - a fancy way to say "zap small signals, to avoid
-           residual echo due to (uLaw/ALaw) non-linearity in the channel.". */
-
-      if ((16*ec->Lclean < ec->Ltx))
-      {
-	/* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB,
-	   so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */
-        if (ec->adaption_mode & ECHO_CAN_USE_CNG)
 	{
-	    ec->cng_level = ec->Lbgn;
+		int new, old;
 
-	    /* Very elementary comfort noise generation.  Just random
-	       numbers rolled off very vaguely Hoth-like.  DR: This
-	       noise doesn't sound quite right to me - I suspect there
-	       are some overlfow issues in the filtering as it's too
-	       "crackly".  TODO: debug this, maybe just play noise at
-	       high level or look at spectrum.
-	    */
-
-	    ec->cng_rndnum = 1664525U*ec->cng_rndnum + 1013904223U;
-	    ec->cng_filter = ((ec->cng_rndnum & 0xFFFF) - 32768 + 5*ec->cng_filter) >> 3;
-	    ec->clean_nlp = (ec->cng_filter*ec->cng_level*8) >> 14;
-
-        }
-        else if (ec->adaption_mode & ECHO_CAN_USE_CLIP)
-	{
-	    /* This sounds much better than CNG */
-	    if (ec->clean_nlp > ec->Lbgn)
-	      ec->clean_nlp = ec->Lbgn;
-	    if (ec->clean_nlp < -ec->Lbgn)
-	      ec->clean_nlp = -ec->Lbgn;
+		/* efficient "out with the old and in with the new" algorithm so
+		   we don't have to recalculate over the whole block of
+		   samples. */
+		new = (int)tx * (int)tx;
+		old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
+		    (int)ec->fir_state.history[ec->fir_state.curr_pos];
+		ec->Pstates +=
+		    ((new - old) + (1 << ec->log2taps)) >> ec->log2taps;
+		if (ec->Pstates < 0)
+			ec->Pstates = 0;
 	}
-	else
-        {
-	  /* just mute the residual, doesn't sound very good, used mainly
-	     in G168 tests */
-          ec->clean_nlp = 0;
-        }
-      }
-      else {
-	  /* Background noise estimator.  I tried a few algorithms
-	     here without much luck.  This very simple one seems to
-	     work best, we just average the level using a slow (1 sec
-	     time const) filter if the current level is less than a
-	     (experimentally derived) constant.  This means we dont
-	     include high level signals like near end speech.  When
-	     combined with CNG or especially CLIP seems to work OK.
-	  */
-	  if (ec->Lclean < 40) {
-	      ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
-	      ec->Lbgn = (ec->Lbgn_acc + (1<<11)) >> 12;
-	  }
-       }
-    }
 
-    /* Roll around the taps buffer */
-    if (ec->curr_pos <= 0)
-        ec->curr_pos = ec->taps;
-    ec->curr_pos--;
+	/* Calculate short term average levels using simple single pole IIRs */
 
-    if (ec->adaption_mode & ECHO_CAN_DISABLE)
-      ec->clean_nlp = rx;
+	ec->Ltxacc += abs(tx) - ec->Ltx;
+	ec->Ltx = (ec->Ltxacc + (1 << 4)) >> 5;
+	ec->Lrxacc += abs(rx) - ec->Lrx;
+	ec->Lrx = (ec->Lrxacc + (1 << 4)) >> 5;
 
-    /* Output scaled back up again to match input scaling */
+	/* Foreground filter --------------------------------------------------- */
 
-    return (int16_t) ec->clean_nlp << 1;
+	ec->fir_state.coeffs = ec->fir_taps16[0];
+	echo_value = fir16(&ec->fir_state, tx);
+	ec->clean = rx - echo_value;
+	ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
+	ec->Lclean = (ec->Lcleanacc + (1 << 4)) >> 5;
+
+	/* Background filter --------------------------------------------------- */
+
+	echo_value = fir16(&ec->fir_state_bg, tx);
+	clean_bg = rx - echo_value;
+	ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
+	ec->Lclean_bg = (ec->Lclean_bgacc + (1 << 4)) >> 5;
+
+	/* Background Filter adaption ----------------------------------------- */
+
+	/* Almost always adapt the bg filter, just simple DT and energy
+	   detection to minimise adaption in cases of strong double talk.
+	   However this is not critical for the dual path algorithm.
+	 */
+	ec->factor = 0;
+	ec->shift = 0;
+	if (ec->nonupdate_dwell == 0) {
+		int P, logP, shift;
+
+		/* Determine:
+
+		   f = Beta * clean_bg_rx/P ------ (1)
+
+		   where P is the total power in the filter states.
+
+		   The Boffins have shown that if we obey (1) we converge
+		   quickly and avoid instability.
+
+		   The correct factor f must be in Q30, as this is the fixed
+		   point format required by the lms_adapt_bg() function,
+		   therefore the scaled version of (1) is:
+
+		   (2^30) * f  = (2^30) * Beta * clean_bg_rx/P
+		   factor  = (2^30) * Beta * clean_bg_rx/P         ----- (2)
+
+		   We have chosen Beta = 0.25 by experiment, so:
+
+		   factor  = (2^30) * (2^-2) * clean_bg_rx/P
+
+		   (30 - 2 - log2(P))
+		   factor  = clean_bg_rx 2                         ----- (3)
+
+		   To avoid a divide we approximate log2(P) as top_bit(P),
+		   which returns the position of the highest non-zero bit in
+		   P.  This approximation introduces an error as large as a
+		   factor of 2, but the algorithm seems to handle it OK.
+
+		   Come to think of it a divide may not be a big deal on a
+		   modern DSP, so it's probably worth checking out the cycles
+		   for a divide versus a top_bit() implementation.
+		 */
+
+		P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
+		logP = top_bit(P) + ec->log2taps;
+		shift = 30 - 2 - logP;
+		ec->shift = shift;
+
+		lms_adapt_bg(ec, clean_bg, shift);
+	}
+
+	/* very simple DTD to make sure we don't try and adapt with strong
+	   near end speech */
+
+	ec->adapt = 0;
+	if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
+		ec->nonupdate_dwell = DTD_HANGOVER;
+	if (ec->nonupdate_dwell)
+		ec->nonupdate_dwell--;
+
+	/* Transfer logic ------------------------------------------------------ */
+
+	/* These conditions are from the dual path paper [1], I messed with
+	   them a bit to improve performance. */
+
+	if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
+	    (ec->nonupdate_dwell == 0) &&
+	    (8 * ec->Lclean_bg <
+	     7 * ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
+	    (8 * ec->Lclean_bg <
+	     ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx)    */ ) {
+		if (ec->cond_met == 6) {
+			/* BG filter has had better results for 6 consecutive samples */
+			ec->adapt = 1;
+			memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
+			       ec->taps * sizeof(int16_t));
+		} else
+			ec->cond_met++;
+	} else
+		ec->cond_met = 0;
+
+	/* Non-Linear Processing --------------------------------------------------- */
+
+	ec->clean_nlp = ec->clean;
+	if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
+		/* Non-linear processor - a fancy way to say "zap small signals, to avoid
+		   residual echo due to (uLaw/ALaw) non-linearity in the channel". */
+
+		if ((16 * ec->Lclean < ec->Ltx)) {
+			/* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB,
+			   so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */
+			if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
+				ec->cng_level = ec->Lbgn;
+
+				/* Very elementary comfort noise generation.  Just random
+				   numbers rolled off very vaguely Hoth-like.  DR: This
+				   noise doesn't sound quite right to me - I suspect there
+				   are some overflow issues in the filtering as it's too
+				   "crackly".  TODO: debug this, maybe just play noise at
+				   high level or look at spectrum.
+				 */
+
+				ec->cng_rndnum =
+				    1664525U * ec->cng_rndnum + 1013904223U;
+				ec->cng_filter =
+				    ((ec->cng_rndnum & 0xFFFF) - 32768 +
+				     5 * ec->cng_filter) >> 3;
+				ec->clean_nlp =
+				    (ec->cng_filter * ec->cng_level * 8) >> 14;
+
+			} else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
+				/* This sounds much better than CNG */
+				if (ec->clean_nlp > ec->Lbgn)
+					ec->clean_nlp = ec->Lbgn;
+				if (ec->clean_nlp < -ec->Lbgn)
+					ec->clean_nlp = -ec->Lbgn;
+			} else {
+				/* just mute the residual, doesn't sound very good, used mainly
+				   in G168 tests */
+				ec->clean_nlp = 0;
+			}
+		} else {
+			/* Background noise estimator.  I tried a few algorithms
+			   here without much luck.  This very simple one seems to
+			   work best, we just average the level using a slow (1 sec
+			   time const) filter if the current level is less than a
+			   (experimentally derived) constant.  This means we dont
+			   (experimentally derived) constant.  This means we don't
+			   combined with CNG or especially CLIP seems to work OK.
+			 */
+			if (ec->Lclean < 40) {
+				ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
+				ec->Lbgn = (ec->Lbgn_acc + (1 << 11)) >> 12;
+			}
+		}
+	}
+
+	/* Roll around the taps buffer */
+	if (ec->curr_pos <= 0)
+		ec->curr_pos = ec->taps;
+	ec->curr_pos--;
+
+	if (ec->adaption_mode & ECHO_CAN_DISABLE)
+		ec->clean_nlp = rx;
+
+	/* Output scaled back up again to match input scaling */
+
+	return (int16_t) ec->clean_nlp << 1;
 }
 
-/*- End of function --------------------------------------------------------*/
+EXPORT_SYMBOL_GPL(oslec_update);
 
 /* This function is separated from the echo canceller as it is usually called
    as part of the tx process.  See rx HP (DC blocking) filter above, it's
@@ -608,25 +605,35 @@
    precision, which noise shapes things, giving very clean DC removal.
 */
 
-int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx) {
-    int tmp, tmp1;
+int16_t oslec_hpf_tx(struct oslec_state * ec, int16_t tx)
+{
+	int tmp, tmp1;
 
-    if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
-        tmp = tx << 15;
+	if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
+		tmp = tx << 15;
 #if 1
-        /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under
-           impulse conditions, and it might roll to 32768 and need clipping on sustained peak
-           level signals. However, the scale of such clipping is small, and the error due to
-           any saturation should not markedly affect the downstream processing. */
-        tmp -= (tmp >> 4);
+		/* Make sure the gain of the HPF is 1.0. The first can still saturate a little under
+		   impulse conditions, and it might roll to 32768 and need clipping on sustained peak
+		   level signals. However, the scale of such clipping is small, and the error due to
+		   any saturation should not markedly affect the downstream processing. */
+		tmp -= (tmp >> 4);
 #endif
-        ec->tx_1 += -(ec->tx_1>>DC_LOG2BETA) + tmp - ec->tx_2;
-        tmp1 = ec->tx_1 >> 15;
-	if (tmp1 > 32767) tmp1 = 32767;
-	if (tmp1 < -32767) tmp1 = -32767;
-	tx = tmp1;
-        ec->tx_2 = tmp;
-    }
+		ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
+		tmp1 = ec->tx_1 >> 15;
+		if (tmp1 > 32767)
+			tmp1 = 32767;
+		if (tmp1 < -32767)
+			tmp1 = -32767;
+		tx = tmp1;
+		ec->tx_2 = tmp;
+	}
 
-    return tx;
+	return tx;
 }
+
+EXPORT_SYMBOL_GPL(oslec_hpf_tx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Rowe");
+MODULE_DESCRIPTION("Open Source Line Echo Canceller");
+MODULE_VERSION("0.3.0");
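
As an aside on the arithmetic used in the comments above: floor(log2(P)) is
approximated by the index of the highest set bit, and each factor of 2 in
level is about 6 dB, so the 16x NLP threshold corresponds to roughly 24 dB.
A standalone sketch (not the driver's code; this top_bit() is a hypothetical
stand-in for the helper echo.c references):

#include <math.h>
#include <stdio.h>

/* Hypothetical stand-in for top_bit(): index of the highest set bit,
   i.e. floor(log2(x)) for x > 0, and -1 for x == 0. */
static int top_bit(unsigned int x)
{
	int n = -1;

	while (x) {
		x >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int P = 10000;

	/* Off by less than one bit position, i.e. at most a factor of 2 in P. */
	printf("top_bit(%u) = %d, log2(%u) = %.2f\n",
	       P, top_bit(P), P, log2((double)P));

	/* Each factor of 2 is ~6 dB, so a 16x reduction is ~24 dB. */
	printf("20*log10(16) = %.2f dB\n", 20.0 * log10(16.0));
	return 0;
}

Built with gcc and -lm this prints 13 against log2 = 13.29, and 24.08 dB.
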
diff --git a/drivers/staging/echo/echo.h b/drivers/staging/echo/echo.h
index 7a91b439..9fb9543 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/staging/echo/echo.h
@@ -118,23 +118,14 @@
 */
 
 #include "fir.h"
-
-/* Mask bits for the adaption mode */
-#define ECHO_CAN_USE_ADAPTION	0x01
-#define ECHO_CAN_USE_NLP	0x02
-#define ECHO_CAN_USE_CNG	0x04
-#define ECHO_CAN_USE_CLIP	0x08
-#define ECHO_CAN_USE_TX_HPF	0x10
-#define ECHO_CAN_USE_RX_HPF	0x20
-#define ECHO_CAN_DISABLE	0x40
+#include "oslec.h"
 
 /*!
     G.168 echo canceller descriptor. This defines the working state for a line
     echo canceller.
 */
-typedef struct
-{
-	int16_t tx,rx;
+struct oslec_state {
+	int16_t tx, rx;
 	int16_t clean;
 	int16_t clean_nlp;
 
@@ -176,45 +167,6 @@
 
 	/* snapshot sample of coeffs used for development */
 	int16_t *snapshot;
-} echo_can_state_t;
+};
 
-/*! Create a voice echo canceller context.
-    \param len The length of the canceller, in samples.
-    \return The new canceller context, or NULL if the canceller could not be created.
-*/
-echo_can_state_t *echo_can_create(int len, int adaption_mode);
-
-/*! Free a voice echo canceller context.
-    \param ec The echo canceller context.
-*/
-void echo_can_free(echo_can_state_t *ec);
-
-/*! Flush (reinitialise) a voice echo canceller context.
-    \param ec The echo canceller context.
-*/
-void echo_can_flush(echo_can_state_t *ec);
-
-/*! Set the adaption mode of a voice echo canceller context.
-    \param ec The echo canceller context.
-    \param adapt The mode.
-*/
-void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode);
-
-void echo_can_snapshot(echo_can_state_t *ec);
-
-/*! Process a sample through a voice echo canceller.
-    \param ec The echo canceller context.
-    \param tx The transmitted audio sample.
-    \param rx The received audio sample.
-    \return The clean (echo cancelled) received sample.
-*/
-int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx);
-
-/*! Process to high pass filter the tx signal.
-    \param ec The echo canceller context.
-    \param tx The transmitted auio sample.
-    \return The HP filtered transmit sample, send this to your D/A.
-*/
-int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx);
-
-#endif	/* __ECHO_H */
+#endif /* __ECHO_H */
diff --git a/drivers/staging/echo/fir.h b/drivers/staging/echo/fir.h
index e1bfc49..5645cb1 100644
--- a/drivers/staging/echo/fir.h
+++ b/drivers/staging/echo/fir.h
@@ -72,8 +72,7 @@
     16 bit integer FIR descriptor. This defines the working state for a single
     instance of an FIR filter using 16 bit integer coefficients.
 */
-typedef struct
-{
+typedef struct {
 	int taps;
 	int curr_pos;
 	const int16_t *coeffs;
@@ -85,8 +84,7 @@
     instance of an FIR filter using 32 bit integer coefficients, and filtering
     16 bit integer data.
 */
-typedef struct
-{
+typedef struct {
 	int taps;
 	int curr_pos;
 	const int32_t *coeffs;
@@ -97,273 +95,201 @@
     Floating point FIR descriptor. This defines the working state for a single
     instance of an FIR filter using floating point coefficients and data.
 */
-typedef struct
-{
+typedef struct {
 	int taps;
 	int curr_pos;
 	const float *coeffs;
 	float *history;
 } fir_float_state_t;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-static __inline__ const int16_t *fir16_create(fir16_state_t *fir,
-                                              const int16_t *coeffs,
-                                              int taps)
+static __inline__ const int16_t *fir16_create(fir16_state_t * fir,
+					      const int16_t * coeffs, int taps)
 {
 	fir->taps = taps;
 	fir->curr_pos = taps - 1;
 	fir->coeffs = coeffs;
-#if defined(USE_MMX)  ||  defined(USE_SSE2) || defined(__BLACKFIN_ASM__)
-	if ((fir->history = malloc(2*taps*sizeof(int16_t))))
-		memset(fir->history, 0, 2*taps*sizeof(int16_t));
+#if defined(USE_MMX)  ||  defined(USE_SSE2) || defined(__bfin__)
+	fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL);
 #else
-	if ((fir->history = (int16_t *) malloc(taps*sizeof(int16_t))))
-		memset(fir->history, 0, taps*sizeof(int16_t));
+	fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
 #endif
 	return fir->history;
 }
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ void fir16_flush(fir16_state_t *fir)
+static __inline__ void fir16_flush(fir16_state_t * fir)
 {
-#if defined(USE_MMX)  ||  defined(USE_SSE2) || defined(__BLACKFIN_ASM__)
-    memset(fir->history, 0, 2*fir->taps*sizeof(int16_t));
+#if defined(USE_MMX)  ||  defined(USE_SSE2) || defined(__bfin__)
+	memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t));
 #else
-    memset(fir->history, 0, fir->taps*sizeof(int16_t));
+	memset(fir->history, 0, fir->taps * sizeof(int16_t));
 #endif
 }
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ void fir16_free(fir16_state_t *fir)
+static __inline__ void fir16_free(fir16_state_t * fir)
 {
-	free(fir->history);
+	kfree(fir->history);
 }
-/*- End of function --------------------------------------------------------*/
 
-#ifdef __BLACKFIN_ASM__
+#ifdef __bfin__
 static inline int32_t dot_asm(short *x, short *y, int len)
 {
-   int dot;
+	int dot;
 
-   len--;
+	len--;
 
-   __asm__
-   (
-   "I0 = %1;\n\t"
-   "I1 = %2;\n\t"
-   "A0 = 0;\n\t"
-   "R0.L = W[I0++] || R1.L = W[I1++];\n\t"
-   "LOOP dot%= LC0 = %3;\n\t"
-   "LOOP_BEGIN dot%=;\n\t"
-      "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t"
-   "LOOP_END dot%=;\n\t"
-   "A0 += R0.L*R1.L (IS);\n\t"
-   "R0 = A0;\n\t"
-   "%0 = R0;\n\t"
-   : "=&d" (dot)
-   : "a" (x), "a" (y), "a" (len)
-   : "I0", "I1", "A1", "A0", "R0", "R1"
-   );
+	__asm__("I0 = %1;\n\t"
+		"I1 = %2;\n\t"
+		"A0 = 0;\n\t"
+		"R0.L = W[I0++] || R1.L = W[I1++];\n\t"
+		"LOOP dot%= LC0 = %3;\n\t"
+		"LOOP_BEGIN dot%=;\n\t"
+		"A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t"
+		"LOOP_END dot%=;\n\t"
+		"A0 += R0.L*R1.L (IS);\n\t"
+		"R0 = A0;\n\t"
+		"%0 = R0;\n\t"
+		:"=&d"(dot)
+		:"a"(x), "a"(y), "a"(len)
+		:"I0", "I1", "A1", "A0", "R0", "R1"
+	);
 
-   return dot;
+	return dot;
 }
 #endif
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
+static __inline__ int16_t fir16(fir16_state_t * fir, int16_t sample)
 {
-    int32_t y;
+	int32_t y;
 #if defined(USE_MMX)
-    int i;
-    mmx_t *mmx_coeffs;
-    mmx_t *mmx_hist;
+	int i;
+	mmx_t *mmx_coeffs;
+	mmx_t *mmx_hist;
 
-    fir->history[fir->curr_pos] = sample;
-    fir->history[fir->curr_pos + fir->taps] = sample;
+	fir->history[fir->curr_pos] = sample;
+	fir->history[fir->curr_pos + fir->taps] = sample;
 
-    mmx_coeffs = (mmx_t *) fir->coeffs;
-    mmx_hist = (mmx_t *) &fir->history[fir->curr_pos];
-    i = fir->taps;
-    pxor_r2r(mm4, mm4);
-    /* 8 samples per iteration, so the filter must be a multiple of 8 long. */
-    while (i > 0)
-    {
-        movq_m2r(mmx_coeffs[0], mm0);
-        movq_m2r(mmx_coeffs[1], mm2);
-        movq_m2r(mmx_hist[0], mm1);
-        movq_m2r(mmx_hist[1], mm3);
-        mmx_coeffs += 2;
-        mmx_hist += 2;
-        pmaddwd_r2r(mm1, mm0);
-        pmaddwd_r2r(mm3, mm2);
-        paddd_r2r(mm0, mm4);
-        paddd_r2r(mm2, mm4);
-        i -= 8;
-    }
-    movq_r2r(mm4, mm0);
-    psrlq_i2r(32, mm0);
-    paddd_r2r(mm0, mm4);
-    movd_r2m(mm4, y);
-    emms();
+	mmx_coeffs = (mmx_t *) fir->coeffs;
+	mmx_hist = (mmx_t *) & fir->history[fir->curr_pos];
+	i = fir->taps;
+	pxor_r2r(mm4, mm4);
+	/* 8 samples per iteration, so the filter must be a multiple of 8 long. */
+	while (i > 0) {
+		movq_m2r(mmx_coeffs[0], mm0);
+		movq_m2r(mmx_coeffs[1], mm2);
+		movq_m2r(mmx_hist[0], mm1);
+		movq_m2r(mmx_hist[1], mm3);
+		mmx_coeffs += 2;
+		mmx_hist += 2;
+		pmaddwd_r2r(mm1, mm0);
+		pmaddwd_r2r(mm3, mm2);
+		paddd_r2r(mm0, mm4);
+		paddd_r2r(mm2, mm4);
+		i -= 8;
+	}
+	movq_r2r(mm4, mm0);
+	psrlq_i2r(32, mm0);
+	paddd_r2r(mm0, mm4);
+	movd_r2m(mm4, y);
+	emms();
 #elif defined(USE_SSE2)
-    int i;
-    xmm_t *xmm_coeffs;
-    xmm_t *xmm_hist;
+	int i;
+	xmm_t *xmm_coeffs;
+	xmm_t *xmm_hist;
 
-    fir->history[fir->curr_pos] = sample;
-    fir->history[fir->curr_pos + fir->taps] = sample;
+	fir->history[fir->curr_pos] = sample;
+	fir->history[fir->curr_pos + fir->taps] = sample;
 
-    xmm_coeffs = (xmm_t *) fir->coeffs;
-    xmm_hist = (xmm_t *) &fir->history[fir->curr_pos];
-    i = fir->taps;
-    pxor_r2r(xmm4, xmm4);
-    /* 16 samples per iteration, so the filter must be a multiple of 16 long. */
-    while (i > 0)
-    {
-        movdqu_m2r(xmm_coeffs[0], xmm0);
-        movdqu_m2r(xmm_coeffs[1], xmm2);
-        movdqu_m2r(xmm_hist[0], xmm1);
-        movdqu_m2r(xmm_hist[1], xmm3);
-        xmm_coeffs += 2;
-        xmm_hist += 2;
-        pmaddwd_r2r(xmm1, xmm0);
-        pmaddwd_r2r(xmm3, xmm2);
-        paddd_r2r(xmm0, xmm4);
-        paddd_r2r(xmm2, xmm4);
-        i -= 16;
-    }
-    movdqa_r2r(xmm4, xmm0);
-    psrldq_i2r(8, xmm0);
-    paddd_r2r(xmm0, xmm4);
-    movdqa_r2r(xmm4, xmm0);
-    psrldq_i2r(4, xmm0);
-    paddd_r2r(xmm0, xmm4);
-    movd_r2m(xmm4, y);
-#elif defined(__BLACKFIN_ASM__)
-    fir->history[fir->curr_pos] = sample;
-    fir->history[fir->curr_pos + fir->taps] = sample;
-    y = dot_asm((int16_t*)fir->coeffs, &fir->history[fir->curr_pos], fir->taps);
+	xmm_coeffs = (xmm_t *) fir->coeffs;
+	xmm_hist = (xmm_t *) & fir->history[fir->curr_pos];
+	i = fir->taps;
+	pxor_r2r(xmm4, xmm4);
+	/* 16 samples per iteration, so the filter must be a multiple of 16 long. */
+	while (i > 0) {
+		movdqu_m2r(xmm_coeffs[0], xmm0);
+		movdqu_m2r(xmm_coeffs[1], xmm2);
+		movdqu_m2r(xmm_hist[0], xmm1);
+		movdqu_m2r(xmm_hist[1], xmm3);
+		xmm_coeffs += 2;
+		xmm_hist += 2;
+		pmaddwd_r2r(xmm1, xmm0);
+		pmaddwd_r2r(xmm3, xmm2);
+		paddd_r2r(xmm0, xmm4);
+		paddd_r2r(xmm2, xmm4);
+		i -= 16;
+	}
+	movdqa_r2r(xmm4, xmm0);
+	psrldq_i2r(8, xmm0);
+	paddd_r2r(xmm0, xmm4);
+	movdqa_r2r(xmm4, xmm0);
+	psrldq_i2r(4, xmm0);
+	paddd_r2r(xmm0, xmm4);
+	movd_r2m(xmm4, y);
+#elif defined(__bfin__)
+	fir->history[fir->curr_pos] = sample;
+	fir->history[fir->curr_pos + fir->taps] = sample;
+	y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos],
+		    fir->taps);
 #else
-    int i;
-    int offset1;
-    int offset2;
+	int i;
+	int offset1;
+	int offset2;
 
-    fir->history[fir->curr_pos] = sample;
+	fir->history[fir->curr_pos] = sample;
 
-    offset2 = fir->curr_pos;
-    offset1 = fir->taps - offset2;
-    y = 0;
-    for (i = fir->taps - 1;  i >= offset1;  i--)
-        y += fir->coeffs[i]*fir->history[i - offset1];
-    for (  ;  i >= 0;  i--)
-        y += fir->coeffs[i]*fir->history[i + offset2];
+	offset2 = fir->curr_pos;
+	offset1 = fir->taps - offset2;
+	y = 0;
+	for (i = fir->taps - 1; i >= offset1; i--)
+		y += fir->coeffs[i] * fir->history[i - offset1];
+	for (; i >= 0; i--)
+		y += fir->coeffs[i] * fir->history[i + offset2];
 #endif
-    if (fir->curr_pos <= 0)
-    	fir->curr_pos = fir->taps;
-    fir->curr_pos--;
-    return (int16_t) (y >> 15);
+	if (fir->curr_pos <= 0)
+		fir->curr_pos = fir->taps;
+	fir->curr_pos--;
+	return (int16_t) (y >> 15);
 }
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ const int16_t *fir32_create(fir32_state_t *fir,
-                                              const int32_t *coeffs,
-                                              int taps)
+static __inline__ const int16_t *fir32_create(fir32_state_t * fir,
+					      const int32_t * coeffs, int taps)
 {
-    fir->taps = taps;
-    fir->curr_pos = taps - 1;
-    fir->coeffs = coeffs;
-    fir->history = (int16_t *) malloc(taps*sizeof(int16_t));
-    if (fir->history)
-    	memset(fir->history, '\0', taps*sizeof(int16_t));
-    return fir->history;
+	fir->taps = taps;
+	fir->curr_pos = taps - 1;
+	fir->coeffs = coeffs;
+	fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
+	return fir->history;
 }
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ void fir32_flush(fir32_state_t *fir)
+static __inline__ void fir32_flush(fir32_state_t * fir)
 {
-    memset(fir->history, 0, fir->taps*sizeof(int16_t));
+	memset(fir->history, 0, fir->taps * sizeof(int16_t));
 }
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ void fir32_free(fir32_state_t *fir)
+static __inline__ void fir32_free(fir32_state_t * fir)
 {
-    free(fir->history);
+	kfree(fir->history);
 }
-/*- End of function --------------------------------------------------------*/
 
-static __inline__ int16_t fir32(fir32_state_t *fir, int16_t sample)
+static __inline__ int16_t fir32(fir32_state_t * fir, int16_t sample)
 {
-    int i;
-    int32_t y;
-    int offset1;
-    int offset2;
+	int i;
+	int32_t y;
+	int offset1;
+	int offset2;
 
-    fir->history[fir->curr_pos] = sample;
-    offset2 = fir->curr_pos;
-    offset1 = fir->taps - offset2;
-    y = 0;
-    for (i = fir->taps - 1;  i >= offset1;  i--)
-        y += fir->coeffs[i]*fir->history[i - offset1];
-    for (  ;  i >= 0;  i--)
-        y += fir->coeffs[i]*fir->history[i + offset2];
-    if (fir->curr_pos <= 0)
-    	fir->curr_pos = fir->taps;
-    fir->curr_pos--;
-    return (int16_t) (y >> 15);
+	fir->history[fir->curr_pos] = sample;
+	offset2 = fir->curr_pos;
+	offset1 = fir->taps - offset2;
+	y = 0;
+	for (i = fir->taps - 1; i >= offset1; i--)
+		y += fir->coeffs[i] * fir->history[i - offset1];
+	for (; i >= 0; i--)
+		y += fir->coeffs[i] * fir->history[i + offset2];
+	if (fir->curr_pos <= 0)
+		fir->curr_pos = fir->taps;
+	fir->curr_pos--;
+	return (int16_t) (y >> 15);
 }
-/*- End of function --------------------------------------------------------*/
-
-#ifndef __KERNEL__
-static __inline__ const float *fir_float_create(fir_float_state_t *fir,
-                                                const float *coeffs,
-    	    	    	                        int taps)
-{
-    fir->taps = taps;
-    fir->curr_pos = taps - 1;
-    fir->coeffs = coeffs;
-    fir->history = (float *) malloc(taps*sizeof(float));
-    if (fir->history)
-        memset(fir->history, '\0', taps*sizeof(float));
-    return fir->history;
-}
-/*- End of function --------------------------------------------------------*/
-
-static __inline__ void fir_float_free(fir_float_state_t *fir)
-{
-    free(fir->history);
-}
-/*- End of function --------------------------------------------------------*/
-
-static __inline__ int16_t fir_float(fir_float_state_t *fir, int16_t sample)
-{
-    int i;
-    float y;
-    int offset1;
-    int offset2;
-
-    fir->history[fir->curr_pos] = sample;
-
-    offset2 = fir->curr_pos;
-    offset1 = fir->taps - offset2;
-    y = 0;
-    for (i = fir->taps - 1;  i >= offset1;  i--)
-        y += fir->coeffs[i]*fir->history[i - offset1];
-    for (  ;  i >= 0;  i--)
-        y += fir->coeffs[i]*fir->history[i + offset2];
-    if (fir->curr_pos <= 0)
-    	fir->curr_pos = fir->taps;
-    fir->curr_pos--;
-    return  (int16_t) y;
-}
-/*- End of function --------------------------------------------------------*/
-#endif
-
-#ifdef __cplusplus
-}
-#endif
 
 #endif
 /*- End of file ------------------------------------------------------------*/
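
A standalone sketch of what the generic (non-MMX/SSE2/Blackfin) branch of
fir16() computes, written as plain userspace C so it can be run directly;
the 4-tap Q15 coefficients and input values are made up.  The history is a
circular buffer, so the dot product is split into two runs either side of
curr_pos:

#include <stdint.h>
#include <stdio.h>

#define TAPS 4

/* Same two-loop convolution as the generic branch of fir16(). */
static int16_t fir16_generic(const int16_t *coeffs, int16_t *history,
			     int *curr_pos, int taps, int16_t sample)
{
	int32_t y = 0;
	int offset1, offset2, i;

	history[*curr_pos] = sample;
	offset2 = *curr_pos;
	offset1 = taps - offset2;
	for (i = taps - 1; i >= offset1; i--)
		y += coeffs[i] * history[i - offset1];
	for (; i >= 0; i--)
		y += coeffs[i] * history[i + offset2];
	if (*curr_pos <= 0)
		*curr_pos = taps;
	(*curr_pos)--;
	return (int16_t)(y >> 15);
}

int main(void)
{
	/* Q15 taps of ~0.25 each, i.e. a 4-sample moving average. */
	const int16_t coeffs[TAPS] = { 8192, 8192, 8192, 8192 };
	int16_t history[TAPS] = { 0 };
	int curr_pos = TAPS - 1;
	const int16_t in[] = { 1000, 1000, 1000, 1000, 1000, 1000 };
	unsigned int n;

	for (n = 0; n < sizeof(in) / sizeof(in[0]); n++)
		printf("%d\n", fir16_generic(coeffs, history, &curr_pos,
					     TAPS, in[n]));
	return 0;
}

The output ramps 250, 500, 750, 1000, ... as the history fills, which is the
expected moving-average behaviour.
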
diff --git a/drivers/staging/echo/mmx.h b/drivers/staging/echo/mmx.h
index b5a3964..35412ef 100644
--- a/drivers/staging/echo/mmx.h
+++ b/drivers/staging/echo/mmx.h
@@ -27,24 +27,23 @@
  * values by ULL, lest they be truncated by the compiler)
  */
 
-typedef        union {
-        long long               q;      /* Quadword (64-bit) value */
-        unsigned long long      uq;     /* Unsigned Quadword */
-        int                     d[2];   /* 2 Doubleword (32-bit) values */
-        unsigned int            ud[2];  /* 2 Unsigned Doubleword */
-        short                   w[4];   /* 4 Word (16-bit) values */
-        unsigned short          uw[4];  /* 4 Unsigned Word */
-        char                    b[8];   /* 8 Byte (8-bit) values */
-        unsigned char           ub[8];  /* 8 Unsigned Byte */
-        float                   s[2];   /* Single-precision (32-bit) value */
-} mmx_t;        /* On an 8-byte (64-bit) boundary */
+typedef union {
+	long long q;		/* Quadword (64-bit) value */
+	unsigned long long uq;	/* Unsigned Quadword */
+	int d[2];		/* 2 Doubleword (32-bit) values */
+	unsigned int ud[2];	/* 2 Unsigned Doubleword */
+	short w[4];		/* 4 Word (16-bit) values */
+	unsigned short uw[4];	/* 4 Unsigned Word */
+	char b[8];		/* 8 Byte (8-bit) values */
+	unsigned char ub[8];	/* 8 Unsigned Byte */
+	float s[2];		/* Single-precision (32-bit) value */
+} mmx_t;			/* On an 8-byte (64-bit) boundary */
 
 /* SSE registers */
 typedef union {
 	char b[16];
 } xmm_t;
 
-
 #define         mmx_i2r(op,imm,reg) \
         __asm__ __volatile__ (#op " %0, %%" #reg \
                               : /* nothing */ \
@@ -63,7 +62,6 @@
 #define         mmx_r2r(op,regs,regd) \
         __asm__ __volatile__ (#op " %" #regs ", %" #regd)
 
-
 #define         emms() __asm__ __volatile__ ("emms")
 
 #define         movd_m2r(var,reg)           mmx_m2r (movd, var, reg)
@@ -192,16 +190,13 @@
 #define         pxor_m2r(var,reg)           mmx_m2r (pxor, var, reg)
 #define         pxor_r2r(regs,regd)         mmx_r2r (pxor, regs, regd)
 
-
 /* 3DNOW extensions */
 
 #define         pavgusb_m2r(var,reg)        mmx_m2r (pavgusb, var, reg)
 #define         pavgusb_r2r(regs,regd)      mmx_r2r (pavgusb, regs, regd)
 
-
 /* AMD MMX extensions - also available in intel SSE */
 
-
 #define         mmx_m2ri(op,mem,reg,imm) \
         __asm__ __volatile__ (#op " %1, %0, %%" #reg \
                               : /* nothing */ \
@@ -216,7 +211,6 @@
                               : /* nothing */ \
                               : "m" (mem))
 
-
 #define         maskmovq(regs,maskreg)      mmx_r2ri (maskmovq, regs, maskreg)
 
 #define         movntq_r2m(mmreg,var)       mmx_r2m (movntq, mmreg, var)
@@ -284,5 +278,4 @@
 #define         punpcklqdq_r2r(regs,regd)   mmx_r2r (punpcklqdq, regs, regd)
 #define         punpckhqdq_r2r(regs,regd)   mmx_r2r (punpckhqdq, regs, regd)
 
-
 #endif /* AVCODEC_I386MMX_H */
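
For reference, the stringification in these wrappers means a call such as
pmaddwd_r2r(mm1, mm0) (used by fir.h above) pastes down to a single basic
asm statement; an illustrative, x86-only expansion:

#if defined(__i386__) || defined(__x86_64__)
/* pmaddwd_r2r(mm1, mm0) == mmx_r2r(pmaddwd, mm1, mm0), which expands to: */
static inline void pmaddwd_mm1_mm0(void)
{
	__asm__ __volatile__("pmaddwd %mm1, %mm0");
}
#endif
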
diff --git a/drivers/staging/echo/oslec.h b/drivers/staging/echo/oslec.h
new file mode 100644
index 0000000..bad8523
--- /dev/null
+++ b/drivers/staging/echo/oslec.h
@@ -0,0 +1,86 @@
+/*
+ *  OSLEC - A line echo canceller.  This code is being developed
+ *          against, and partially complies with, G.168.  It uses code from SpanDSP.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *         and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __OSLEC_H
+#define __OSLEC_H
+
+/* TODO: document interface */
+
+/* Mask bits for the adaption mode */
+#define ECHO_CAN_USE_ADAPTION	0x01
+#define ECHO_CAN_USE_NLP	0x02
+#define ECHO_CAN_USE_CNG	0x04
+#define ECHO_CAN_USE_CLIP	0x08
+#define ECHO_CAN_USE_TX_HPF	0x10
+#define ECHO_CAN_USE_RX_HPF	0x20
+#define ECHO_CAN_DISABLE	0x40
+
+/*!
+    G.168 echo canceller descriptor. This defines the working state for a line
+    echo canceller.
+*/
+struct oslec_state;
+
+/*! Create a voice echo canceller context.
+    \param len The length of the canceller, in samples.
+    \return The new canceller context, or NULL if the canceller could not be created.
+*/
+struct oslec_state *oslec_create(int len, int adaption_mode);
+
+/*! Free a voice echo canceller context.
+    \param ec The echo canceller context.
+*/
+void oslec_free(struct oslec_state *ec);
+
+/*! Flush (reinitialise) a voice echo canceller context.
+    \param ec The echo canceller context.
+*/
+void oslec_flush(struct oslec_state *ec);
+
+/*! Set the adaption mode of a voice echo canceller context.
+    \param ec The echo canceller context.
+    \param adaption_mode The adaption mode to set.
+*/
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
+
+void oslec_snapshot(struct oslec_state *ec);
+
+/*! Process a sample through a voice echo canceller.
+    \param ec The echo canceller context.
+    \param tx The transmitted audio sample.
+    \param rx The received audio sample.
+    \return The clean (echo cancelled) received sample.
+*/
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
+
+/*! High pass filter the tx signal.
+    \param ec The echo canceller context.
+    \param tx The transmitted audio sample.
+    \return The HP filtered transmit sample; send this to your D/A.
+*/
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
+
+#endif /* __OSLEC_H */
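
A minimal usage sketch of the interface declared above; the EC_TAPS value and
the demo module framing are invented for illustration.  A caller creates a
canceller sized in samples, optionally DC-blocks the transmit path with
oslec_hpf_tx(), and feeds each tx/rx pair through oslec_update() to get the
echo-cancelled receive sample:

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include "oslec.h"

#define EC_TAPS 256	/* 32 ms tail at 8 kHz - arbitrary for this example */

static struct oslec_state *ec;

/* Per tx/rx sample pair, e.g. called from a TDM interrupt handler. */
static int16_t ec_process(int16_t tx, int16_t rx)
{
	tx = oslec_hpf_tx(ec, tx);		/* DC-block the transmit side */
	return oslec_update(ec, tx, rx);	/* echo-cancelled rx sample */
}

static int __init oslec_demo_init(void)
{
	ec = oslec_create(EC_TAPS, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP |
				   ECHO_CAN_USE_CLIP | ECHO_CAN_USE_TX_HPF);
	if (!ec)
		return -ENOMEM;
	(void)ec_process(0, 0);			/* show the per-sample call order */
	return 0;
}

static void __exit oslec_demo_exit(void)
{
	oslec_free(ec);
}

module_init(oslec_demo_init);
module_exit(oslec_demo_exit);
MODULE_LICENSE("GPL");
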
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 6c4fa54..9dd6dfd 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -84,7 +84,6 @@
 #include <linux/if_arp.h>
 #include <linux/ioport.h>
 #include <linux/random.h>
-#include <linux/delay.h>
 
 #include "et1310_phy.h"
 #include "et1310_pm.h"
@@ -95,7 +94,6 @@
 #include "et131x_initpci.h"
 
 #include "et1310_address_map.h"
-#include "et1310_jagcore.h"
 #include "et1310_tx.h"
 #include "et1310_rx.h"
 #include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_debug.c b/drivers/staging/et131x/et131x_debug.c
index 9ee5bce..d1dd46e 100644
--- a/drivers/staging/et131x/et131x_debug.c
+++ b/drivers/staging/et131x/et131x_debug.c
@@ -97,7 +97,6 @@
 #include "et131x_isr.h"
 
 #include "et1310_address_map.h"
-#include "et1310_jagcore.h"
 #include "et1310_tx.h"
 #include "et1310_rx.h"
 #include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 4c6f171..a18c499 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -97,7 +97,6 @@
 #include "et131x_isr.h"
 
 #include "et1310_address_map.h"
-#include "et1310_jagcore.h"
 #include "et1310_tx.h"
 #include "et1310_rx.h"
 #include "et1310_mac.h"
diff --git a/drivers/staging/go7007/Kconfig b/drivers/staging/go7007/Kconfig
index 57a121c..593fdb7 100644
--- a/drivers/staging/go7007/Kconfig
+++ b/drivers/staging/go7007/Kconfig
@@ -1,10 +1,12 @@
 config VIDEO_GO7007
 	tristate "Go 7007 support"
 	depends on VIDEO_DEV && PCI && I2C && INPUT
+	depends on SND
 	select VIDEOBUF_DMA_SG
 	select VIDEO_IR
 	select VIDEO_TUNER
 	select VIDEO_TVEEPROM
+	select SND_PCM
 	select CRC32
 	default N
 	---help---
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 81ae4b0..e4ead96 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -16,7 +16,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
diff --git a/drivers/staging/go7007/go7007-fw.c b/drivers/staging/go7007/go7007-fw.c
index c2aea10..a0e17b0 100644
--- a/drivers/staging/go7007/go7007-fw.c
+++ b/drivers/staging/go7007/go7007-fw.c
@@ -26,7 +26,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/device.h>
diff --git a/drivers/staging/go7007/go7007-i2c.c b/drivers/staging/go7007/go7007-i2c.c
index 10baae3..cd55b76 100644
--- a/drivers/staging/go7007/go7007-i2c.c
+++ b/drivers/staging/go7007/go7007-i2c.c
@@ -15,7 +15,6 @@
  * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c
index d4ed6d2..3f5ee34 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/go7007/go7007-usb.c
@@ -16,7 +16,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/wait.h>
diff --git a/drivers/staging/go7007/snd-go7007.c b/drivers/staging/go7007/snd-go7007.c
index 382740c..a7de401 100644
--- a/drivers/staging/go7007/snd-go7007.c
+++ b/drivers/staging/go7007/snd-go7007.c
@@ -17,7 +17,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
diff --git a/drivers/staging/go7007/wis-ov7640.c b/drivers/staging/go7007/wis-ov7640.c
index f5f11e9..2f9efca 100644
--- a/drivers/staging/go7007/wis-ov7640.c
+++ b/drivers/staging/go7007/wis-ov7640.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index c1aff1b..1168972 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 #include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index 5c94c88..59417a7 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 #include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c
index 5997fb4..5a91ee4 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/go7007/wis-sony-tuner.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 #include <media/tuner.h>
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index 27fe4d0..57b8f2b 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 #include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index d8e4196..40627b2 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 #include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-uda1342.c b/drivers/staging/go7007/wis-uda1342.c
index a0894e3..555645c 100644
--- a/drivers/staging/go7007/wis-uda1342.c
+++ b/drivers/staging/go7007/wis-uda1342.c
@@ -17,7 +17,6 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/version.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
 #include <media/tvaudio.h>
diff --git a/drivers/staging/me4000/me4000.c b/drivers/staging/me4000/me4000.c
index 862dd7f..0b33773 100644
--- a/drivers/staging/me4000/me4000.c
+++ b/drivers/staging/me4000/me4000.c
@@ -25,24 +25,21 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/unistd.h>
 #include <linux/list.h>
 #include <linux/proc_fs.h>
-
+#include <linux/types.h>
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
-#include <linux/types.h>
-
-#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
 
 /* Include-File for the Meilhaus ME-4000 I/O board */
 #include "me4000.h"
@@ -57,14 +54,14 @@
 MODULE_LICENSE("GPL");
 
 /* Board specific data are kept in a global list */
-LIST_HEAD(me4000_board_info_list);
+static LIST_HEAD(me4000_board_info_list);
 
 /* Major Device Numbers. 0 means to get it automatically from the System */
-static int me4000_ao_major_driver_no = 0;
-static int me4000_ai_major_driver_no = 0;
-static int me4000_dio_major_driver_no = 0;
-static int me4000_cnt_major_driver_no = 0;
-static int me4000_ext_int_major_driver_no = 0;
+static int me4000_ao_major_driver_no;
+static int me4000_ai_major_driver_no;
+static int me4000_dio_major_driver_no;
+static int me4000_cnt_major_driver_no;
+static int me4000_ext_int_major_driver_no;
 
 /* Let the user specify a custom major driver number */
 module_param(me4000_ao_major_driver_no, int, 0);
@@ -88,36 +85,22 @@
 		 "Major driver number for external interrupt (default 0)");
 
 /*-----------------------------------------------------------------------------
-  Module stuff
-  ---------------------------------------------------------------------------*/
-int init_module(void);
-void cleanup_module(void);
-
-/*-----------------------------------------------------------------------------
   Board detection and initialization
   ---------------------------------------------------------------------------*/
 static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id);
-static int me4000_xilinx_download(me4000_info_t *);
-static int me4000_reset_board(me4000_info_t *);
+static int me4000_xilinx_download(struct me4000_info *);
+static int me4000_reset_board(struct me4000_info *);
 
 static void clear_board_info_list(void);
-static int get_registers(struct pci_dev *dev, me4000_info_t * info);
-static int init_board_info(struct pci_dev *dev, me4000_info_t * board_info);
-static int alloc_ao_contexts(me4000_info_t * info);
-static void release_ao_contexts(me4000_info_t * board_info);
-static int alloc_ai_context(me4000_info_t * info);
-static int alloc_dio_context(me4000_info_t * info);
-static int alloc_cnt_context(me4000_info_t * info);
-static int alloc_ext_int_context(me4000_info_t * info);
-
+static void release_ao_contexts(struct me4000_info *board_info);
 /*-----------------------------------------------------------------------------
   Stuff used by all device parts
   ---------------------------------------------------------------------------*/
 static int me4000_open(struct inode *, struct file *);
 static int me4000_release(struct inode *, struct file *);
 
-static int me4000_get_user_info(me4000_user_info_t *,
-				me4000_info_t * board_info);
+static int me4000_get_user_info(struct me4000_user_info *,
+				struct me4000_info *board_info);
 static int me4000_read_procmem(char *, char **, off_t, int, int *, void *);
 
 /*-----------------------------------------------------------------------------
@@ -140,40 +123,42 @@
 static unsigned int me4000_ao_poll_cont(struct file *, poll_table *);
 static int me4000_ao_fsync_cont(struct file *, struct dentry *, int);
 
-static int me4000_ao_start(unsigned long *, me4000_ao_context_t *);
-static int me4000_ao_stop(me4000_ao_context_t *);
-static int me4000_ao_immediate_stop(me4000_ao_context_t *);
-static int me4000_ao_timer_set_divisor(u32 *, me4000_ao_context_t *);
-static int me4000_ao_preload(me4000_ao_context_t *);
-static int me4000_ao_preload_update(me4000_ao_context_t *);
-static int me4000_ao_ex_trig_set_edge(int *, me4000_ao_context_t *);
-static int me4000_ao_ex_trig_enable(me4000_ao_context_t *);
-static int me4000_ao_ex_trig_disable(me4000_ao_context_t *);
-static int me4000_ao_prepare(me4000_ao_context_t * ao_info);
-static int me4000_ao_reset(me4000_ao_context_t * ao_info);
-static int me4000_ao_enable_do(me4000_ao_context_t *);
-static int me4000_ao_disable_do(me4000_ao_context_t *);
-static int me4000_ao_fsm_state(int *, me4000_ao_context_t *);
+static int me4000_ao_start(unsigned long *, struct me4000_ao_context *);
+static int me4000_ao_stop(struct me4000_ao_context *);
+static int me4000_ao_immediate_stop(struct me4000_ao_context *);
+static int me4000_ao_timer_set_divisor(u32 *, struct me4000_ao_context *);
+static int me4000_ao_preload(struct me4000_ao_context *);
+static int me4000_ao_preload_update(struct me4000_ao_context *);
+static int me4000_ao_ex_trig_set_edge(int *, struct me4000_ao_context *);
+static int me4000_ao_ex_trig_enable(struct me4000_ao_context *);
+static int me4000_ao_ex_trig_disable(struct me4000_ao_context *);
+static int me4000_ao_prepare(struct me4000_ao_context *ao_info);
+static int me4000_ao_reset(struct me4000_ao_context *ao_info);
+static int me4000_ao_enable_do(struct me4000_ao_context *);
+static int me4000_ao_disable_do(struct me4000_ao_context *);
+static int me4000_ao_fsm_state(int *, struct me4000_ao_context *);
 
-static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context);
-static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context);
-static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context);
-static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * channels,
-					 me4000_ao_context_t * ao_context);
+static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context);
+static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context);
+static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context);
+static int me4000_ao_simultaneous_update(
+					struct me4000_ao_channel_list *channels,
+					struct me4000_ao_context *ao_context);
 
-static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context);
-static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context);
-static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context);
+static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context);
+static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context);
+static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context);
 
 static int me4000_ao_ex_trig_timeout(unsigned long *arg,
-				     me4000_ao_context_t * ao_context);
+				     struct me4000_ao_context *ao_context);
 static int me4000_ao_get_free_buffer(unsigned long *arg,
-				     me4000_ao_context_t * ao_context);
+				     struct me4000_ao_context *ao_context);
 
 /*-----------------------------------------------------------------------------
   Analog input stuff
   ---------------------------------------------------------------------------*/
-static int me4000_ai_single(me4000_ai_single_t *, me4000_ai_context_t *);
+static int me4000_ai_single(struct me4000_ai_single *,
+				struct me4000_ai_context *);
 static int me4000_ai_ioctl_sing(struct inode *, struct file *, unsigned int,
 				unsigned long);
 
@@ -186,68 +171,69 @@
 static int me4000_ai_ioctl_ext(struct inode *, struct file *, unsigned int,
 			       unsigned long);
 
-static int me4000_ai_prepare(me4000_ai_context_t * ai_context);
-static int me4000_ai_reset(me4000_ai_context_t * ai_context);
-static int me4000_ai_config(me4000_ai_config_t *, me4000_ai_context_t *);
-static int me4000_ai_start(me4000_ai_context_t *);
-static int me4000_ai_start_ex(unsigned long *, me4000_ai_context_t *);
-static int me4000_ai_stop(me4000_ai_context_t *);
-static int me4000_ai_immediate_stop(me4000_ai_context_t *);
-static int me4000_ai_ex_trig_enable(me4000_ai_context_t *);
-static int me4000_ai_ex_trig_disable(me4000_ai_context_t *);
-static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t *,
-				   me4000_ai_context_t *);
-static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
-			      me4000_ai_context_t * ai_context);
-static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context);
-static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context);
-static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context);
-static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context);
-static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context);
+static int me4000_ai_prepare(struct me4000_ai_context *ai_context);
+static int me4000_ai_reset(struct me4000_ai_context *ai_context);
+static int me4000_ai_config(struct me4000_ai_config *,
+				struct me4000_ai_context *);
+static int me4000_ai_start(struct me4000_ai_context *);
+static int me4000_ai_start_ex(unsigned long *, struct me4000_ai_context *);
+static int me4000_ai_stop(struct me4000_ai_context *);
+static int me4000_ai_immediate_stop(struct me4000_ai_context *);
+static int me4000_ai_ex_trig_enable(struct me4000_ai_context *);
+static int me4000_ai_ex_trig_disable(struct me4000_ai_context *);
+static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *,
+				   struct me4000_ai_context *);
+static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
+			      struct me4000_ai_context *ai_context);
+static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context);
+static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context);
+static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context);
+static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context);
+static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context);
 static int me4000_ai_get_count_buffer(unsigned long *arg,
-				      me4000_ai_context_t * ai_context);
+				      struct me4000_ai_context *ai_context);
 
 /*-----------------------------------------------------------------------------
   EEPROM stuff
   ---------------------------------------------------------------------------*/
-static int me4000_eeprom_read(me4000_eeprom_t * arg,
-			      me4000_ai_context_t * ai_context);
-static int me4000_eeprom_write(me4000_eeprom_t * arg,
-			       me4000_ai_context_t * ai_context);
-static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
-				      unsigned long cmd, int length);
-static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
-			    int length);
+static int me4000_eeprom_read(struct me4000_eeprom *arg,
+			      struct me4000_ai_context *ai_context);
+static int me4000_eeprom_write(struct me4000_eeprom *arg,
+			       struct me4000_ai_context *ai_context);
 
 /*-----------------------------------------------------------------------------
   Digital I/O stuff
   ---------------------------------------------------------------------------*/
 static int me4000_dio_ioctl(struct inode *, struct file *, unsigned int,
 			    unsigned long);
-static int me4000_dio_config(me4000_dio_config_t *, me4000_dio_context_t *);
-static int me4000_dio_get_byte(me4000_dio_byte_t *, me4000_dio_context_t *);
-static int me4000_dio_set_byte(me4000_dio_byte_t *, me4000_dio_context_t *);
-static int me4000_dio_reset(me4000_dio_context_t *);
+static int me4000_dio_config(struct me4000_dio_config *,
+				struct me4000_dio_context *);
+static int me4000_dio_get_byte(struct me4000_dio_byte *,
+				struct me4000_dio_context *);
+static int me4000_dio_set_byte(struct me4000_dio_byte *,
+				struct me4000_dio_context *);
+static int me4000_dio_reset(struct me4000_dio_context *);
 
 /*-----------------------------------------------------------------------------
   Counter stuff
   ---------------------------------------------------------------------------*/
 static int me4000_cnt_ioctl(struct inode *, struct file *, unsigned int,
 			    unsigned long);
-static int me4000_cnt_config(me4000_cnt_config_t *, me4000_cnt_context_t *);
-static int me4000_cnt_read(me4000_cnt_t *, me4000_cnt_context_t *);
-static int me4000_cnt_write(me4000_cnt_t *, me4000_cnt_context_t *);
-static int me4000_cnt_reset(me4000_cnt_context_t *);
+static int me4000_cnt_config(struct me4000_cnt_config *,
+				struct me4000_cnt_context *);
+static int me4000_cnt_read(struct me4000_cnt *, struct me4000_cnt_context *);
+static int me4000_cnt_write(struct me4000_cnt *, struct me4000_cnt_context *);
+static int me4000_cnt_reset(struct me4000_cnt_context *);
 
 /*-----------------------------------------------------------------------------
   External interrupt routines
   ---------------------------------------------------------------------------*/
 static int me4000_ext_int_ioctl(struct inode *, struct file *, unsigned int,
 				unsigned long);
-static int me4000_ext_int_enable(me4000_ext_int_context_t *);
-static int me4000_ext_int_disable(me4000_ext_int_context_t *);
+static int me4000_ext_int_enable(struct me4000_ext_int_context *);
+static int me4000_ext_int_disable(struct me4000_ext_int_context *);
 static int me4000_ext_int_count(unsigned long *arg,
-				me4000_ext_int_context_t * ext_int_context);
+				struct me4000_ext_int_context *ext_int_context);
 static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode);
 
 /*-----------------------------------------------------------------------------
@@ -260,27 +246,18 @@
 /*-----------------------------------------------------------------------------
   Inline functions
   ---------------------------------------------------------------------------*/
-static int inline me4000_buf_count(me4000_circ_buf_t, int);
-static int inline me4000_buf_space(me4000_circ_buf_t, int);
-static int inline me4000_space_to_end(me4000_circ_buf_t, int);
-static int inline me4000_values_to_end(me4000_circ_buf_t, int);
 
-static void inline me4000_outb(unsigned char value, unsigned long port);
-static void inline me4000_outl(unsigned long value, unsigned long port);
-static unsigned long inline me4000_inl(unsigned long port);
-static unsigned char inline me4000_inb(unsigned long port);
-
-static int me4000_buf_count(me4000_circ_buf_t buf, int size)
+static int inline me4000_buf_count(struct me4000_circ_buf buf, int size)
 {
 	return ((buf.head - buf.tail) & (size - 1));
 }
 
-static int me4000_buf_space(me4000_circ_buf_t buf, int size)
+static int inline me4000_buf_space(struct me4000_circ_buf buf, int size)
 {
 	return ((buf.tail - (buf.head + 1)) & (size - 1));
 }
 
-static int me4000_values_to_end(me4000_circ_buf_t buf, int size)
+static int inline me4000_values_to_end(struct me4000_circ_buf buf, int size)
 {
 	int end;
 	int n;
@@ -289,7 +266,7 @@
 	return (n < end) ? n : end;
 }
 
-static int me4000_space_to_end(me4000_circ_buf_t buf, int size)
+static int inline me4000_space_to_end(struct me4000_circ_buf buf, int size)
 {
 	int end;
 	int n;
@@ -299,19 +276,19 @@
 	return (n <= end) ? n : (end + 1);
 }
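
An aside on the circular-buffer helpers above: because the buffer size is a
power of two, masking with (size - 1) keeps the head/tail arithmetic correct
even after the indices wrap.  A small userspace sketch (the struct and the
head/tail values are made up; the driver's real type is struct
me4000_circ_buf):

#include <stdio.h>

struct circ {
	int head;
	int tail;
};

/* Same arithmetic as me4000_buf_count() and me4000_buf_space(). */
static int buf_count(struct circ b, int size)
{
	return (b.head - b.tail) & (size - 1);
}

static int buf_space(struct circ b, int size)
{
	return (b.tail - (b.head + 1)) & (size - 1);
}

int main(void)
{
	struct circ b = { .head = 2, .tail = 6 };	/* head has wrapped past the end */
	int size = 8;					/* must be a power of two */

	printf("count=%d space=%d\n", buf_count(b, size), buf_space(b, size));
	/* prints count=4 space=3: four entries used, three free,
	   with one slot always kept empty */
	return 0;
}
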
 
-static void me4000_outb(unsigned char value, unsigned long port)
+static void inline me4000_outb(unsigned char value, unsigned long port)
 {
 	PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port);
 	outb(value, port);
 }
 
-static void me4000_outl(unsigned long value, unsigned long port)
+static void inline me4000_outl(unsigned long value, unsigned long port)
 {
 	PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port);
 	outl(value, port);
 }
 
-static unsigned long me4000_inl(unsigned long port)
+static unsigned long inline me4000_inl(unsigned long port)
 {
 	unsigned long value;
 	value = inl(port);
@@ -319,7 +296,7 @@
 	return value;
 }
 
-static unsigned char me4000_inb(unsigned long port)
+static unsigned char inline me4000_inb(unsigned long port)
 {
 	unsigned char value;
 	value = inb(port);
@@ -327,102 +304,102 @@
 	return value;
 }
 
-struct pci_driver me4000_driver = {
+static struct pci_driver me4000_driver = {
 	.name = ME4000_NAME,
 	.id_table = me4000_pci_table,
 	.probe = me4000_probe
 };
 
 static struct file_operations me4000_ao_fops_sing = {
-      owner:THIS_MODULE,
-      write:me4000_ao_write_sing,
-      ioctl:me4000_ao_ioctl_sing,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .write = me4000_ao_write_sing,
+      .ioctl = me4000_ao_ioctl_sing,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_ao_fops_wrap = {
-      owner:THIS_MODULE,
-      write:me4000_ao_write_wrap,
-      ioctl:me4000_ao_ioctl_wrap,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .write = me4000_ao_write_wrap,
+      .ioctl = me4000_ao_ioctl_wrap,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_ao_fops_cont = {
-      owner:THIS_MODULE,
-      write:me4000_ao_write_cont,
-      poll:me4000_ao_poll_cont,
-      ioctl:me4000_ao_ioctl_cont,
-      open:me4000_open,
-      release:me4000_release,
-      fsync:me4000_ao_fsync_cont,
+      .owner = THIS_MODULE,
+      .write = me4000_ao_write_cont,
+      .poll = me4000_ao_poll_cont,
+      .ioctl = me4000_ao_ioctl_cont,
+      .open = me4000_open,
+      .release = me4000_release,
+      .fsync = me4000_ao_fsync_cont,
 };
 
 static struct file_operations me4000_ai_fops_sing = {
-      owner:THIS_MODULE,
-      ioctl:me4000_ai_ioctl_sing,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .ioctl = me4000_ai_ioctl_sing,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_ai_fops_cont_sw = {
-      owner:THIS_MODULE,
-      read:me4000_ai_read,
-      poll:me4000_ai_poll,
-      ioctl:me4000_ai_ioctl_sw,
-      open:me4000_open,
-      release:me4000_release,
-      fasync:me4000_ai_fasync,
+      .owner = THIS_MODULE,
+      .read = me4000_ai_read,
+      .poll = me4000_ai_poll,
+      .ioctl = me4000_ai_ioctl_sw,
+      .open = me4000_open,
+      .release = me4000_release,
+      .fasync = me4000_ai_fasync,
 };
 
 static struct file_operations me4000_ai_fops_cont_et = {
-      owner:THIS_MODULE,
-      read:me4000_ai_read,
-      poll:me4000_ai_poll,
-      ioctl:me4000_ai_ioctl_ext,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .read = me4000_ai_read,
+      .poll = me4000_ai_poll,
+      .ioctl = me4000_ai_ioctl_ext,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_ai_fops_cont_et_value = {
-      owner:THIS_MODULE,
-      read:me4000_ai_read,
-      poll:me4000_ai_poll,
-      ioctl:me4000_ai_ioctl_ext,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .read = me4000_ai_read,
+      .poll = me4000_ai_poll,
+      .ioctl = me4000_ai_ioctl_ext,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_ai_fops_cont_et_chanlist = {
-      owner:THIS_MODULE,
-      read:me4000_ai_read,
-      poll:me4000_ai_poll,
-      ioctl:me4000_ai_ioctl_ext,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .read = me4000_ai_read,
+      .poll = me4000_ai_poll,
+      .ioctl = me4000_ai_ioctl_ext,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_dio_fops = {
-      owner:THIS_MODULE,
-      ioctl:me4000_dio_ioctl,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .ioctl = me4000_dio_ioctl,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_cnt_fops = {
-      owner:THIS_MODULE,
-      ioctl:me4000_cnt_ioctl,
-      open:me4000_open,
-      release:me4000_release,
+      .owner = THIS_MODULE,
+      .ioctl = me4000_cnt_ioctl,
+      .open = me4000_open,
+      .release = me4000_release,
 };
 
 static struct file_operations me4000_ext_int_fops = {
-      owner:THIS_MODULE,
-      ioctl:me4000_ext_int_ioctl,
-      open:me4000_open,
-      release:me4000_release,
-      fasync:me4000_ext_int_fasync,
+      .owner = THIS_MODULE,
+      .ioctl = me4000_ext_int_ioctl,
+      .open = me4000_open,
+      .release = me4000_release,
+      .fasync = me4000_ext_int_fasync,
 };
 
 static struct file_operations *me4000_ao_fops_array[] = {
@@ -439,9 +416,9 @@
 	&me4000_ai_fops_cont_et_chanlist,	// work through one channel list by external trigger
 };
 
-int __init me4000_init_module(void)
+static int __init me4000_init_module(void)
 {
-	int result = 0;
+	int result;
 
 	CALL_PDEBUG("init_module() is executed\n");
 
@@ -533,26 +510,26 @@
 
 	return 0;
 
-      INIT_ERROR_7:
+INIT_ERROR_7:
 	unregister_chrdev(me4000_ext_int_major_driver_no, ME4000_EXT_INT_NAME);
 
-      INIT_ERROR_6:
+INIT_ERROR_6:
 	unregister_chrdev(me4000_cnt_major_driver_no, ME4000_CNT_NAME);
 
-      INIT_ERROR_5:
+INIT_ERROR_5:
 	unregister_chrdev(me4000_dio_major_driver_no, ME4000_DIO_NAME);
 
-      INIT_ERROR_4:
+INIT_ERROR_4:
 	unregister_chrdev(me4000_ai_major_driver_no, ME4000_AI_NAME);
 
-      INIT_ERROR_3:
+INIT_ERROR_3:
 	unregister_chrdev(me4000_ao_major_driver_no, ME4000_AO_NAME);
 
-      INIT_ERROR_2:
+INIT_ERROR_2:
 	pci_unregister_driver(&me4000_driver);
 	clear_board_info_list();
 
-      INIT_ERROR_1:
+INIT_ERROR_1:
 	return result;
 }
 
@@ -562,18 +539,18 @@
 {
 	struct list_head *board_p;
 	struct list_head *dac_p;
-	me4000_info_t *board_info;
-	me4000_ao_context_t *ao_context;
+	struct me4000_info *board_info;
+	struct me4000_ao_context *ao_context;
 
 	/* Clear context lists */
 	for (board_p = me4000_board_info_list.next;
 	     board_p != &me4000_board_info_list; board_p = board_p->next) {
-		board_info = list_entry(board_p, me4000_info_t, list);
+		board_info = list_entry(board_p, struct me4000_info, list);
 		/* Clear analog output context list */
 		while (!list_empty(&board_info->ao_context_list)) {
 			dac_p = board_info->ao_context_list.next;
 			ao_context =
-			    list_entry(dac_p, me4000_ao_context_t, list);
+			    list_entry(dac_p, struct me4000_ao_context, list);
 			me4000_ao_reset(ao_context);
 			free_irq(ao_context->irq, ao_context);
 			if (ao_context->circ_buf.buf)
@@ -600,14 +577,14 @@
 	/* Clear the board info list */
 	while (!list_empty(&me4000_board_info_list)) {
 		board_p = me4000_board_info_list.next;
-		board_info = list_entry(board_p, me4000_info_t, list);
+		board_info = list_entry(board_p, struct me4000_info, list);
 		pci_release_regions(board_info->pci_dev_p);
 		list_del(board_p);
 		kfree(board_info);
 	}
 }
 
-static int get_registers(struct pci_dev *dev, me4000_info_t * board_info)
+static int get_registers(struct pci_dev *dev, struct me4000_info *board_info)
 {
 
 	/*--------------------------- plx regbase ---------------------------------*/
@@ -667,20 +644,20 @@
 }
 
 static int init_board_info(struct pci_dev *pci_dev_p,
-			   me4000_info_t * board_info)
+			   struct me4000_info *board_info)
 {
 	int i;
 	int result;
 	struct list_head *board_p;
 	board_info->pci_dev_p = pci_dev_p;
 
-	for (i = 0; i < ME4000_BOARD_VERSIONS; i++) {
+	for (i = 0; i < ARRAY_SIZE(me4000_boards); i++) {
 		if (me4000_boards[i].device_id == pci_dev_p->device) {
 			board_info->board_p = &me4000_boards[i];
 			break;
 		}
 	}
-	if (i == ME4000_BOARD_VERSIONS) {
+	if (i == ARRAY_SIZE(me4000_boards)) {
 		printk(KERN_ERR
 		       "ME4000:init_board_info():Device ID not valid\n");
 		return -ENODEV;
@@ -755,21 +732,21 @@
 	return 0;
 }
 
-static int alloc_ao_contexts(me4000_info_t * info)
+static int alloc_ao_contexts(struct me4000_info *info)
 {
 	int i;
 	int err;
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 
 	for (i = 0; i < info->board_p->ao.count; i++) {
-		ao_context = kmalloc(sizeof(me4000_ao_context_t), GFP_KERNEL);
+		ao_context = kzalloc(sizeof(struct me4000_ao_context),
+								GFP_KERNEL);
 		if (!ao_context) {
 			printk(KERN_ERR
 			       "alloc_ao_contexts():Can't get memory for ao context\n");
 			release_ao_contexts(info);
 			return -ENOMEM;
 		}
-		memset(ao_context, 0, sizeof(me4000_ao_context_t));
 
 		spin_lock_init(&ao_context->use_lock);
 		spin_lock_init(&ao_context->int_lock);
@@ -780,15 +757,13 @@
 		if (info->board_p->ao.fifo_count) {
 			/* Allocate circular buffer */
 			ao_context->circ_buf.buf =
-			    kmalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL);
+			    kzalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL);
 			if (!ao_context->circ_buf.buf) {
 				printk(KERN_ERR
 				       "alloc_ao_contexts():Can't get circular buffer\n");
 				release_ao_contexts(info);
 				return -ENOMEM;
 			}
-			memset(ao_context->circ_buf.buf, 0,
-			       ME4000_AO_BUFFER_SIZE);
 
 			/* Clear the circular buffer */
 			ao_context->circ_buf.head = 0;
@@ -872,9 +847,8 @@
 					ME4000_NAME, ao_context);
 			if (err) {
 				printk(KERN_ERR
-				       "alloc_ao_contexts():Can't get interrupt line");
-				if (ao_context->circ_buf.buf)
-					kfree(ao_context->circ_buf.buf);
+				       "%s:Can't get interrupt line", __func__);
+				kfree(ao_context->circ_buf.buf);
 				kfree(ao_context);
 				release_ao_contexts(info);
 				return -ENODEV;
@@ -888,35 +862,34 @@
 	return 0;
 }
 
-static void release_ao_contexts(me4000_info_t * board_info)
+static void release_ao_contexts(struct me4000_info *board_info)
 {
 	struct list_head *dac_p;
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 
 	/* Clear analog output context list */
 	while (!list_empty(&board_info->ao_context_list)) {
 		dac_p = board_info->ao_context_list.next;
-		ao_context = list_entry(dac_p, me4000_ao_context_t, list);
+		ao_context = list_entry(dac_p, struct me4000_ao_context, list);
 		free_irq(ao_context->irq, ao_context);
-		if (ao_context->circ_buf.buf)
-			kfree(ao_context->circ_buf.buf);
+		kfree(ao_context->circ_buf.buf);
 		list_del(dac_p);
 		kfree(ao_context);
 	}
 }
 
-static int alloc_ai_context(me4000_info_t * info)
+static int alloc_ai_context(struct me4000_info *info)
 {
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 
 	if (info->board_p->ai.count) {
-		ai_context = kmalloc(sizeof(me4000_ai_context_t), GFP_KERNEL);
+		ai_context = kzalloc(sizeof(struct me4000_ai_context),
+								GFP_KERNEL);
 		if (!ai_context) {
 			printk(KERN_ERR
 			       "ME4000:alloc_ai_context():Can't get memory for ai context\n");
 			return -ENOMEM;
 		}
-		memset(ai_context, 0, sizeof(me4000_ai_context_t));
 
 		info->ai_context = ai_context;
 
@@ -958,18 +931,18 @@
 	return 0;
 }
 
-static int alloc_dio_context(me4000_info_t * info)
+static int alloc_dio_context(struct me4000_info *info)
 {
-	me4000_dio_context_t *dio_context;
+	struct me4000_dio_context *dio_context;
 
 	if (info->board_p->dio.count) {
-		dio_context = kmalloc(sizeof(me4000_dio_context_t), GFP_KERNEL);
+		dio_context = kzalloc(sizeof(struct me4000_dio_context),
+								GFP_KERNEL);
 		if (!dio_context) {
 			printk(KERN_ERR
 			       "ME4000:alloc_dio_context():Can't get memory for dio context\n");
 			return -ENOMEM;
 		}
-		memset(dio_context, 0, sizeof(me4000_dio_context_t));
 
 		info->dio_context = dio_context;
 
@@ -995,18 +968,18 @@
 	return 0;
 }
 
-static int alloc_cnt_context(me4000_info_t * info)
+static int alloc_cnt_context(struct me4000_info *info)
 {
-	me4000_cnt_context_t *cnt_context;
+	struct me4000_cnt_context *cnt_context;
 
 	if (info->board_p->cnt.count) {
-		cnt_context = kmalloc(sizeof(me4000_cnt_context_t), GFP_KERNEL);
+		cnt_context = kzalloc(sizeof(struct me4000_cnt_context),
+								GFP_KERNEL);
 		if (!cnt_context) {
 			printk(KERN_ERR
 			       "ME4000:alloc_cnt_context():Can't get memory for cnt context\n");
 			return -ENOMEM;
 		}
-		memset(cnt_context, 0, sizeof(me4000_cnt_context_t));
 
 		info->cnt_context = cnt_context;
 
@@ -1026,19 +999,18 @@
 	return 0;
 }
 
-static int alloc_ext_int_context(me4000_info_t * info)
+static int alloc_ext_int_context(struct me4000_info *info)
 {
-	me4000_ext_int_context_t *ext_int_context;
+	struct me4000_ext_int_context *ext_int_context;
 
 	if (info->board_p->cnt.count) {
 		ext_int_context =
-		    kmalloc(sizeof(me4000_ext_int_context_t), GFP_KERNEL);
+		    kzalloc(sizeof(struct me4000_ext_int_context), GFP_KERNEL);
 		if (!ext_int_context) {
 			printk(KERN_ERR
 			       "ME4000:alloc_ext_int_context():Can't get memory for cnt context\n");
 			return -ENOMEM;
 		}
-		memset(ext_int_context, 0, sizeof(me4000_ext_int_context_t));
 
 		info->ext_int_context = ext_int_context;
 
@@ -1060,19 +1032,18 @@
 static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	int result = 0;
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 
 	CALL_PDEBUG("me4000_probe() is executed\n");
 
 	/* Allocate structure for board context */
-	board_info = kmalloc(sizeof(me4000_info_t), GFP_KERNEL);
+	board_info = kzalloc(sizeof(struct me4000_info), GFP_KERNEL);
 	if (!board_info) {
 		printk(KERN_ERR
 		       "ME4000:Can't get memory for board info structure\n");
 		result = -ENOMEM;
 		goto PROBE_ERROR_1;
 	}
-	memset(board_info, 0, sizeof(me4000_info_t));
 
 	/* Add to global linked list */
 	list_add_tail(&board_info->list, &me4000_board_info_list);
@@ -1080,70 +1051,70 @@
 	/* Get the PCI base registers */
 	result = get_registers(dev, board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe():Cannot get registers\n");
+		printk(KERN_ERR "%s:Cannot get registers\n", __func__);
 		goto PROBE_ERROR_2;
 	}
 
 	/* Enable the device */
 	result = pci_enable_device(dev);
 	if (result < 0) {
-		printk(KERN_ERR "me4000_probe():Cannot enable PCI device\n");
+		printk(KERN_ERR "%s:Cannot enable PCI device\n", __func__);
 		goto PROBE_ERROR_2;
 	}
 
 	/* Request the PCI register regions */
 	result = pci_request_regions(dev, ME4000_NAME);
 	if (result < 0) {
-		printk(KERN_ERR "me4000_probe():Cannot request I/O regions\n");
+		printk(KERN_ERR "%s:Cannot request I/O regions\n", __func__);
 		goto PROBE_ERROR_2;
 	}
 
 	/* Initialize board info */
 	result = init_board_info(dev, board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe():Cannot init baord info\n");
+		printk(KERN_ERR "%s:Cannot init baord info\n", __func__);
 		goto PROBE_ERROR_3;
 	}
 
 	/* Download the xilinx firmware */
 	result = me4000_xilinx_download(board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe:Can't download firmware\n");
+		printk(KERN_ERR "%s:Can't download firmware\n", __func__);
 		goto PROBE_ERROR_3;
 	}
 
 	/* Make a hardware reset */
 	result = me4000_reset_board(board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe:Can't reset board\n");
+		printk(KERN_ERR "%s :Can't reset board\n", __func__);
 		goto PROBE_ERROR_3;
 	}
 
 	/* Allocate analog output context structures */
 	result = alloc_ao_contexts(board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe():Cannot allocate ao contexts\n");
+		printk(KERN_ERR "%s:Cannot allocate ao contexts\n", __func__);
 		goto PROBE_ERROR_3;
 	}
 
 	/* Allocate analog input context */
 	result = alloc_ai_context(board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe():Cannot allocate ai context\n");
+		printk(KERN_ERR "%s:Cannot allocate ai context\n", __func__);
 		goto PROBE_ERROR_4;
 	}
 
 	/* Allocate digital I/O context */
 	result = alloc_dio_context(board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe():Cannot allocate dio context\n");
+		printk(KERN_ERR "%s:Cannot allocate dio context\n", __func__);
 		goto PROBE_ERROR_5;
 	}
 
 	/* Allocate counter context */
 	result = alloc_cnt_context(board_info);
 	if (result) {
-		printk(KERN_ERR "me4000_probe():Cannot allocate cnt context\n");
+		printk(KERN_ERR "%s:Cannot allocate cnt context\n", __func__);
 		goto PROBE_ERROR_6;
 	}
 
@@ -1151,36 +1122,36 @@
 	result = alloc_ext_int_context(board_info);
 	if (result) {
 		printk(KERN_ERR
-		       "me4000_probe():Cannot allocate ext_int context\n");
+		       "%s:Cannot allocate ext_int context\n", __func__);
 		goto PROBE_ERROR_7;
 	}
 
 	return 0;
 
-      PROBE_ERROR_7:
+PROBE_ERROR_7:
 	kfree(board_info->cnt_context);
 
-      PROBE_ERROR_6:
+PROBE_ERROR_6:
 	kfree(board_info->dio_context);
 
-      PROBE_ERROR_5:
+PROBE_ERROR_5:
 	kfree(board_info->ai_context);
 
-      PROBE_ERROR_4:
+PROBE_ERROR_4:
 	release_ao_contexts(board_info);
 
-      PROBE_ERROR_3:
+PROBE_ERROR_3:
 	pci_release_regions(dev);
 
-      PROBE_ERROR_2:
+PROBE_ERROR_2:
 	list_del(&board_info->list);
 	kfree(board_info);
 
-      PROBE_ERROR_1:
+PROBE_ERROR_1:
 	return result;
 }
 
-static int me4000_xilinx_download(me4000_info_t * info)
+static int me4000_xilinx_download(struct me4000_info *info)
 {
 	int size = 0;
 	u32 value = 0;
@@ -1211,7 +1182,7 @@
 	/* Wait until /INIT pin is set */
 	udelay(20);
 	if (!inl(info->plx_regbase + PLX_INTCSR) & 0x20) {
-		printk(KERN_ERR "me4000_xilinx_download():Can't init Xilinx\n");
+		printk(KERN_ERR "%s:Can't init Xilinx\n", __func__);
 		return -EIO;
 	}
 
@@ -1232,7 +1203,7 @@
 		/* Check if BUSY flag is low */
 		if (inl(info->plx_regbase + PLX_ICR) & 0x20) {
 			printk(KERN_ERR
-			       "me4000_xilinx_download():Xilinx is still busy (idx = %d)\n",
+			       "%s:Xilinx is still busy (idx = %d)\n", __func__,
 			       idx);
 			return -EIO;
 		}
@@ -1246,9 +1217,9 @@
 		PDEBUG("me4000_xilinx_download():Download was successful\n");
 	} else {
 		printk(KERN_ERR
-		       "ME4000:me4000_xilinx_download():DONE flag is not set\n");
+		       "ME4000:%s:DONE flag is not set\n", __func__);
 		printk(KERN_ERR
-		       "ME4000:me4000_xilinx_download():Download not succesful\n");
+		       "ME4000:%s:Download not succesful\n", __func__);
 		return -EIO;
 	}
 
@@ -1260,7 +1231,7 @@
 	return 0;
 }
 
-static int me4000_reset_board(me4000_info_t * info)
+static int me4000_reset_board(struct me4000_info *info)
 {
 	unsigned long icr;
 
@@ -1314,12 +1285,12 @@
 	int err = 0;
 	int i;
 	struct list_head *ptr;
-	me4000_info_t *board_info = NULL;
-	me4000_ao_context_t *ao_context = NULL;
-	me4000_ai_context_t *ai_context = NULL;
-	me4000_dio_context_t *dio_context = NULL;
-	me4000_cnt_context_t *cnt_context = NULL;
-	me4000_ext_int_context_t *ext_int_context = NULL;
+	struct me4000_info *board_info = NULL;
+	struct me4000_ao_context *ao_context = NULL;
+	struct me4000_ai_context *ai_context = NULL;
+	struct me4000_dio_context *dio_context = NULL;
+	struct me4000_cnt_context *cnt_context = NULL;
+	struct me4000_ext_int_context *ext_int_context = NULL;
 
 	CALL_PDEBUG("me4000_open() is executed\n");
 
@@ -1335,7 +1306,7 @@
 		/* Search for the board context */
 		for (ptr = me4000_board_info_list.next, i = 0;
 		     ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
-			board_info = list_entry(ptr, me4000_info_t, list);
+			board_info = list_entry(ptr, struct me4000_info, list);
 			if (i == board)
 				break;
 		}
@@ -1351,7 +1322,8 @@
 		for (ptr = board_info->ao_context_list.next, i = 0;
 		     ptr != &board_info->ao_context_list;
 		     ptr = ptr->next, i++) {
-			ao_context = list_entry(ptr, me4000_ao_context_t, list);
+			ao_context = list_entry(ptr, struct me4000_ao_context,
+									list);
 			if (i == dev)
 				break;
 		}
@@ -1415,7 +1387,7 @@
 		/* Search for the board context */
 		for (ptr = me4000_board_info_list.next, i = 0;
 		     ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
-			board_info = list_entry(ptr, me4000_info_t, list);
+			board_info = list_entry(ptr, struct me4000_info, list);
 			if (i == board)
 				break;
 		}
@@ -1469,7 +1441,7 @@
 		/* Search for the board context */
 		for (ptr = me4000_board_info_list.next;
 		     ptr != &me4000_board_info_list; ptr = ptr->next) {
-			board_info = list_entry(ptr, me4000_info_t, list);
+			board_info = list_entry(ptr, struct me4000_info, list);
 			if (board_info->board_count == board)
 				break;
 		}
@@ -1514,7 +1486,7 @@
 		/* Search for the board context */
 		for (ptr = me4000_board_info_list.next;
 		     ptr != &me4000_board_info_list; ptr = ptr->next) {
-			board_info = list_entry(ptr, me4000_info_t, list);
+			board_info = list_entry(ptr, struct me4000_info, list);
 			if (board_info->board_count == board)
 				break;
 		}
@@ -1557,7 +1529,7 @@
 		/* Search for the board context */
 		for (ptr = me4000_board_info_list.next;
 		     ptr != &me4000_board_info_list; ptr = ptr->next) {
-			board_info = list_entry(ptr, me4000_info_t, list);
+			board_info = list_entry(ptr, struct me4000_info, list);
 			if (board_info->board_count == board)
 				break;
 		}
@@ -1613,11 +1585,11 @@
 
 static int me4000_release(struct inode *inode_p, struct file *file_p)
 {
-	me4000_ao_context_t *ao_context;
-	me4000_ai_context_t *ai_context;
-	me4000_dio_context_t *dio_context;
-	me4000_cnt_context_t *cnt_context;
-	me4000_ext_int_context_t *ext_int_context;
+	struct me4000_ao_context *ao_context;
+	struct me4000_ai_context *ai_context;
+	struct me4000_dio_context *dio_context;
+	struct me4000_cnt_context *cnt_context;
+	struct me4000_ext_int_context *ext_int_context;
 
 	CALL_PDEBUG("me4000_release() is executed\n");
 
@@ -1677,7 +1649,7 @@
 
 /*------------------------------- Analog output stuff --------------------------------------*/
 
-static int me4000_ao_prepare(me4000_ao_context_t * ao_context)
+static int me4000_ao_prepare(struct me4000_ao_context *ao_context)
 {
 	unsigned long flags;
 
@@ -1756,7 +1728,7 @@
 	return 0;
 }
 
-static int me4000_ao_reset(me4000_ao_context_t * ao_context)
+static int me4000_ao_reset(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	wait_queue_head_t queue;
@@ -1777,9 +1749,10 @@
 		tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
 		me4000_outl(tmp, ao_context->ctrl_reg);
 
-		while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
-			sleep_on_timeout(&queue, 1);
-		}
+		wait_event_timeout(queue,
+			(inl(ao_context->status_reg) &
+				ME4000_AO_STATUS_BIT_FSM) == 0,
+			1);
 
 		/* Set to transparent mode */
 		me4000_ao_simultaneous_disable(ao_context);
@@ -1812,9 +1785,10 @@
 		me4000_outl(tmp, ao_context->ctrl_reg);
 		spin_unlock_irqrestore(&ao_context->int_lock, flags);
 
-		while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
-			sleep_on_timeout(&queue, 1);
-		}
+		wait_event_timeout(queue,
+			(inl(ao_context->status_reg) &
+				ME4000_AO_STATUS_BIT_FSM) == 0,
+			1);
 
 		/* Clear the circular buffer */
 		ao_context->circ_buf.head = 0;
@@ -1853,9 +1827,9 @@
 }
 
 static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
-				    size_t cnt, loff_t * offp)
+				    size_t cnt, loff_t *offp)
 {
-	me4000_ao_context_t *ao_context = filep->private_data;
+	struct me4000_ao_context *ao_context = filep->private_data;
 	u32 value;
 	const u16 *buffer = (const u16 *)buff;
 
@@ -1863,13 +1837,13 @@
 
 	if (cnt != 2) {
 		printk(KERN_ERR
-		       "me4000_ao_write_sing():Write count is not 2\n");
+		       "%s:Write count is not 2\n", __func__);
 		return -EINVAL;
 	}
 
 	if (get_user(value, buffer)) {
 		printk(KERN_ERR
-		       "me4000_ao_write_sing():Cannot copy data from user\n");
+		       "%s:Cannot copy data from user\n", __func__);
 		return -EFAULT;
 	}
 
@@ -1879,9 +1853,9 @@
 }
 
 static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
-				    size_t cnt, loff_t * offp)
+				    size_t cnt, loff_t *offp)
 {
-	me4000_ao_context_t *ao_context = filep->private_data;
+	struct me4000_ao_context *ao_context = filep->private_data;
 	size_t i;
 	u32 value;
 	u32 tmp;
@@ -1893,13 +1867,13 @@
 	/* Check if a conversion is already running */
 	if (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
 		printk(KERN_ERR
-		       "ME4000:me4000_ao_write_wrap():There is already a conversion running\n");
+		       "%s:There is already a conversion running\n", __func__);
 		return -EBUSY;
 	}
 
 	if (count > ME4000_AO_FIFO_COUNT) {
 		printk(KERN_ERR
-		       "me4000_ao_write_wrap():Can't load more than %d values\n",
+		       "%s:Can't load more than %d values\n", __func__,
 		       ME4000_AO_FIFO_COUNT);
 		return -ENOSPC;
 	}
@@ -1914,7 +1888,7 @@
 	for (i = 0; i < count; i++) {
 		if (get_user(value, buffer + i)) {
 			printk(KERN_ERR
-			       "me4000_ao_write_single():Cannot copy data from user\n");
+			       "%s:Cannot copy data from user\n", __func__);
 			return -EFAULT;
 		}
 		if (((ao_context->fifo_reg & 0xFF) == ME4000_AO_01_FIFO_REG)
@@ -1928,9 +1902,9 @@
 }
 
 static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
-				    size_t cnt, loff_t * offp)
+				    size_t cnt, loff_t *offp)
 {
-	me4000_ao_context_t *ao_context = filep->private_data;
+	struct me4000_ao_context *ao_context = filep->private_data;
 	const u16 *buffer = (const u16 *)buff;
 	size_t count = cnt / 2;
 	unsigned long flags;
@@ -2154,9 +2128,9 @@
 	return 2 * ret;
 }
 
-static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait)
+static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table *wait)
 {
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 	unsigned long mask = 0;
 
 	CALL_PDEBUG("me4000_ao_poll_cont() is executed\n");
@@ -2177,7 +2151,7 @@
 static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
 				int datasync)
 {
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 	wait_queue_head_t queue;
 
 	CALL_PDEBUG("me4000_ao_fsync_cont() is executed\n");
@@ -2187,15 +2161,19 @@
 
 	while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
 		interruptible_sleep_on_timeout(&queue, 1);
+		wait_event_interruptible_timeout(queue,
+			!(inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM),
+			1);
 		if (ao_context->pipe_flag) {
 			printk(KERN_ERR
-			       "me4000_ao_fsync_cont():Broken pipe detected\n");
+			       "%s:Broken pipe detected\n", __func__);
 			return -EPIPE;
 		}
 
 		if (signal_pending(current)) {
 			printk(KERN_ERR
-			       "me4000_ao_fsync_cont():Wait on state machine interrupted\n");
+			       "%s:Wait on state machine interrupted\n",
+			       __func__);
 			return -EINTR;
 		}
 	}
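The hunks above also move the driver away from the old sleep_on_timeout()/interruptible_sleep_on_timeout() calls towards the wait_event_*_timeout() helpers. As a minimal sketch of that idiom (not taken from the patch; wait_for_ao_idle() and its parameters are made-up stand-ins for the driver's ao_context fields), the conversion typically looks like this:

/*
 * Illustrative only.  The local wait queue is never woken, so each call
 * amounts to an interruptible sleep of at most one jiffy before the
 * FSM status bit is re-tested.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

static int wait_for_ao_idle(unsigned long status_reg, u32 fsm_bit)
{
	wait_queue_head_t queue;

	init_waitqueue_head(&queue);

	while (inl(status_reg) & fsm_bit) {
		/* Re-check the status bit after at most one jiffy. */
		wait_event_interruptible_timeout(queue,
				!(inl(status_reg) & fsm_bit), 1);
		if (signal_pending(current))
			return -EINTR;
	}

	return 0;
}

Unlike sleep_on_timeout(), the wait_event_*() helpers evaluate the condition under the wait-queue machinery, which is what makes them safe against the lost-wakeup race that made the sleep_on family deprecated.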
@@ -2206,7 +2184,7 @@
 static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
 				unsigned int service, unsigned long arg)
 {
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 
 	CALL_PDEBUG("me4000_ao_ioctl_sing() is executed\n");
 
@@ -2229,7 +2207,7 @@
 	case ME4000_AO_PRELOAD_UPDATE:
 		return me4000_ao_preload_update(ao_context);
 	case ME4000_GET_USER_INFO:
-		return me4000_get_user_info((me4000_user_info_t *) arg,
+		return me4000_get_user_info((struct me4000_user_info *)arg,
 					    ao_context->board_info);
 	case ME4000_AO_SIMULTANEOUS_EX_TRIG:
 		return me4000_ao_simultaneous_ex_trig(ao_context);
@@ -2239,8 +2217,9 @@
 		return me4000_ao_simultaneous_disable(ao_context);
 	case ME4000_AO_SIMULTANEOUS_UPDATE:
 		return
-		    me4000_ao_simultaneous_update((me4000_ao_channel_list_t *)
-						  arg, ao_context);
+		    me4000_ao_simultaneous_update(
+				(struct me4000_ao_channel_list *)arg,
+				ao_context);
 	case ME4000_AO_EX_TRIG_TIMEOUT:
 		return me4000_ao_ex_trig_timeout((unsigned long *)arg,
 						 ao_context);
@@ -2258,7 +2237,7 @@
 static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
 				unsigned int service, unsigned long arg)
 {
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 
 	CALL_PDEBUG("me4000_ao_ioctl_wrap() is executed\n");
 
@@ -2287,7 +2266,7 @@
 	case ME4000_AO_EX_TRIG_DISABLE:
 		return me4000_ao_ex_trig_disable(ao_context);
 	case ME4000_GET_USER_INFO:
-		return me4000_get_user_info((me4000_user_info_t *) arg,
+		return me4000_get_user_info((struct me4000_user_info *)arg,
 					    ao_context->board_info);
 	case ME4000_AO_FSM_STATE:
 		return me4000_ao_fsm_state((int *)arg, ao_context);
@@ -2310,7 +2289,7 @@
 static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
 				unsigned int service, unsigned long arg)
 {
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 
 	CALL_PDEBUG("me4000_ao_ioctl_cont() is executed\n");
 
@@ -2345,7 +2324,7 @@
 	case ME4000_AO_FSM_STATE:
 		return me4000_ao_fsm_state((int *)arg, ao_context);
 	case ME4000_GET_USER_INFO:
-		return me4000_get_user_info((me4000_user_info_t *) arg,
+		return me4000_get_user_info((struct me4000_user_info *)arg,
 					    ao_context->board_info);
 	case ME4000_AO_SYNCHRONOUS_EX_TRIG:
 		return me4000_ao_synchronous_ex_trig(ao_context);
@@ -2362,7 +2341,8 @@
 	return 0;
 }
 
-static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context)
+static int me4000_ao_start(unsigned long *arg,
+			   struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	wait_queue_head_t queue;
@@ -2412,7 +2392,7 @@
 	return 0;
 }
 
-static int me4000_ao_stop(me4000_ao_context_t * ao_context)
+static int me4000_ao_stop(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	wait_queue_head_t queue;
@@ -2445,7 +2425,7 @@
 	return 0;
 }
 
-static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context)
+static int me4000_ao_immediate_stop(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	wait_queue_head_t queue;
@@ -2477,8 +2457,8 @@
 	return 0;
 }
 
-static int me4000_ao_timer_set_divisor(u32 * arg,
-				       me4000_ao_context_t * ao_context)
+static int me4000_ao_timer_set_divisor(u32 *arg,
+				       struct me4000_ao_context *ao_context)
 {
 	u32 divisor;
 	u32 tmp;
@@ -2518,7 +2498,7 @@
 }
 
 static int me4000_ao_ex_trig_set_edge(int *arg,
-				      me4000_ao_context_t * ao_context)
+				      struct me4000_ao_context *ao_context)
 {
 	int mode;
 	u32 tmp;
@@ -2569,7 +2549,7 @@
 	return 0;
 }
 
-static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context)
+static int me4000_ao_ex_trig_enable(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -2593,7 +2573,7 @@
 	return 0;
 }
 
-static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context)
+static int me4000_ao_ex_trig_disable(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -2617,7 +2597,7 @@
 	return 0;
 }
 
-static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 
@@ -2643,7 +2623,7 @@
 	return 0;
 }
 
-static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 
@@ -2659,7 +2639,7 @@
 	return 0;
 }
 
-static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 
@@ -2675,13 +2655,13 @@
 	return 0;
 }
 
-static int me4000_ao_preload(me4000_ao_context_t * ao_context)
+static int me4000_ao_preload(struct me4000_ao_context *ao_context)
 {
 	CALL_PDEBUG("me4000_ao_preload() is executed\n");
 	return me4000_ao_simultaneous_sw(ao_context);
 }
 
-static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
+static int me4000_ao_preload_update(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	u32 ctrl;
@@ -2705,10 +2685,12 @@
 			if (!
 			    (tmp &
 			     (0x1 <<
-			      (((me4000_ao_context_t *) entry)->index + 16)))) {
+			      (((struct me4000_ao_context *)entry)->index
+			      					      + 16)))) {
 				tmp &=
 				    ~(0x1 <<
-				      (((me4000_ao_context_t *) entry)->index));
+				      (((struct me4000_ao_context *)entry)->
+				      					index));
 			}
 		}
 	}
@@ -2718,18 +2700,19 @@
 	return 0;
 }
 
-static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
-					 me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_update(struct me4000_ao_channel_list *arg,
+					 struct me4000_ao_context *ao_context)
 {
 	int err;
 	int i;
 	u32 tmp;
-	me4000_ao_channel_list_t channels;
+	struct me4000_ao_channel_list channels;
 
 	CALL_PDEBUG("me4000_ao_simultaneous_update() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&channels, arg, sizeof(me4000_ao_channel_list_t));
+	err = copy_from_user(&channels, arg,
+			sizeof(struct me4000_ao_channel_list));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ao_simultaneous_update():Can't copy command\n");
@@ -2737,13 +2720,12 @@
 	}
 
 	channels.list =
-	    kmalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL);
+	    kzalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL);
 	if (!channels.list) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ao_simultaneous_update():Can't get buffer\n");
 		return -ENOMEM;
 	}
-	memset(channels.list, 0, sizeof(unsigned long) * channels.count);
 
 	/* Copy channel list from user */
 	err =
@@ -2777,7 +2759,7 @@
 	return 0;
 }
 
-static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context)
+static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -2813,7 +2795,7 @@
 	return 0;
 }
 
-static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context)
+static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -2848,13 +2830,13 @@
 	return 0;
 }
 
-static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context)
+static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context)
 {
 	return me4000_ao_simultaneous_disable(ao_context);
 }
 
 static int me4000_ao_get_free_buffer(unsigned long *arg,
-				     me4000_ao_context_t * ao_context)
+				     struct me4000_ao_context *ao_context)
 {
 	unsigned long c;
 	int err;
@@ -2864,7 +2846,7 @@
 	err = copy_to_user(arg, &c, sizeof(unsigned long));
 	if (err) {
 		printk(KERN_ERR
-		       "ME4000:me4000_ao_get_free_buffer():Can't copy to user space\n");
+		       "%s:Can't copy to user space\n", __func__);
 		return -EFAULT;
 	}
 
@@ -2872,7 +2854,7 @@
 }
 
 static int me4000_ao_ex_trig_timeout(unsigned long *arg,
-				     me4000_ao_context_t * ao_context)
+				     struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	wait_queue_head_t queue;
@@ -2928,7 +2910,7 @@
 	return 0;
 }
 
-static int me4000_ao_enable_do(me4000_ao_context_t * ao_context)
+static int me4000_ao_enable_do(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -2959,7 +2941,7 @@
 	return 0;
 }
 
-static int me4000_ao_disable_do(me4000_ao_context_t * ao_context)
+static int me4000_ao_disable_do(struct me4000_ao_context *ao_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -2989,7 +2971,7 @@
 	return 0;
 }
 
-static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context)
+static int me4000_ao_fsm_state(int *arg, struct me4000_ao_context *ao_context)
 {
 	unsigned long tmp;
 
@@ -3012,9 +2994,9 @@
 	return 0;
 }
 
-/*------------------------------- Analog input stuff --------------------------------------*/
+/*------------------------- Analog input stuff -------------------------------*/
 
-static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
+static int me4000_ai_prepare(struct me4000_ai_context *ai_context)
 {
 	wait_queue_head_t queue;
 	int err;
@@ -3057,14 +3039,13 @@
 
 		/* Allocate circular buffer */
 		ai_context->circ_buf.buf =
-		    kmalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL);
+		    kzalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL);
 		if (!ai_context->circ_buf.buf) {
 			printk(KERN_ERR
 			       "ME4000:me4000_ai_prepare():Can't get circular buffer\n");
 			free_irq(ai_context->irq, ai_context);
 			return -ENOMEM;
 		}
-		memset(ai_context->circ_buf.buf, 0, ME4000_AI_BUFFER_SIZE);
 
 		/* Clear the circular buffer */
 		ai_context->circ_buf.head = 0;
@@ -3074,7 +3055,7 @@
 	return 0;
 }
 
-static int me4000_ai_reset(me4000_ai_context_t * ai_context)
+static int me4000_ai_reset(struct me4000_ai_context *ai_context)
 {
 	wait_queue_head_t queue;
 	u32 tmp;
@@ -3139,7 +3120,7 @@
 static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
 				unsigned int service, unsigned long arg)
 {
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 
 	CALL_PDEBUG("me4000_ai_ioctl_sing() is executed\n");
 
@@ -3157,16 +3138,17 @@
 
 	switch (service) {
 	case ME4000_AI_SINGLE:
-		return me4000_ai_single((me4000_ai_single_t *) arg, ai_context);
+		return me4000_ai_single((struct me4000_ai_single *)arg,
+								ai_context);
 	case ME4000_AI_EX_TRIG_ENABLE:
 		return me4000_ai_ex_trig_enable(ai_context);
 	case ME4000_AI_EX_TRIG_DISABLE:
 		return me4000_ai_ex_trig_disable(ai_context);
 	case ME4000_AI_EX_TRIG_SETUP:
-		return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg,
+		return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
 					       ai_context);
 	case ME4000_GET_USER_INFO:
-		return me4000_get_user_info((me4000_user_info_t *) arg,
+		return me4000_get_user_info((struct me4000_user_info *)arg,
 					    ai_context->board_info);
 	case ME4000_AI_OFFSET_ENABLE:
 		return me4000_ai_offset_enable(ai_context);
@@ -3177,9 +3159,11 @@
 	case ME4000_AI_FULLSCALE_DISABLE:
 		return me4000_ai_fullscale_disable(ai_context);
 	case ME4000_AI_EEPROM_READ:
-		return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context);
+		return me4000_eeprom_read((struct me4000_eeprom *)arg,
+								ai_context);
 	case ME4000_AI_EEPROM_WRITE:
-		return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context);
+		return me4000_eeprom_write((struct me4000_eeprom *)arg,
+								ai_context);
 	default:
 		printk(KERN_ERR
 		       "me4000_ai_ioctl_sing():Invalid service number\n");
@@ -3188,10 +3172,10 @@
 	return 0;
 }
 
-static int me4000_ai_single(me4000_ai_single_t * arg,
-			    me4000_ai_context_t * ai_context)
+static int me4000_ai_single(struct me4000_ai_single *arg,
+			    struct me4000_ai_context *ai_context)
 {
-	me4000_ai_single_t cmd;
+	struct me4000_ai_single cmd;
 	int err;
 	u32 tmp;
 	wait_queue_head_t queue;
@@ -3202,7 +3186,7 @@
 	init_waitqueue_head(&queue);
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_ai_single_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_single));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ai_single():Can't copy from user space\n");
@@ -3301,7 +3285,7 @@
 	cmd.value = me4000_inl(ai_context->data_reg) & 0xFFFF;
 
 	/* Copy result back to user */
-	err = copy_to_user(arg, &cmd, sizeof(me4000_ai_single_t));
+	err = copy_to_user(arg, &cmd, sizeof(struct me4000_ai_single));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ai_single():Can't copy to user space\n");
@@ -3314,7 +3298,7 @@
 static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
 			      unsigned int service, unsigned long arg)
 {
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 
 	CALL_PDEBUG("me4000_ai_ioctl_sw() is executed\n");
 
@@ -3332,9 +3316,11 @@
 
 	switch (service) {
 	case ME4000_AI_SC_SETUP:
-		return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context);
+		return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
+								ai_context);
 	case ME4000_AI_CONFIG:
-		return me4000_ai_config((me4000_ai_config_t *) arg, ai_context);
+		return me4000_ai_config((struct me4000_ai_config *)arg,
+								ai_context);
 	case ME4000_AI_START:
 		return me4000_ai_start(ai_context);
 	case ME4000_AI_STOP:
@@ -3344,19 +3330,20 @@
 	case ME4000_AI_FSM_STATE:
 		return me4000_ai_fsm_state((int *)arg, ai_context);
 	case ME4000_GET_USER_INFO:
-		return me4000_get_user_info((me4000_user_info_t *) arg,
+		return me4000_get_user_info((struct me4000_user_info *)arg,
 					    ai_context->board_info);
 	case ME4000_AI_EEPROM_READ:
-		return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context);
+		return me4000_eeprom_read((struct me4000_eeprom *)arg,
+								ai_context);
 	case ME4000_AI_EEPROM_WRITE:
-		return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context);
+		return me4000_eeprom_write((struct me4000_eeprom *)arg,
+								ai_context);
 	case ME4000_AI_GET_COUNT_BUFFER:
 		return me4000_ai_get_count_buffer((unsigned long *)arg,
 						  ai_context);
 	default:
 		printk(KERN_ERR
-		       "ME4000:me4000_ai_ioctl_sw():Invalid service number %d\n",
-		       service);
+		       "%s:Invalid service number %d\n", __func__, service);
 		return -ENOTTY;
 	}
 	return 0;
@@ -3365,7 +3352,7 @@
 static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
 			       unsigned int service, unsigned long arg)
 {
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 
 	CALL_PDEBUG("me4000_ai_ioctl_ext() is executed\n");
 
@@ -3383,9 +3370,11 @@
 
 	switch (service) {
 	case ME4000_AI_SC_SETUP:
-		return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context);
+		return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
+								ai_context);
 	case ME4000_AI_CONFIG:
-		return me4000_ai_config((me4000_ai_config_t *) arg, ai_context);
+		return me4000_ai_config((struct me4000_ai_config *)arg,
+								ai_context);
 	case ME4000_AI_START:
 		return me4000_ai_start_ex((unsigned long *)arg, ai_context);
 	case ME4000_AI_STOP:
@@ -3397,20 +3386,19 @@
 	case ME4000_AI_EX_TRIG_DISABLE:
 		return me4000_ai_ex_trig_disable(ai_context);
 	case ME4000_AI_EX_TRIG_SETUP:
-		return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg,
+		return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
 					       ai_context);
 	case ME4000_AI_FSM_STATE:
 		return me4000_ai_fsm_state((int *)arg, ai_context);
 	case ME4000_GET_USER_INFO:
-		return me4000_get_user_info((me4000_user_info_t *) arg,
+		return me4000_get_user_info((struct me4000_user_info *)arg,
 					    ai_context->board_info);
 	case ME4000_AI_GET_COUNT_BUFFER:
 		return me4000_ai_get_count_buffer((unsigned long *)arg,
 						  ai_context);
 	default:
 		printk(KERN_ERR
-		       "ME4000:me4000_ai_ioctl_ext():Invalid service number %d\n",
-		       service);
+		       "%s:Invalid service number %d\n", __func__ , service);
 		return -ENOTTY;
 	}
 	return 0;
@@ -3418,7 +3406,7 @@
 
 static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
 {
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 
 	CALL_PDEBUG("me4000_ao_fasync_cont() is executed\n");
 
@@ -3426,10 +3414,10 @@
 	return fasync_helper(fd, file_p, mode, &ai_context->fasync_p);
 }
 
-static int me4000_ai_config(me4000_ai_config_t * arg,
-			    me4000_ai_context_t * ai_context)
+static int me4000_ai_config(struct me4000_ai_config *arg,
+			    struct me4000_ai_context *ai_context)
 {
-	me4000_ai_config_t cmd;
+	struct me4000_ai_config cmd;
 	u32 *list = NULL;
 	u32 mode;
 	int i;
@@ -3451,7 +3439,7 @@
 	}
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_ai_config_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_config));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ai_config():Can't copy from user space\n");
@@ -3671,7 +3659,7 @@
 
 	return 0;
 
-      AI_CONFIG_ERR:
+AI_CONFIG_ERR:
 
 	/* Reset the timers */
 	ai_context->chan_timer = 66;
@@ -3699,7 +3687,7 @@
 
 }
 
-static int ai_common_start(me4000_ai_context_t * ai_context)
+static int ai_common_start(struct me4000_ai_context *ai_context)
 {
 	u32 tmp;
 	CALL_PDEBUG("ai_common_start() is executed\n");
@@ -3762,7 +3750,7 @@
 	return 0;
 }
 
-static int me4000_ai_start(me4000_ai_context_t * ai_context)
+static int me4000_ai_start(struct me4000_ai_context *ai_context)
 {
 	int err;
 	CALL_PDEBUG("me4000_ai_start() is executed\n");
@@ -3779,7 +3767,7 @@
 }
 
 static int me4000_ai_start_ex(unsigned long *arg,
-			      me4000_ai_context_t * ai_context)
+			      struct me4000_ai_context *ai_context)
 {
 	int err;
 	wait_queue_head_t queue;
@@ -3834,7 +3822,7 @@
 	return 0;
 }
 
-static int me4000_ai_stop(me4000_ai_context_t * ai_context)
+static int me4000_ai_stop(struct me4000_ai_context *ai_context)
 {
 	wait_queue_head_t queue;
 	u32 tmp;
@@ -3871,7 +3859,7 @@
 	return 0;
 }
 
-static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context)
+static int me4000_ai_immediate_stop(struct me4000_ai_context *ai_context)
 {
 	wait_queue_head_t queue;
 	u32 tmp;
@@ -3908,7 +3896,7 @@
 	return 0;
 }
 
-static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context)
+static int me4000_ai_ex_trig_enable(struct me4000_ai_context *ai_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -3924,7 +3912,7 @@
 	return 0;
 }
 
-static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context)
+static int me4000_ai_ex_trig_disable(struct me4000_ai_context *ai_context)
 {
 	u32 tmp;
 	unsigned long flags;
@@ -3940,10 +3928,10 @@
 	return 0;
 }
 
-static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
-				   me4000_ai_context_t * ai_context)
+static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *arg,
+				   struct me4000_ai_context *ai_context)
 {
-	me4000_ai_trigger_t cmd;
+	struct me4000_ai_trigger cmd;
 	int err;
 	u32 tmp;
 	unsigned long flags;
@@ -3951,7 +3939,7 @@
 	CALL_PDEBUG("me4000_ai_ex_trig_setup() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_ai_trigger_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_trigger));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ai_ex_trig_setup():Can't copy from user space\n");
@@ -4000,16 +3988,16 @@
 	return 0;
 }
 
-static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
-			      me4000_ai_context_t * ai_context)
+static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
+			      struct me4000_ai_context *ai_context)
 {
-	me4000_ai_sc_t cmd;
+	struct me4000_ai_sc cmd;
 	int err;
 
 	CALL_PDEBUG("me4000_ai_sc_setup() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_ai_sc_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_sc));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_ai_sc_setup():Can't copy from user space\n");
@@ -4023,9 +4011,9 @@
 }
 
 static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
-			      loff_t * offp)
+			      loff_t *offp)
 {
-	me4000_ai_context_t *ai_context = filep->private_data;
+	struct me4000_ai_context *ai_context = filep->private_data;
 	s16 *buffer = (s16 *) buff;
 	size_t count = cnt / 2;
 	unsigned long flags;
@@ -4150,9 +4138,9 @@
 	return ret * 2;
 }
 
-static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait)
+static unsigned int me4000_ai_poll(struct file *file_p, poll_table *wait)
 {
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 	unsigned long mask = 0;
 
 	CALL_PDEBUG("me4000_ai_poll() is executed\n");
@@ -4171,7 +4159,7 @@
 	return mask;
 }
 
-static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context)
+static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context)
 {
 	unsigned long tmp;
 
@@ -4184,7 +4172,7 @@
 	return 0;
 }
 
-static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context)
+static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context)
 {
 	unsigned long tmp;
 
@@ -4197,7 +4185,7 @@
 	return 0;
 }
 
-static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context)
+static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context)
 {
 	unsigned long tmp;
 
@@ -4210,7 +4198,7 @@
 	return 0;
 }
 
-static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context)
+static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context)
 {
 	unsigned long tmp;
 
@@ -4223,7 +4211,7 @@
 	return 0;
 }
 
-static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context)
+static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context)
 {
 	unsigned long tmp;
 
@@ -4242,7 +4230,7 @@
 }
 
 static int me4000_ai_get_count_buffer(unsigned long *arg,
-				      me4000_ai_context_t * ai_context)
+				      struct me4000_ai_context *ai_context)
 {
 	unsigned long c;
 	int err;
@@ -4252,7 +4240,7 @@
 	err = copy_to_user(arg, &c, sizeof(unsigned long));
 	if (err) {
 		printk(KERN_ERR
-		       "ME4000:me4000_ai_get_count_buffer():Can't copy to user space\n");
+		       "%s:Can't copy to user space\n", __func__);
 		return -EFAULT;
 	}
 
@@ -4261,7 +4249,7 @@
 
 /*---------------------------------- EEPROM stuff ---------------------------*/
 
-static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
+static int eeprom_write_cmd(struct me4000_ai_context *ai_context, unsigned long cmd,
 			    int length)
 {
 	int i;
@@ -4318,7 +4306,7 @@
 	return 0;
 }
 
-static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
+static unsigned short eeprom_read_cmd(struct me4000_ai_context *ai_context,
 				      unsigned long cmd, int length)
 {
 	int i;
@@ -4397,11 +4385,11 @@
 	return id;
 }
 
-static int me4000_eeprom_write(me4000_eeprom_t * arg,
-			       me4000_ai_context_t * ai_context)
+static int me4000_eeprom_write(struct me4000_eeprom *arg,
+			       struct me4000_ai_context *ai_context)
 {
 	int err;
-	me4000_eeprom_t setup;
+	struct me4000_eeprom setup;
 	unsigned long cmd;
 	unsigned long date_high;
 	unsigned long date_low;
@@ -4594,12 +4582,12 @@
 	return 0;
 }
 
-static int me4000_eeprom_read(me4000_eeprom_t * arg,
-			      me4000_ai_context_t * ai_context)
+static int me4000_eeprom_read(struct me4000_eeprom *arg,
+			      struct me4000_ai_context *ai_context)
 {
 	int err;
 	unsigned long cmd;
-	me4000_eeprom_t setup;
+	struct me4000_eeprom setup;
 
 	CALL_PDEBUG("me4000_eeprom_read() is executed\n");
 
@@ -4687,7 +4675,7 @@
 static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
 			    unsigned int service, unsigned long arg)
 {
-	me4000_dio_context_t *dio_context;
+	struct me4000_dio_context *dio_context;
 
 	CALL_PDEBUG("me4000_dio_ioctl() is executed\n");
 
@@ -4704,13 +4692,13 @@
 
 	switch (service) {
 	case ME4000_DIO_CONFIG:
-		return me4000_dio_config((me4000_dio_config_t *) arg,
+		return me4000_dio_config((struct me4000_dio_config *)arg,
 					 dio_context);
 	case ME4000_DIO_SET_BYTE:
-		return me4000_dio_set_byte((me4000_dio_byte_t *) arg,
+		return me4000_dio_set_byte((struct me4000_dio_byte *)arg,
 					   dio_context);
 	case ME4000_DIO_GET_BYTE:
-		return me4000_dio_get_byte((me4000_dio_byte_t *) arg,
+		return me4000_dio_get_byte((struct me4000_dio_byte *)arg,
 					   dio_context);
 	case ME4000_DIO_RESET:
 		return me4000_dio_reset(dio_context);
@@ -4723,17 +4711,17 @@
 	return 0;
 }
 
-static int me4000_dio_config(me4000_dio_config_t * arg,
-			     me4000_dio_context_t * dio_context)
+static int me4000_dio_config(struct me4000_dio_config *arg,
+			     struct me4000_dio_context *dio_context)
 {
-	me4000_dio_config_t cmd;
+	struct me4000_dio_config cmd;
 	u32 tmp;
 	int err;
 
 	CALL_PDEBUG("me4000_dio_config() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_dio_config_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_config));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_dio_config():Can't copy from user space\n");
@@ -4964,16 +4952,16 @@
 	return 0;
 }
 
-static int me4000_dio_set_byte(me4000_dio_byte_t * arg,
-			       me4000_dio_context_t * dio_context)
+static int me4000_dio_set_byte(struct me4000_dio_byte *arg,
+			       struct me4000_dio_context *dio_context)
 {
-	me4000_dio_byte_t cmd;
+	struct me4000_dio_byte cmd;
 	int err;
 
 	CALL_PDEBUG("me4000_dio_set_byte() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_dio_set_byte():Can't copy from user space\n");
@@ -5030,16 +5018,16 @@
 	return 0;
 }
 
-static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
-			       me4000_dio_context_t * dio_context)
+static int me4000_dio_get_byte(struct me4000_dio_byte *arg,
+			       struct me4000_dio_context *dio_context)
 {
-	me4000_dio_byte_t cmd;
+	struct me4000_dio_byte cmd;
 	int err;
 
 	CALL_PDEBUG("me4000_dio_get_byte() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_dio_get_byte():Can't copy from user space\n");
@@ -5070,7 +5058,7 @@
 	}
 
 	/* Copy result back to user */
-	err = copy_to_user(arg, &cmd, sizeof(me4000_dio_byte_t));
+	err = copy_to_user(arg, &cmd, sizeof(struct me4000_dio_byte));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_dio_get_byte():Can't copy to user space\n");
@@ -5080,7 +5068,7 @@
 	return 0;
 }
 
-static int me4000_dio_reset(me4000_dio_context_t * dio_context)
+static int me4000_dio_reset(struct me4000_dio_context *dio_context)
 {
 	CALL_PDEBUG("me4000_dio_reset() is executed\n");
 
@@ -5101,7 +5089,7 @@
 static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
 			    unsigned int service, unsigned long arg)
 {
-	me4000_cnt_context_t *cnt_context;
+	struct me4000_cnt_context *cnt_context;
 
 	CALL_PDEBUG("me4000_cnt_ioctl() is executed\n");
 
@@ -5118,11 +5106,11 @@
 
 	switch (service) {
 	case ME4000_CNT_READ:
-		return me4000_cnt_read((me4000_cnt_t *) arg, cnt_context);
+		return me4000_cnt_read((struct me4000_cnt *)arg, cnt_context);
 	case ME4000_CNT_WRITE:
-		return me4000_cnt_write((me4000_cnt_t *) arg, cnt_context);
+		return me4000_cnt_write((struct me4000_cnt *)arg, cnt_context);
 	case ME4000_CNT_CONFIG:
-		return me4000_cnt_config((me4000_cnt_config_t *) arg,
+		return me4000_cnt_config((struct me4000_cnt_config *)arg,
 					 cnt_context);
 	case ME4000_CNT_RESET:
 		return me4000_cnt_reset(cnt_context);
@@ -5135,10 +5123,10 @@
 	return 0;
 }
 
-static int me4000_cnt_config(me4000_cnt_config_t * arg,
-			     me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_config(struct me4000_cnt_config *arg,
+			     struct me4000_cnt_context *cnt_context)
 {
-	me4000_cnt_config_t cmd;
+	struct me4000_cnt_config cmd;
 	u8 counter;
 	u8 mode;
 	int err;
@@ -5146,7 +5134,7 @@
 	CALL_PDEBUG("me4000_cnt_config() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_config_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt_config));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_cnt_config():Can't copy from user space\n");
@@ -5204,17 +5192,17 @@
 	return 0;
 }
 
-static int me4000_cnt_read(me4000_cnt_t * arg,
-			   me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_read(struct me4000_cnt *arg,
+			   struct me4000_cnt_context *cnt_context)
 {
-	me4000_cnt_t cmd;
+	struct me4000_cnt cmd;
 	u8 tmp;
 	int err;
 
 	CALL_PDEBUG("me4000_cnt_read() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_cnt_read():Can't copy from user space\n");
@@ -5249,7 +5237,7 @@
 	}
 
 	/* Copy result back to user */
-	err = copy_to_user(arg, &cmd, sizeof(me4000_cnt_t));
+	err = copy_to_user(arg, &cmd, sizeof(struct me4000_cnt));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_cnt_read():Can't copy to user space\n");
@@ -5259,17 +5247,17 @@
 	return 0;
 }
 
-static int me4000_cnt_write(me4000_cnt_t * arg,
-			    me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_write(struct me4000_cnt *arg,
+			    struct me4000_cnt_context *cnt_context)
 {
-	me4000_cnt_t cmd;
+	struct me4000_cnt cmd;
 	u8 tmp;
 	int err;
 
 	CALL_PDEBUG("me4000_cnt_write() is executed\n");
 
 	/* Copy data from user */
-	err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t));
+	err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
 	if (err) {
 		printk(KERN_ERR
 		       "ME4000:me4000_cnt_write():Can't copy from user space\n");
@@ -5306,7 +5294,7 @@
 	return 0;
 }
 
-static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_reset(struct me4000_cnt_context *cnt_context)
 {
 	CALL_PDEBUG("me4000_cnt_reset() is executed\n");
 
@@ -5333,7 +5321,7 @@
 static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
 				unsigned int service, unsigned long arg)
 {
-	me4000_ext_int_context_t *ext_int_context;
+	struct me4000_ext_int_context *ext_int_context;
 
 	CALL_PDEBUG("me4000_ext_int_ioctl() is executed\n");
 
@@ -5366,7 +5354,7 @@
 	return 0;
 }
 
-static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context)
+static int me4000_ext_int_enable(struct me4000_ext_int_context *ext_int_context)
 {
 	unsigned long tmp;
 
@@ -5379,7 +5367,7 @@
 	return 0;
 }
 
-static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context)
+static int me4000_ext_int_disable(struct me4000_ext_int_context *ext_int_context)
 {
 	unsigned long tmp;
 
@@ -5393,7 +5381,7 @@
 }
 
 static int me4000_ext_int_count(unsigned long *arg,
-				me4000_ext_int_context_t * ext_int_context)
+				struct me4000_ext_int_context *ext_int_context)
 {
 
 	CALL_PDEBUG("me4000_ext_int_count() is executed\n");
@@ -5404,10 +5392,10 @@
 
 /*------------------------------------ General stuff ------------------------------------*/
 
-static int me4000_get_user_info(me4000_user_info_t * arg,
-				me4000_info_t * board_info)
+static int me4000_get_user_info(struct me4000_user_info *arg,
+				struct me4000_info *board_info)
 {
-	me4000_user_info_t user_info;
+	struct me4000_user_info user_info;
 
 	CALL_PDEBUG("me4000_get_user_info() is executed\n");
 
@@ -5437,7 +5425,7 @@
 
 	user_info.cnt_count = board_info->board_p->cnt.count;
 
-	if (copy_to_user(arg, &user_info, sizeof(me4000_user_info_t)))
+	if (copy_to_user(arg, &user_info, sizeof(struct me4000_user_info)))
 		return -EFAULT;
 
 	return 0;
@@ -5448,7 +5436,7 @@
 static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode)
 {
 	int result = 0;
-	me4000_ext_int_context_t *ext_int_context;
+	struct me4000_ext_int_context *ext_int_context;
 
 	CALL_PDEBUG("me4000_ext_int_fasync() is executed\n");
 
@@ -5465,7 +5453,7 @@
 {
 	u32 tmp;
 	u32 value;
-	me4000_ao_context_t *ao_context;
+	struct me4000_ao_context *ao_context;
 	int i;
 	int c = 0;
 	int c1 = 0;
@@ -5589,7 +5577,7 @@
 static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
 {
 	u32 tmp;
-	me4000_ai_context_t *ai_context;
+	struct me4000_ai_context *ai_context;
 	int i;
 	int c = 0;
 	int c1 = 0;
@@ -5933,7 +5921,7 @@
 
 static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
 {
-	me4000_ext_int_context_t *ext_int_context;
+	struct me4000_ext_int_context *ext_int_context;
 	unsigned long tmp;
 
 	ISR_PDEBUG("me4000_ext_int_isr() is executed\n");
@@ -5969,10 +5957,10 @@
 	return IRQ_HANDLED;
 }
 
-void __exit me4000_module_exit(void)
+static void __exit me4000_module_exit(void)
 {
 	struct list_head *board_p;
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 
 	CALL_PDEBUG("cleanup_module() is executed\n");
 
@@ -5993,7 +5981,7 @@
 	/* Reset the boards */
 	for (board_p = me4000_board_info_list.next;
 	     board_p != &me4000_board_info_list; board_p = board_p->next) {
-		board_info = list_entry(board_p, me4000_info_t, list);
+		board_info = list_entry(board_p, struct me4000_info, list);
 		me4000_reset_board(board_info);
 	}
 
@@ -6007,7 +5995,7 @@
 {
 	int len = 0;
 	int limit = count - 1000;
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 	struct list_head *ptr;
 
 	len += sprintf(buf + len, "\nME4000 DRIVER VERSION %X.%X.%X\n\n",
@@ -6019,7 +6007,7 @@
 	for (ptr = me4000_board_info_list.next;
 	     (ptr != &me4000_board_info_list) && (len < limit);
 	     ptr = ptr->next) {
-		board_info = list_entry(ptr, me4000_info_t, list);
+		board_info = list_entry(ptr, struct me4000_info, list);
 
 		len +=
 		    sprintf(buf + len, "Board number %d:\n",
@@ -6029,14 +6017,14 @@
 		    sprintf(buf + len, "PLX base register = 0x%lX\n",
 			    board_info->plx_regbase);
 		len +=
-		    sprintf(buf + len, "PLX base register size = 0x%lX\n",
-			    board_info->plx_regbase_size);
+		    sprintf(buf + len, "PLX base register size = 0x%X\n",
+			    (unsigned int)board_info->plx_regbase_size);
 		len +=
-		    sprintf(buf + len, "ME4000 base register = 0x%lX\n",
-			    board_info->me4000_regbase);
+		    sprintf(buf + len, "ME4000 base register = 0x%X\n",
+			    (unsigned int)board_info->me4000_regbase);
 		len +=
-		    sprintf(buf + len, "ME4000 base register size = 0x%lX\n",
-			    board_info->me4000_regbase_size);
+		    sprintf(buf + len, "ME4000 base register size = 0x%X\n",
+			    (unsigned int)board_info->me4000_regbase_size);
 		len +=
 		    sprintf(buf + len, "Serial number = 0x%X\n",
 			    board_info->serial_no);
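Beyond the printk()/__func__ conversions, the me4000.c changes above apply two mechanical patterns throughout: kmalloc() followed by memset(..., 0, ...) collapses into kzalloc(), and the hand-maintained ME4000_BOARD_VERSIONS count gives way to ARRAY_SIZE() over the board table itself (the macro is dropped from me4000.h below). A small sketch of both idioms, using made-up demo_* names rather than the driver's own types:

/*
 * Illustrative only.  struct demo_board, struct demo_ctx and the helper
 * functions are hypothetical stand-ins, not code from the driver.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_board {
	u16 device_id;
};

static struct demo_board demo_boards[] = {
	{ 0x4610 },
	{ 0x4650 },
	{ 0 },			/* list terminator, as in me4000_boards[] */
};

struct demo_ctx {
	int index;
};

static struct demo_ctx *demo_alloc_ctx(void)
{
	/* kzalloc() hands back zeroed memory, so no separate memset(). */
	return kzalloc(sizeof(struct demo_ctx), GFP_KERNEL);
}

static int demo_lookup(u16 device_id)
{
	int i;

	/* ARRAY_SIZE() follows the table automatically when entries are
	 * added or removed; there is no count macro to keep in sync. */
	for (i = 0; i < ARRAY_SIZE(demo_boards); i++)
		if (demo_boards[i].device_id == device_id)
			return i;

	return -ENODEV;
}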
diff --git a/drivers/staging/me4000/me4000.h b/drivers/staging/me4000/me4000.h
index c35e4b9..81c6f4d5 100644
--- a/drivers/staging/me4000/me4000.h
+++ b/drivers/staging/me4000/me4000.h
@@ -329,46 +329,46 @@
   Circular buffer used for analog input/output reads/writes.
   ===========================================================================*/
 
-typedef struct me4000_circ_buf {
+struct me4000_circ_buf {
 	s16 *buf;
 	int volatile head;
 	int volatile tail;
-} me4000_circ_buf_t;
+};
 
 /*=============================================================================
   Information about the hardware capabilities
   ===========================================================================*/
 
-typedef struct me4000_ao_info {
+struct me4000_ao_info {
 	int count;
 	int fifo_count;
-} me4000_ao_info_t;
+};
 
-typedef struct me4000_ai_info {
+struct me4000_ai_info {
 	int count;
 	int sh_count;
 	int diff_count;
 	int ex_trig_analog;
-} me4000_ai_info_t;
+};
 
-typedef struct me4000_dio_info {
+struct me4000_dio_info {
 	int count;
-} me4000_dio_info_t;
+};
 
-typedef struct me4000_cnt_info {
+struct me4000_cnt_info {
 	int count;
-} me4000_cnt_info_t;
+};
 
-typedef struct me4000_board {
+struct me4000_board {
 	u16 vendor_id;
 	u16 device_id;
-	me4000_ao_info_t ao;
-	me4000_ai_info_t ai;
-	me4000_dio_info_t dio;
-	me4000_cnt_info_t cnt;
-} me4000_board_t;
+	struct me4000_ao_info ao;
+	struct me4000_ai_info ai;
+	struct me4000_dio_info dio;
+	struct me4000_cnt_info cnt;
+};
 
-static me4000_board_t me4000_boards[] = {
+static struct me4000_board me4000_boards[] = {
 	{PCI_VENDOR_ID_MEILHAUS, 0x4610, {0, 0}, {16, 0, 0, 0}, {4}, {3}},
 
 	{PCI_VENDOR_ID_MEILHAUS, 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}},
@@ -391,8 +391,6 @@
 	{0},
 };
 
-#define ME4000_BOARD_VERSIONS (sizeof(me4000_boards) / sizeof(me4000_board_t) - 1)
-
 /*=============================================================================
   PCI device table.
   This is used by modprobe to translate PCI IDs to drivers.
@@ -427,19 +425,19 @@
   Global board and subdevice information structures
   ===========================================================================*/
 
-typedef struct me4000_info {
+struct me4000_info {
 	struct list_head list;	// List of all detected boards
 	int board_count;	// Index of the board after detection
 
 	unsigned long plx_regbase;	// PLX configuration space base address
-	unsigned long me4000_regbase;	// Base address of the ME4000
-	unsigned long timer_regbase;	// Base address of the timer circuit
-	unsigned long program_regbase;	// Base address to set the program pin for the xilinx
+	resource_size_t me4000_regbase;	// Base address of the ME4000
+	resource_size_t timer_regbase;	// Base address of the timer circuit
+	resource_size_t program_regbase;	// Base address to set the program pin for the xilinx
 
 	unsigned long plx_regbase_size;	// PLX register set space
-	unsigned long me4000_regbase_size;	// ME4000 register set space
-	unsigned long timer_regbase_size;	// Timer circuit register set space
-	unsigned long program_regbase_size;	// Size of program base address of the ME4000
+	resource_size_t me4000_regbase_size;	// ME4000 register set space
+	resource_size_t timer_regbase_size;	// Timer circuit register set space
+	resource_size_t program_regbase_size;	// Size of program base address of the ME4000
 
 	unsigned int serial_no;	// Serial number of the board
 	unsigned char hw_revision;	// Hardware revision of the board
@@ -451,7 +449,7 @@
 	int pci_func_no;	// PCI function number
 	struct pci_dev *pci_dev_p;	// General PCI information
 
-	me4000_board_t *board_p;	// Holds the board capabilities
+	struct me4000_board *board_p;	// Holds the board capabilities
 
 	unsigned int irq;	// IRQ assigned from the PCI BIOS
 	unsigned int irq_count;	// Count of external interrupts
@@ -464,18 +462,18 @@
 	struct me4000_dio_context *dio_context;	// Digital I/O specific context
 	struct me4000_cnt_context *cnt_context;	// Counter specific context
 	struct me4000_ext_int_context *ext_int_context;	// External interrupt specific context
-} me4000_info_t;
+};
 
-typedef struct me4000_ao_context {
+struct me4000_ao_context {
 	struct list_head list;	// linked list of me4000_ao_context_t
 	int index;		// Index in the list
 	int mode;		// Indicates mode (0 = single, 1 = wraparound, 2 = continous)
 	int dac_in_use;		// Indicates if already opend
 	spinlock_t use_lock;	// Guards in_use
 	spinlock_t int_lock;	// Used when locking out interrupts
-	me4000_circ_buf_t circ_buf;	// Circular buffer
+	struct me4000_circ_buf circ_buf;	// Circular buffer
 	wait_queue_head_t wait_queue;	// Wait queue to sleep while blocking write
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 	unsigned int irq;	// The irq associated with this ADC
 	int volatile pipe_flag;	// Indicates broken pipe set from me4000_ao_isr()
 	unsigned long ctrl_reg;
@@ -486,9 +484,9 @@
 	unsigned long irq_status_reg;
 	unsigned long preload_reg;
 	struct fasync_struct *fasync_p;	// Queue for asynchronous notification
-} me4000_ao_context_t;
+};
 
-typedef struct me4000_ai_context {
+struct me4000_ai_context {
 	struct list_head list;	// linked list of me4000_ai_info_t
 	int mode;		// Indicates mode
 	int in_use;		// Indicates if already opend
@@ -496,9 +494,9 @@
 	spinlock_t int_lock;	// Used when locking out interrupts
 	int number;		// Number of the DAC
 	unsigned int irq;	// The irq associated with this ADC
-	me4000_circ_buf_t circ_buf;	// Circular buffer
+	struct me4000_circ_buf circ_buf;	// Circular buffer
 	wait_queue_head_t wait_queue;	// Wait queue to sleep while blocking read
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 
 	struct fasync_struct *fasync_p;	// Queue for asynchronous notification
 
@@ -523,48 +521,48 @@
 	unsigned long channel_list_count;
 	unsigned long sample_counter;
 	int sample_counter_reload;
-} me4000_ai_context_t;
+};
 
-typedef struct me4000_dio_context {
+struct me4000_dio_context {
 	struct list_head list;	// linked list of me4000_dio_context_t
 	int in_use;		// Indicates if already opend
 	spinlock_t use_lock;	// Guards in_use
 	int number;
 	int dio_count;
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 	unsigned long dir_reg;
 	unsigned long ctrl_reg;
 	unsigned long port_0_reg;
 	unsigned long port_1_reg;
 	unsigned long port_2_reg;
 	unsigned long port_3_reg;
-} me4000_dio_context_t;
+};
 
-typedef struct me4000_cnt_context {
+struct me4000_cnt_context {
 	struct list_head list;	// linked list of me4000_dio_context_t
 	int in_use;		// Indicates if already opend
 	spinlock_t use_lock;	// Guards in_use
 	int number;
 	int cnt_count;
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 	unsigned long ctrl_reg;
 	unsigned long counter_0_reg;
 	unsigned long counter_1_reg;
 	unsigned long counter_2_reg;
-} me4000_cnt_context_t;
+};
 
-typedef struct me4000_ext_int_context {
+struct me4000_ext_int_context {
 	struct list_head list;	// linked list of me4000_dio_context_t
 	int in_use;		// Indicates if already opend
 	spinlock_t use_lock;	// Guards in_use
 	int number;
-	me4000_info_t *board_info;
+	struct me4000_info *board_info;
 	unsigned int irq;
 	unsigned long int_count;
 	struct fasync_struct *fasync_ptr;
 	unsigned long ctrl_reg;
 	unsigned long irq_status_reg;
-} me4000_ext_int_context_t;
+};
 
 #endif
 
@@ -745,12 +743,12 @@
   General type definitions
   ----------------------------------------------------------------------------*/
 
-typedef struct me4000_user_info {
+struct me4000_user_info {
 	int board_count;	// Index of the board after detection
 	unsigned long plx_regbase;	// PLX configuration space base address
-	unsigned long me4000_regbase;	// Base address of the ME4000
+	resource_size_t me4000_regbase;	// Base address of the ME4000
 	unsigned long plx_regbase_size;	// PLX register set space
-	unsigned long me4000_regbase_size;	// ME4000 register set space
+	resource_size_t me4000_regbase_size;	// ME4000 register set space
 	unsigned long serial_no;	// Serial number of the board
 	unsigned char hw_revision;	// Hardware revision of the board
 	unsigned short vendor_id;	// Meilhaus vendor id (0x1402)
@@ -773,62 +771,62 @@
 	int dio_count;		// Count of digital I/O ports
 
 	int cnt_count;		// Count of counters
-} me4000_user_info_t;
+};
 
 /*-----------------------------------------------------------------------------
   Type definitions for analog output
   ----------------------------------------------------------------------------*/
 
-typedef struct me4000_ao_channel_list {
+struct me4000_ao_channel_list {
 	unsigned long count;
 	unsigned long *list;
-} me4000_ao_channel_list_t;
+};
 
 /*-----------------------------------------------------------------------------
   Type definitions for analog input
   ----------------------------------------------------------------------------*/
 
-typedef struct me4000_ai_channel_list {
+struct me4000_ai_channel_list {
 	unsigned long count;
 	unsigned long *list;
-} me4000_ai_channel_list_t;
+};
 
-typedef struct me4000_ai_timer {
+struct me4000_ai_timer {
 	unsigned long pre_chan;
 	unsigned long chan;
 	unsigned long scan_low;
 	unsigned long scan_high;
-} me4000_ai_timer_t;
+};
 
-typedef struct me4000_ai_config {
-	me4000_ai_timer_t timer;
-	me4000_ai_channel_list_t channel_list;
+struct me4000_ai_config {
+	struct me4000_ai_timer timer;
+	struct me4000_ai_channel_list channel_list;
 	int sh;
-} me4000_ai_config_t;
+};
 
-typedef struct me4000_ai_single {
+struct me4000_ai_single {
 	int channel;
 	int range;
 	int mode;
 	short value;
 	unsigned long timeout;
-} me4000_ai_single_t;
+};
 
-typedef struct me4000_ai_trigger {
+struct me4000_ai_trigger {
 	int mode;
 	int edge;
-} me4000_ai_trigger_t;
+};
 
-typedef struct me4000_ai_sc {
+struct me4000_ai_sc {
 	unsigned long value;
 	int reload;
-} me4000_ai_sc_t;
+};
 
 /*-----------------------------------------------------------------------------
   Type definitions for eeprom
   ----------------------------------------------------------------------------*/
 
-typedef struct me4000_eeprom {
+struct me4000_eeprom {
 	unsigned long date;
 	short uni_10_offset;
 	short uni_10_fullscale;
@@ -842,45 +840,45 @@
 	short diff_10_fullscale;
 	short diff_2_5_offset;
 	short diff_2_5_fullscale;
-} me4000_eeprom_t;
+};
 
 /*-----------------------------------------------------------------------------
   Type definitions for digital I/O
   ----------------------------------------------------------------------------*/
 
-typedef struct me4000_dio_config {
+struct me4000_dio_config {
 	int port;
 	int mode;
 	int function;
-} me4000_dio_config_t;
+};
 
-typedef struct me4000_dio_byte {
+struct me4000_dio_byte {
 	int port;
 	unsigned char byte;
-} me4000_dio_byte_t;
+};
 
 /*-----------------------------------------------------------------------------
   Type definitions for counters
   ----------------------------------------------------------------------------*/
 
-typedef struct me4000_cnt {
+struct me4000_cnt {
 	int counter;
 	unsigned short value;
-} me4000_cnt_t;
+};
 
-typedef struct me4000_cnt_config {
+struct me4000_cnt_config {
 	int counter;
 	int mode;
-} me4000_cnt_config_t;
+};
 
 /*-----------------------------------------------------------------------------
   Type definitions for external interrupt
   ----------------------------------------------------------------------------*/
 
-typedef struct {
+struct me4000_int {
 	int int1_count;
 	int int2_count;
-} me4000_int_type;
+};
 
 /*-----------------------------------------------------------------------------
   The ioctls of the board
@@ -888,7 +886,8 @@
 
 #define ME4000_IOCTL_MAXNR 50
 #define ME4000_MAGIC 'y'
-#define ME4000_GET_USER_INFO          _IOR (ME4000_MAGIC, 0, me4000_user_info_t)
+#define ME4000_GET_USER_INFO          _IOR (ME4000_MAGIC, 0, \
+					    struct me4000_user_info)
 
 #define ME4000_AO_START               _IOW (ME4000_MAGIC, 1, unsigned long)
 #define ME4000_AO_STOP                _IO  (ME4000_MAGIC, 2)
@@ -904,25 +903,35 @@
 #define ME4000_AO_DISABLE_DO          _IO  (ME4000_MAGIC, 12)
 #define ME4000_AO_FSM_STATE           _IOR (ME4000_MAGIC, 13, int)
 
-#define ME4000_AI_SINGLE              _IOR (ME4000_MAGIC, 14, me4000_ai_single_t)
+#define ME4000_AI_SINGLE              _IOR (ME4000_MAGIC, 14, \
+					    struct me4000_ai_single)
 #define ME4000_AI_START               _IOW (ME4000_MAGIC, 15, unsigned long)
 #define ME4000_AI_STOP                _IO  (ME4000_MAGIC, 16)
 #define ME4000_AI_IMMEDIATE_STOP      _IO  (ME4000_MAGIC, 17)
 #define ME4000_AI_EX_TRIG_ENABLE      _IO  (ME4000_MAGIC, 18)
 #define ME4000_AI_EX_TRIG_DISABLE     _IO  (ME4000_MAGIC, 19)
-#define ME4000_AI_EX_TRIG_SETUP       _IOW (ME4000_MAGIC, 20, me4000_ai_trigger_t)
-#define ME4000_AI_CONFIG              _IOW (ME4000_MAGIC, 21, me4000_ai_config_t)
-#define ME4000_AI_SC_SETUP            _IOW (ME4000_MAGIC, 22, me4000_ai_sc_t)
+#define ME4000_AI_EX_TRIG_SETUP       _IOW (ME4000_MAGIC, 20, \
+					    struct me4000_ai_trigger)
+#define ME4000_AI_CONFIG              _IOW (ME4000_MAGIC, 21, \
+					    struct me4000_ai_config)
+#define ME4000_AI_SC_SETUP            _IOW (ME4000_MAGIC, 22, \
+					    struct me4000_ai_sc)
 #define ME4000_AI_FSM_STATE           _IOR (ME4000_MAGIC, 23, int)
 
-#define ME4000_DIO_CONFIG             _IOW (ME4000_MAGIC, 24, me4000_dio_config_t)
-#define ME4000_DIO_GET_BYTE           _IOR (ME4000_MAGIC, 25, me4000_dio_byte_t)
-#define ME4000_DIO_SET_BYTE           _IOW (ME4000_MAGIC, 26, me4000_dio_byte_t)
+#define ME4000_DIO_CONFIG             _IOW (ME4000_MAGIC, 24, \
+					    struct me4000_dio_config)
+#define ME4000_DIO_GET_BYTE           _IOR (ME4000_MAGIC, 25, \
+					    struct me4000_dio_byte)
+#define ME4000_DIO_SET_BYTE           _IOW (ME4000_MAGIC, 26, \
+					    struct me4000_dio_byte)
 #define ME4000_DIO_RESET              _IO  (ME4000_MAGIC, 27)
 
-#define ME4000_CNT_READ               _IOR (ME4000_MAGIC, 28, me4000_cnt_t)
-#define ME4000_CNT_WRITE              _IOW (ME4000_MAGIC, 29, me4000_cnt_t)
-#define ME4000_CNT_CONFIG             _IOW (ME4000_MAGIC, 30, me4000_cnt_config_t)
+#define ME4000_CNT_READ               _IOR (ME4000_MAGIC, 28, \
+					    struct me4000_cnt)
+#define ME4000_CNT_WRITE              _IOW (ME4000_MAGIC, 29, \
+					    struct me4000_cnt)
+#define ME4000_CNT_CONFIG             _IOW (ME4000_MAGIC, 30, \
+					    struct me4000_cnt_config)
 #define ME4000_CNT_RESET              _IO  (ME4000_MAGIC, 31)
 
 #define ME4000_EXT_INT_DISABLE        _IO  (ME4000_MAGIC, 32)
@@ -934,13 +943,16 @@
 #define ME4000_AI_FULLSCALE_ENABLE    _IO  (ME4000_MAGIC, 37)
 #define ME4000_AI_FULLSCALE_DISABLE   _IO  (ME4000_MAGIC, 38)
 
-#define ME4000_AI_EEPROM_READ         _IOR (ME4000_MAGIC, 39, me4000_eeprom_t)
-#define ME4000_AI_EEPROM_WRITE        _IOW (ME4000_MAGIC, 40, me4000_eeprom_t)
+#define ME4000_AI_EEPROM_READ         _IOR (ME4000_MAGIC, 39, \
+					    struct me4000_eeprom)
+#define ME4000_AI_EEPROM_WRITE        _IOW (ME4000_MAGIC, 40, \
+					    struct me4000_eeprom)
 
 #define ME4000_AO_SIMULTANEOUS_EX_TRIG _IO  (ME4000_MAGIC, 41)
 #define ME4000_AO_SIMULTANEOUS_SW      _IO  (ME4000_MAGIC, 42)
 #define ME4000_AO_SIMULTANEOUS_DISABLE _IO  (ME4000_MAGIC, 43)
-#define ME4000_AO_SIMULTANEOUS_UPDATE  _IOW (ME4000_MAGIC, 44, me4000_ao_channel_list_t)
+#define ME4000_AO_SIMULTANEOUS_UPDATE  _IOW (ME4000_MAGIC, 44, \
+					     struct me4000_ao_channel_list)
 
 #define ME4000_AO_SYNCHRONOUS_EX_TRIG  _IO  (ME4000_MAGIC, 45)
 #define ME4000_AO_SYNCHRONOUS_SW       _IO  (ME4000_MAGIC, 46)
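With the typedefs gone, the ioctl request macros above name the struct tags directly; callers are unaffected because only the type spelling inside _IOR/_IOW changed. A minimal userspace sketch of a single analog-input conversion, assuming this header is exported to user space and that the analog-input node is called /dev/me4000_0_ai (the node name is an assumption, not taken from the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "me4000.h"	/* assumed to provide ME4000_AI_SINGLE and struct me4000_ai_single */

int me4000_read_one_sample(void)
{
	struct me4000_ai_single cmd = {
		.channel = 0,	/* first analog input */
		.range   = 0,
		.mode    = 0,
		.timeout = 0,	/* no timeout */
	};
	int fd = open("/dev/me4000_0_ai", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, ME4000_AI_SINGLE, &cmd) < 0) {
		close(fd);
		return -1;
	}
	printf("raw sample: %d\n", cmd.value);
	close(fd);
	return 0;
}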
diff --git a/drivers/staging/pcc-acpi/Kconfig b/drivers/staging/pcc-acpi/Kconfig
new file mode 100644
index 0000000..6720d40
--- /dev/null
+++ b/drivers/staging/pcc-acpi/Kconfig
@@ -0,0 +1,11 @@
+config PCC_ACPI
+	tristate "Panasonic ACPI Hotkey support"
+	depends on ACPI
+	default n
+	---help---
+	  This driver provides support for Panasonic hotkeys through the
+	  ACPI interface.  This works for the Panasonic R1 (N variant),
+	  R2, R3, T2, W2, and Y2 laptops.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called pcc-acpi.
diff --git a/drivers/staging/pcc-acpi/Makefile b/drivers/staging/pcc-acpi/Makefile
new file mode 100644
index 0000000..f93b29e
--- /dev/null
+++ b/drivers/staging/pcc-acpi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCC_ACPI)		+= pcc-acpi.o
diff --git a/drivers/staging/pcc-acpi/TODO b/drivers/staging/pcc-acpi/TODO
new file mode 100644
index 0000000..fab2409
--- /dev/null
+++ b/drivers/staging/pcc-acpi/TODO
@@ -0,0 +1,7 @@
+TODO:
+	- Lindent fixes
+	- checkpatch.pl fixes
+	- verify that the acpi interface is correct
+	- remove /proc dependency if needed (not sure yet)
+
+Please send any patches for this driver to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/pcc-acpi/pcc-acpi.c b/drivers/staging/pcc-acpi/pcc-acpi.c
new file mode 100644
index 0000000..7715c31
--- /dev/null
+++ b/drivers/staging/pcc-acpi/pcc-acpi.c
@@ -0,0 +1,1111 @@
+/*
+ *  Panasonic HotKey and lcd brightness control Extra driver
+ *  (C) 2004 Hiroshi Miura <miura@da-cha.org>
+ *  (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
+ *  (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
+ *  (C) 2004 David Bronaugh <dbronaugh>
+ *
+ *  derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ *
+ * 	Nov.04, 2006	Hiroshi Miura <miura@da-cha.org>
+ * 		-v0.9	remove warning about section reference.
+ * 			remove acpi_os_free
+ * 			add /proc/acpi/pcc/brightness interface to
+ * 			allow HAL to access.
+ * 			merge dbronaugh's enhancement
+ * 			Aug.17, 2004 David Bronaugh (dbronaugh)
+ *  				- Added screen brightness setting interface
+ *				  Thanks to the FreeBSD crew
+ *				  (acpi_panasonic.c authors)
+ * 				  for the ideas I needed to accomplish it
+ *
+ *	May.29, 2006	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8.4 follow to change keyinput structure
+ *			thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
+ *			Jacob Bower <jacob.bower@ic.ac.uk> and
+ *			Hiroshi Yokota for providing solutions.
+ *
+ *	Oct.02, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8.2	merge code of YOKOTA Hiroshi
+ *			<yokota@netlab.is.tsukuba.ac.jp>.
+ *			Add sticky key mode interface.
+ *			Refactoring acpi_pcc_generete_keyinput().
+ *
+ *	Sep.15, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8	Generate key input event on input subsystem.
+ *			This is based on yet another driver
+ *			written by Ryuta Nakanishi.
+ *
+ *	Sep.10, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.7	Change proc interface functions using seq_file
+ *			facility as same as other ACPI drivers.
+ *
+ *	Aug.28, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.6.4 Fix a silly error with status checking
+ *
+ *	Aug.25, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.6.3 replace read_acpi_int by standard
+ *			function acpi_evaluate_integer
+ *			some clean up and make smart copyright notice.
+ *			fix return value of pcc_acpi_get_key()
+ *			fix checking return value of acpi_bus_register_driver()
+ *
+ *      Aug.22, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
+ *              -v0.6.2 Add check on ACPI data (num_sifr)
+ *                      Coding style cleanups, better error messages/handling
+ *			Fixed an off-by-one error in memory allocation
+ *
+ *      Aug.21, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
+ *              -v0.6.1 Fix a silly error with status checking
+ *
+ *      Aug.20, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
+ *              - v0.6  Correct brightness controls to reflect reality
+ *                      based on information gleaned by Hiroshi Miura
+ *                      and discussions with Hiroshi Miura
+ *
+ *	Aug.10, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.5  support LCD brightness control
+ *			based on the disclosed information by MEI.
+ *
+ *	Jul.25, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.4  first post version
+ *		        add function to retrieve SIFR
+ *
+ *	Jul.24, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.3  get proper status of hotkey
+ *
+ *      Jul.22, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.2  add HotKey handler
+ *
+ *      Jul.17, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.1  start from toshiba_acpi driver written by John Belmonte
+ *
+ */
+
+#define ACPI_PCC_VERSION	"0.9+hy"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+
+/*************************************************************************
+ * "seq" file template definition.
+ */
+/* "seq" initializer */
+#define SEQ_OPEN_FS(_open_func_name_, _show_func_name_) \
+static int _open_func_name_(struct inode *inode, struct file *file) \
+{								      \
+	return single_open(file, _show_func_name_, PDE(inode)->data);  \
+}
+
+/*-------------------------------------------------------------------------
+ * "seq" fops template for read-only files.
+ */
+#define SEQ_FILEOPS_R(_open_func_name_) \
+{ \
+	.open	 = _open_func_name_,		  \
+	.read	 = seq_read,			  \
+	.llseek	 = seq_lseek,			  \
+	.release = single_release,		  \
+}
+
+/*------------------------------------------------------------------------
+ * "seq" fops template for read-write files.
+ */
+#define SEQ_FILEOPS_RW(_open_func_name_, _write_func_name_) \
+{ \
+	.open	 = _open_func_name_ ,		  \
+	.read	 = seq_read,			  \
+	.write	 = _write_func_name_,		  \
+	.llseek	 = seq_lseek,			  \
+	.release = single_release,		  \
+}
+
+/*
+ * "seq" file template definition ended.
+ ***************************************************************************
+ */
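The two templates above are only shorthand; for a read-only entry such as the version file, SEQ_OPEN_FS(acpi_pcc_version_open_fs, acpi_pcc_version_show) plus SEQ_FILEOPS_R(acpi_pcc_version_open_fs) expand to roughly the following (shown here for clarity, not added to the driver):

static int acpi_pcc_version_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_pcc_version_show, PDE(inode)->data);
}

static struct file_operations acpi_pcc_version_fops = {
	.open	 = acpi_pcc_version_open_fs,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};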
+#ifndef ACPI_HOTKEY_COMPONENT
+#define ACPI_HOTKEY_COMPONENT	0x10000000
+#endif
+
+#define _COMPONENT		ACPI_HOTKEY_COMPONENT
+ACPI_MODULE_NAME("pcc_acpi");
+
+MODULE_AUTHOR("Hiroshi Miura, Hiroshi Yokota");
+MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
+MODULE_LICENSE("GPL");
+
+#define LOGPREFIX "pcc_acpi: "
+
+/****************************************************
+ * Define ACPI PATHs
+ ****************************************************/
+/* Lets note hotkeys */
+#define METHOD_HKEY_QUERY	"HINF"
+#define METHOD_HKEY_SQTY	"SQTY"
+#define METHOD_HKEY_SINF	"SINF"
+#define METHOD_HKEY_SSET	"SSET"
+#define HKEY_NOTIFY		 0x80
+
+/* for brightness control */
+#define LCD_MAX_BRIGHTNESS 255
+/* This may be magical -- beware */
+#define LCD_BRIGHTNESS_INCREMENT 17
+/* Registers of SINF */
+#define SINF_LCD_BRIGHTNESS 4
+
+/*******************************************************************
+ *
+ * definitions for /proc/ interface
+ *
+ *******************************************************************/
+#define ACPI_PCC_DRIVER_NAME	"pcc_acpi"
+#define ACPI_PCC_DEVICE_NAME	"PCCExtra"
+#define ACPI_PCC_CLASS		"pcc"
+#define PROC_PCC		ACPI_PCC_CLASS
+
+#define ACPI_PCC_INPUT_PHYS	"panasonic/hkey0"
+
+/* This is transitional definition */
+#ifndef KEY_BATT
+# define KEY_BATT 227
+#endif
+
+#define PROC_STR_MAX_LEN  8
+
+#define BUS_PCC_HOTKEY BUS_I8042 /*0x1a*/ /* FIXME: BUS_I8042? */
+
+/* Fn+F4/F5 conflicts with Shift+F1/F2  */
+/* This hack avoids a key number conflict */
+#define PCC_KEYINPUT_MODE (0)
+
+/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
+   ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
+*/
+enum SINF_BITS { SINF_NUM_BATTERIES = 0,
+		 SINF_LCD_TYPE,
+		 SINF_AC_MAX_BRIGHT,
+		 SINF_AC_MIN_BRIGHT,
+		 SINF_AC_CUR_BRIGHT,
+			     /* 4 = R1 only handle SINF_AC_CUR_BRIGHT
+			      * as SINF_CUR_BRIGHT and don't know AC state */
+		 SINF_DC_MAX_BRIGHT,
+		 SINF_DC_MIN_BRIGHT,
+		 SINF_DC_CUR_BRIGHT,
+		 SINF_MUTE,
+		 SINF_RESERVED,
+		 SINF_ENV_STATE, /* 10 */
+		 SINF_STICKY_KEY = 0x80,
+};
+
+static struct acpi_device_id pcc_device_ids[] = {
+	{"MAT0012", 0},
+	{"MAT0013", 0},
+	{"MAT0018", 0},
+	{"MAT0019", 0},
+	{"",	    0},
+};
+MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+
+
+static int __devinit acpi_pcc_hotkey_add(struct acpi_device *device);
+static int __devexit acpi_pcc_hotkey_remove(struct acpi_device *device,
+					    int type);
+static int acpi_pcc_hotkey_resume(struct acpi_device *device);
+
+
+static struct acpi_driver acpi_pcc_driver = {
+	.name =		ACPI_PCC_DRIVER_NAME,
+	.class =	ACPI_PCC_CLASS,
+	.ids =		pcc_device_ids,
+	.ops =		{
+				.add = acpi_pcc_hotkey_add,
+				.remove = __devexit_p(acpi_pcc_hotkey_remove),
+#ifdef CONFIG_PM
+				/*.suspend = acpi_pcc_hotkey_suspend,*/
+				.resume = acpi_pcc_hotkey_resume,
+#endif
+			},
+};
+
+struct acpi_hotkey {
+	acpi_handle		handle;
+	struct acpi_device	*device;
+	struct proc_dir_entry   *proc_dir_entry;
+	unsigned long		num_sifr;
+	unsigned long		status;
+	struct input_dev	*input_dev;
+	int			sticky_mode;
+};
+
+struct pcc_keyinput {
+	struct acpi_hotkey      *hotkey;
+	int key_mode;
+};
+
+/* *************************************************************************
+   Hotkey driver core
+   ************************************************************************* */
+/* -------------------------------------------------------------------------
+   method access functions
+   ------------------------------------------------------------------------- */
+static int acpi_pcc_write_sset(struct acpi_hotkey *hotkey, int func, int val)
+{
+	union acpi_object in_objs[] = {
+		{ .integer.type  = ACPI_TYPE_INTEGER,
+		  .integer.value = func, },
+		{ .integer.type  = ACPI_TYPE_INTEGER,
+		  .integer.value = val, },
+	};
+	struct acpi_object_list params = {
+		.count   = ARRAY_SIZE(in_objs),
+		.pointer = in_objs,
+	};
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
+
+	status = acpi_evaluate_object(hotkey->handle, METHOD_HKEY_SSET,
+								&params, NULL);
+
+	return_VALUE(status == AE_OK ? AE_OK : AE_ERROR);
+}
+
+static inline int acpi_pcc_get_sqty(struct acpi_device *device)
+{
+	unsigned long s;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
+
+	status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
+								NULL, &s);
+	if (ACPI_SUCCESS(status)) {
+		return_VALUE(s);
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "evaluation error HKEY.SQTY\n"));
+		return_VALUE(-EINVAL);
+	}
+}
+
+static int acpi_pcc_retrieve_biosdata(struct acpi_hotkey *hotkey, u32 *sinf)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	union acpi_object *hkey = NULL;
+	int i;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
+
+	status = acpi_evaluate_object(hotkey->handle, METHOD_HKEY_SINF, 0,
+				      &buffer);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "evaluation error HKEY.SINF\n"));
+		status = AE_ERROR;
+		return_VALUE(status);
+	}
+
+	hkey = buffer.pointer;
+	if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
+		goto free_buffer;
+	}
+
+	if (hotkey->num_sifr < hkey->package.count) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "SQTY reports bad SINF length\n"));
+		status = AE_ERROR;
+		goto free_buffer;
+	}
+
+	for (i = 0; i < hkey->package.count; i++) {
+		union acpi_object *element = &(hkey->package.elements[i]);
+		if (likely(element->type == ACPI_TYPE_INTEGER)) {
+			sinf[i] = element->integer.value;
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+					 "Invalid HKEY.SINF data\n"));
+			status = AE_ERROR;
+			break;
+		}
+	}
+	sinf[hkey->package.count] = -1;
+
+ free_buffer:
+	kfree(buffer.pointer);
+	return_VALUE(status == AE_OK ? AE_OK : AE_ERROR);
+}
+
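acpi_pcc_retrieve_biosdata() is the only path to the SINF package, so every read interface below boils down to indexing the returned array with one of the SINF_BITS values. A minimal sketch of that pattern (hypothetical helper, assuming a fully initialized struct acpi_hotkey; it is not part of the driver):

static int pcc_get_ac_brightness(struct acpi_hotkey *hotkey, u32 *bright)
{
	u32 sinf[hotkey->num_sifr + 1];	/* variable-length array, as in the driver */

	if (ACPI_FAILURE(acpi_pcc_retrieve_biosdata(hotkey, sinf)))
		return -EIO;

	*bright = sinf[SINF_AC_CUR_BRIGHT];
	return 0;
}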
+static int acpi_pcc_read_sinf_field(struct seq_file *seq, int field)
+{
+	struct acpi_hotkey *hotkey = (struct acpi_hotkey *) seq->private;
+	u32 sinf[hotkey->num_sifr + 1];
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_read_sinf_field");
+
+	if (ACPI_SUCCESS(acpi_pcc_retrieve_biosdata(hotkey, sinf)))
+		seq_printf(seq, "%u\n",	sinf[field]);
+	else
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't retrieve BIOS data\n"));
+
+	return_VALUE(AE_OK);
+}
+
+/* -------------------------------------------------------------------------
+   user interface functions
+   ------------------------------------------------------------------------- */
+/* read methods */
+/* Sinf read methods */
+#define PCC_SINF_READ_F(_name_, FUNC) \
+static int _name_(struct seq_file *seq, void *offset) \
+{ \
+	return_VALUE(ACPI_SUCCESS(acpi_pcc_read_sinf_field(seq,	\
+							  (FUNC))) \
+							  ? 0 : -EINVAL); \
+}
+
+PCC_SINF_READ_F(acpi_pcc_numbatteries_show,	 SINF_NUM_BATTERIES);
+PCC_SINF_READ_F(acpi_pcc_lcdtype_show,		 SINF_LCD_TYPE);
+PCC_SINF_READ_F(acpi_pcc_ac_brightness_max_show, SINF_AC_MAX_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_ac_brightness_min_show, SINF_AC_MIN_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_ac_brightness_show,	 SINF_AC_CUR_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_dc_brightness_max_show, SINF_DC_MAX_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_dc_brightness_min_show, SINF_DC_MIN_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_dc_brightness_show,	 SINF_DC_CUR_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_brightness_show,	 SINF_AC_CUR_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_mute_show,		 SINF_MUTE);
+
+static int acpi_pcc_sticky_key_show(struct seq_file *seq, void *offset)
+{
+	struct acpi_hotkey *hotkey = seq->private;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_sticky_key_show");
+
+	if (!hotkey || !hotkey->device)
+		return_VALUE(-EINVAL);
+
+	seq_printf(seq, "%d\n", hotkey->sticky_mode);
+
+	return_VALUE(0);
+}
+
+static int acpi_pcc_keyinput_show(struct seq_file *seq, void *offset)
+{
+	struct acpi_hotkey 	*hotkey = seq->private;
+	struct input_dev 	*hotk_input_dev = hotkey->input_dev;
+	struct pcc_keyinput 	*keyinput = input_get_drvdata(hotk_input_dev);
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_keyinput_show");
+
+	seq_printf(seq, "%d\n", keyinput->key_mode);
+
+	return_VALUE(0);
+}
+
+static int acpi_pcc_version_show(struct seq_file *seq, void *offset)
+{
+	struct acpi_hotkey *hotkey = seq->private;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_version_show");
+
+	if (!hotkey || !hotkey->device)
+		return_VALUE(-EINVAL);
+
+	seq_printf(seq, "%s version %s\n", ACPI_PCC_DRIVER_NAME,
+		   ACPI_PCC_VERSION);
+	seq_printf(seq, "%li functions\n", hotkey->num_sifr);
+
+	return_VALUE(0);
+}
+
+/* write methods */
+static ssize_t acpi_pcc_write_single_flag(struct file *file,
+					  const char __user *buffer,
+					  size_t count,
+					  int sinf_func)
+{
+	struct seq_file		*seq = file->private_data;
+	struct acpi_hotkey	*hotkey = seq->private;
+	char			write_string[PROC_STR_MAX_LEN];
+	u32			val;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_write_single_flag");
+
+	if (!hotkey || (count > sizeof(write_string) - 1))
+		return_VALUE(-EINVAL);
+
+	if (copy_from_user(write_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	write_string[count] = '\0';
+
+	if ((sscanf(write_string, "%3i", &val) == 1) &&
+	    (val == 0 || val == 1))
+		acpi_pcc_write_sset(hotkey, sinf_func, val);
+
+	return_VALUE(count);
+}
+
+static unsigned long acpi_pcc_write_brightness(struct file *file,
+					       const char __user *buffer,
+					       size_t count,
+					       int min_index, int max_index,
+					       int cur_index)
+{
+	struct seq_file		*seq = (struct seq_file *)file->private_data;
+	struct acpi_hotkey	*hotkey = (struct acpi_hotkey *)seq->private;
+	char			write_string[PROC_STR_MAX_LEN];
+	u32 bright;
+	u32 sinf[hotkey->num_sifr + 1];
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_write_brightness");
+
+	if (!hotkey || (count > sizeof(write_string) - 1))
+		return_VALUE(-EINVAL);
+
+	if (copy_from_user(write_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	write_string[count] = '\0';
+
+	if (ACPI_FAILURE(acpi_pcc_retrieve_biosdata(hotkey, sinf))) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't retrieve BIOS data\n"));
+		goto end;
+	}
+
+	if ((sscanf(write_string, "%4i", &bright) == 1) &&
+	    (bright >= sinf[min_index]) &&
+	    (bright <= sinf[max_index]))
+		acpi_pcc_write_sset(hotkey, cur_index, bright);
+
+end:
+	return_VALUE(count);
+}
+
+static ssize_t acpi_pcc_write_ac_brightness(struct file *file,
+					    const char __user *buffer,
+					    size_t count, loff_t *ppos)
+{
+	return_VALUE(acpi_pcc_write_brightness(file, buffer, count,
+					       SINF_AC_MIN_BRIGHT,
+					       SINF_AC_MAX_BRIGHT,
+					       SINF_AC_CUR_BRIGHT));
+}
+
+static ssize_t acpi_pcc_write_dc_brightness(struct file *file,
+					    const char __user *buffer,
+					    size_t count, loff_t *ppos)
+{
+	return_VALUE(acpi_pcc_write_brightness(file, buffer, count,
+					       SINF_DC_MIN_BRIGHT,
+					       SINF_DC_MAX_BRIGHT,
+					       SINF_DC_CUR_BRIGHT));
+}
+
+static ssize_t acpi_pcc_write_no_brightness(struct file *file,
+					    const char __user *buffer,
+					    size_t count, loff_t *ppos)
+{
+	return acpi_pcc_write_brightness(file, buffer, count,
+					 SINF_AC_MIN_BRIGHT,
+					 SINF_AC_MAX_BRIGHT,
+					 SINF_AC_CUR_BRIGHT);
+}
+
+static ssize_t acpi_pcc_write_mute(struct file *file,
+				   const char __user *buffer,
+				   size_t count, loff_t *ppos)
+{
+	return_VALUE(acpi_pcc_write_single_flag(file, buffer, count,
+						SINF_MUTE));
+}
+
+static ssize_t acpi_pcc_write_sticky_key(struct file *file,
+					 const char __user *buffer,
+					 size_t count, loff_t *ppos)
+{
+	struct seq_file     *seq = (struct seq_file *)file->private_data;
+	struct acpi_hotkey  *hotkey = (struct acpi_hotkey *)seq->private;
+	char                 write_string[PROC_STR_MAX_LEN];
+	int                  mode;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_write_sticky_key");
+
+	if (!hotkey || (count > sizeof(write_string) - 1))
+		return_VALUE(-EINVAL);
+
+	if (copy_from_user(write_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	write_string[count] = '\0';
+
+	if ((sscanf(write_string, "%3i", &mode) == 1) &&
+	    (mode == 0 || mode == 1)) {
+		acpi_pcc_write_sset(hotkey, SINF_STICKY_KEY, mode);
+		hotkey->sticky_mode = mode;
+	}
+
+	return_VALUE(count);
+}
+
+static ssize_t acpi_pcc_write_keyinput(struct file *file,
+				       const char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	struct seq_file		*seq = (struct seq_file *)file->private_data;
+	struct acpi_hotkey	*hotkey = (struct acpi_hotkey *)seq->private;
+	struct pcc_keyinput 	*keyinput;
+	char			write_string[PROC_STR_MAX_LEN];
+	int			key_mode;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_write_keyinput");
+
+	if (!hotkey || (count > (sizeof(write_string) - 1)))
+		return_VALUE(-EINVAL);
+
+	if (copy_from_user(write_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	write_string[count] = '\0';
+
+	if ((sscanf(write_string, "%4i", &key_mode) == 1) &&
+	    (key_mode == 0 || key_mode == 1)) {
+		keyinput = input_get_drvdata(hotkey->input_dev);
+		keyinput->key_mode = key_mode;
+	}
+
+	return_VALUE(count);
+}
+
+/* -------------------------------------------------------------------------
+   hotkey driver
+   ------------------------------------------------------------------------- */
+static void acpi_pcc_generete_keyinput(struct acpi_hotkey *hotkey)
+{
+	struct input_dev    *hotk_input_dev = hotkey->input_dev;
+	struct pcc_keyinput *keyinput = input_get_drvdata(hotk_input_dev);
+	int hinf = hotkey->status;
+	int key_code, hkey_num;
+	const int key_map[] = {
+		/*  0 */ -1,
+		/*  1 */ KEY_BRIGHTNESSDOWN,
+		/*  2 */ KEY_BRIGHTNESSUP,
+		/*  3 */ -1, /* the vga/lcd switch event does not occur in
+				the hotkey driver. */
+		/*  4 */ KEY_MUTE,
+		/*  5 */ KEY_VOLUMEDOWN,
+		/*  6 */ KEY_VOLUMEUP,
+		/*  7 */ KEY_SLEEP,
+		/*  8 */ -1, /* Change CPU boost: do nothing */
+		/*  9 */ KEY_BATT,
+		/* 10 */ KEY_SUSPEND,
+	};
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_generete_keyinput");
+
+	if (keyinput->key_mode == 0)
+		return_VOID;
+
+	hkey_num = hinf & 0xf;
+
+	if ((0 > hkey_num) ||
+	    (hkey_num >= ARRAY_SIZE(key_map))) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "hotkey number out of range: %d\n",
+				  hkey_num));
+		return_VOID;
+	}
+
+	key_code = key_map[hkey_num];
+
+	if (key_code != -1) {
+		int pushed = (hinf & 0x80) ? TRUE : FALSE;
+
+		input_report_key(hotk_input_dev, key_code, pushed);
+		input_sync(hotk_input_dev);
+	}
+}
+
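For reference, a worked trace of the decode above, assuming keyinput->key_mode is enabled:

/*
 * hotkey->status == 0x84: hkey_num = 0x84 & 0xf = 4, key_map[4] = KEY_MUTE,
 *                         bit 0x80 set   -> a KEY_MUTE press is reported.
 * hotkey->status == 0x04: same key, bit 0x80 clear -> KEY_MUTE release.
 * hotkey->status == 0x83: key_map[3] == -1 -> the event is dropped.
 */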
+static int acpi_pcc_hotkey_get_key(struct acpi_hotkey *hotkey)
+{
+	unsigned long result;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_get_key");
+
+	status = acpi_evaluate_integer(hotkey->handle, METHOD_HKEY_QUERY,
+								NULL, &result);
+	if (likely(ACPI_SUCCESS(status)))
+		hotkey->status = result;
+	else
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"error getting hotkey status\n"));
+
+	return_VALUE(status == AE_OK);
+}
+
+void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct acpi_hotkey *hotkey = (struct acpi_hotkey *) data;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
+
+	switch (event) {
+	case HKEY_NOTIFY:
+		if (acpi_pcc_hotkey_get_key(hotkey)) {
+			/* generate an event like "pcc HKEY 00000080 00000084"
+			 * when Fn+F4 is pressed */
+			acpi_bus_generate_proc_event(hotkey->device, event,
+								hotkey->status);
+		}
+		acpi_pcc_generete_keyinput(hotkey);
+		break;
+	default:
+		/* nothing to do */
+		break;
+	}
+	return_VOID;
+}
+
+/* *************************************************************************
+   FS Interface (/proc)
+   ************************************************************************* */
+/* open proc file fs */
+SEQ_OPEN_FS(acpi_pcc_dc_brightness_open_fs, acpi_pcc_dc_brightness_show);
+SEQ_OPEN_FS(acpi_pcc_numbatteries_open_fs, acpi_pcc_numbatteries_show);
+SEQ_OPEN_FS(acpi_pcc_lcdtype_open_fs, acpi_pcc_lcdtype_show);
+SEQ_OPEN_FS(acpi_pcc_ac_brightness_max_open_fs,
+	    acpi_pcc_ac_brightness_max_show);
+SEQ_OPEN_FS(acpi_pcc_ac_brightness_min_open_fs,
+	    acpi_pcc_ac_brightness_min_show);
+SEQ_OPEN_FS(acpi_pcc_ac_brightness_open_fs, acpi_pcc_ac_brightness_show);
+SEQ_OPEN_FS(acpi_pcc_dc_brightness_max_open_fs,
+	    acpi_pcc_dc_brightness_max_show);
+SEQ_OPEN_FS(acpi_pcc_dc_brightness_min_open_fs,
+	    acpi_pcc_dc_brightness_min_show);
+SEQ_OPEN_FS(acpi_pcc_brightness_open_fs, acpi_pcc_brightness_show);
+SEQ_OPEN_FS(acpi_pcc_mute_open_fs, acpi_pcc_mute_show);
+SEQ_OPEN_FS(acpi_pcc_version_open_fs, acpi_pcc_version_show);
+SEQ_OPEN_FS(acpi_pcc_keyinput_open_fs, acpi_pcc_keyinput_show);
+SEQ_OPEN_FS(acpi_pcc_sticky_key_open_fs, acpi_pcc_sticky_key_show);
+
+static struct file_operations acpi_pcc_numbatteries_fops =
+	SEQ_FILEOPS_R(acpi_pcc_numbatteries_open_fs);
+static struct file_operations acpi_pcc_lcdtype_fops =
+	SEQ_FILEOPS_R(acpi_pcc_lcdtype_open_fs);
+static struct file_operations acpi_pcc_mute_fops =
+	SEQ_FILEOPS_RW(acpi_pcc_mute_open_fs, acpi_pcc_write_mute);
+static struct file_operations acpi_pcc_ac_brightness_fops =
+	SEQ_FILEOPS_RW(acpi_pcc_ac_brightness_open_fs,
+		       acpi_pcc_write_ac_brightness);
+static struct file_operations acpi_pcc_ac_brightness_max_fops =
+	SEQ_FILEOPS_R(acpi_pcc_ac_brightness_max_open_fs);
+static struct file_operations acpi_pcc_ac_brightness_min_fops =
+	SEQ_FILEOPS_R(acpi_pcc_ac_brightness_min_open_fs);
+static struct file_operations acpi_pcc_dc_brightness_fops =
+	SEQ_FILEOPS_RW(acpi_pcc_dc_brightness_open_fs,
+		       acpi_pcc_write_dc_brightness);
+static struct file_operations acpi_pcc_dc_brightness_max_fops =
+	SEQ_FILEOPS_R(acpi_pcc_dc_brightness_max_open_fs);
+static struct file_operations acpi_pcc_dc_brightness_min_fops =
+	SEQ_FILEOPS_R(acpi_pcc_dc_brightness_min_open_fs);
+static struct file_operations acpi_pcc_brightness_fops =
+	SEQ_FILEOPS_RW(acpi_pcc_brightness_open_fs,
+		       acpi_pcc_write_no_brightness);
+static struct file_operations acpi_pcc_sticky_key_fops =
+	SEQ_FILEOPS_RW(acpi_pcc_sticky_key_open_fs, acpi_pcc_write_sticky_key);
+static struct file_operations acpi_pcc_keyinput_fops =
+	SEQ_FILEOPS_RW(acpi_pcc_keyinput_open_fs, acpi_pcc_write_keyinput);
+static struct file_operations acpi_pcc_version_fops =
+	SEQ_FILEOPS_R(acpi_pcc_version_open_fs);
+
+struct proc_item {
+	const char *name;
+	struct file_operations *fops;
+	mode_t flag;
+};
+
+/* Note: These functions map *exactly* to the SINF/SSET functions */
+struct proc_item acpi_pcc_proc_items_sifr[] = {
+	{ "num_batteries", &acpi_pcc_numbatteries_fops, S_IRUGO },
+	{ "lcd_type", &acpi_pcc_lcdtype_fops, S_IRUGO },
+	{ "ac_brightness_max", &acpi_pcc_ac_brightness_max_fops, S_IRUGO },
+	{ "ac_brightness_min", &acpi_pcc_ac_brightness_min_fops, S_IRUGO },
+	{ "ac_brightness", &acpi_pcc_ac_brightness_fops,
+		S_IFREG | S_IRUGO | S_IWUSR },
+	{ "dc_brightness_max", &acpi_pcc_dc_brightness_max_fops, S_IRUGO },
+	{ "dc_brightness_min", &acpi_pcc_dc_brightness_min_fops, S_IRUGO },
+	{ "dc_brightness", &acpi_pcc_dc_brightness_fops,
+		S_IFREG | S_IRUGO | S_IWUSR },
+	{ "brightness", &acpi_pcc_brightness_fops, S_IFREG | S_IRUGO | S_IWUSR},
+	{ "mute", &acpi_pcc_mute_fops, S_IFREG | S_IRUGO | S_IWUSR },
+	{ NULL, NULL, 0 },
+};
+
+struct proc_item acpi_pcc_proc_items[] = {
+	{ "sticky_key", &acpi_pcc_sticky_key_fops, S_IFREG | S_IRUGO | S_IWUSR},
+	{ "keyinput", &acpi_pcc_keyinput_fops, S_IFREG | S_IRUGO | S_IWUSR },
+	{ "version", &acpi_pcc_version_fops, S_IRUGO },
+	{ NULL, NULL, 0 },
+};
+
+static int __devinit acpi_pcc_add_device(struct acpi_device *device,
+					 struct proc_item *proc_items,
+					 int num)
+{
+	struct acpi_hotkey *hotkey = acpi_driver_data(device);
+	struct proc_dir_entry *proc;
+	struct proc_item *item;
+	int i;
+
+	for (item = proc_items, i = 0; item->name && i < num; ++item, ++i) {
+		proc = create_proc_entry(item->name, item->flag,
+					 hotkey->proc_dir_entry);
+		if (likely(proc)) {
+			proc->proc_fops = item->fops;
+			proc->data = hotkey;
+			proc->owner = THIS_MODULE;
+		} else {
+			while (i-- > 0) {
+				item--;
+				remove_proc_entry(item->name,
+					hotkey->proc_dir_entry);
+			}
+			return_VALUE(-ENODEV);
+		}
+	}
+	return_VALUE(0);
+}
+
+static int __devinit acpi_pcc_proc_init(struct acpi_device *device)
+{
+	struct proc_dir_entry *acpi_pcc_dir;
+	struct acpi_hotkey    *hotkey = acpi_driver_data(device);
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_proc_init");
+
+	acpi_pcc_dir = proc_mkdir(PROC_PCC, acpi_root_dir);
+
+	if (unlikely(!acpi_pcc_dir)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't create dir in /proc\n"));
+		return_VALUE(-ENODEV);
+	}
+
+	acpi_pcc_dir->owner = THIS_MODULE;
+	hotkey->proc_dir_entry = acpi_pcc_dir;
+
+	status =  acpi_pcc_add_device(device, acpi_pcc_proc_items_sifr,
+				      hotkey->num_sifr);
+	status |= acpi_pcc_add_device(device, acpi_pcc_proc_items,
+				      ARRAY_SIZE(acpi_pcc_proc_items));
+	if (unlikely(status)) {
+		remove_proc_entry(PROC_PCC, acpi_root_dir);
+		hotkey->proc_dir_entry = NULL;
+		return_VALUE(-ENODEV);
+	}
+
+	return_VALUE(status);
+}
+
+static void __devexit acpi_pcc_remove_device(struct acpi_device *device,
+					     struct proc_item *proc_items,
+					     int num)
+{
+	struct acpi_hotkey *hotkey = acpi_driver_data(device);
+	struct proc_item *item;
+	int i;
+
+	for (item = proc_items, i = 0;
+	     item->name != NULL && i < num;
+	     ++item, ++i) {
+		remove_proc_entry(item->name, hotkey->proc_dir_entry);
+	}
+
+	return_VOID;
+}
+
+/* *************************************************************************
+   Power Management
+   ************************************************************************* */
+#ifdef CONFIG_PM
+static int acpi_pcc_hotkey_resume(struct acpi_device *device)
+{
+	struct acpi_hotkey *hotkey = acpi_driver_data(device);
+	acpi_status	    status = AE_OK;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
+
+	if (device == NULL || hotkey == NULL)
+		return_VALUE(-EINVAL);
+
+	if (hotkey->num_sifr != 0) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Sticky mode restore: %d\n",
+				 hotkey->sticky_mode));
+
+		status = acpi_pcc_write_sset(hotkey, SINF_STICKY_KEY,
+					     hotkey->sticky_mode);
+	}
+	if (status != AE_OK)
+		return_VALUE(-EINVAL);
+
+	return_VALUE(0);
+}
+#endif
+
+/* *************************************************************************
+   Module init/remove
+   ************************************************************************* */
+/* -------------------------------------------------------------------------
+   input
+   ------------------------------------------------------------------------- */
+static int __devinit acpi_pcc_init_input(struct acpi_hotkey *hotkey)
+{
+	struct input_dev    *hotk_input_dev;
+	struct pcc_keyinput *pcc_keyinput;
+	int error;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
+
+	hotk_input_dev = input_allocate_device();
+	if (hotk_input_dev == NULL) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't allocate input device for hotkey"));
+		goto err_input;
+	}
+
+	pcc_keyinput = kcalloc(1, sizeof(struct pcc_keyinput), GFP_KERNEL);
+
+	if (pcc_keyinput == NULL) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't allocate mem for private data"));
+		goto err_pcc;
+	}
+
+	hotk_input_dev->evbit[0] = BIT(EV_KEY);
+
+	set_bit(KEY_BRIGHTNESSDOWN, hotk_input_dev->keybit);
+	set_bit(KEY_BRIGHTNESSUP,   hotk_input_dev->keybit);
+	set_bit(KEY_MUTE,	    hotk_input_dev->keybit);
+	set_bit(KEY_VOLUMEDOWN,	    hotk_input_dev->keybit);
+	set_bit(KEY_VOLUMEUP,	    hotk_input_dev->keybit);
+	set_bit(KEY_SLEEP,	    hotk_input_dev->keybit);
+	set_bit(KEY_BATT,	    hotk_input_dev->keybit);
+	set_bit(KEY_SUSPEND,	    hotk_input_dev->keybit);
+
+	hotk_input_dev->name       = ACPI_PCC_DRIVER_NAME;
+	hotk_input_dev->phys       = ACPI_PCC_INPUT_PHYS;
+	hotk_input_dev->id.bustype = BUS_PCC_HOTKEY;
+	hotk_input_dev->id.vendor  = 0x0001;
+	hotk_input_dev->id.product = 0x0001;
+	hotk_input_dev->id.version = 0x0100;
+
+	pcc_keyinput->key_mode = PCC_KEYINPUT_MODE;
+	pcc_keyinput->hotkey   = hotkey;
+
+	input_set_drvdata(hotk_input_dev, pcc_keyinput);
+
+	hotkey->input_dev = hotk_input_dev;
+
+	error = input_register_device(hotk_input_dev);
+
+	if (error)
+		goto err_pcc;
+
+	return_VALUE(0);
+
+ err_pcc:
+	input_unregister_device(hotk_input_dev);
+ err_input:
+	return_VALUE(-ENOMEM);
+}
+
+static void __devexit acpi_pcc_remove_input(struct acpi_hotkey *hotkey)
+{
+	struct input_dev    *hotk_input_dev;
+	struct pcc_keyinput *pcc_keyinput;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_remove_input");
+
+	if (hotkey == NULL) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Can't free memory"));
+		return_VOID;
+	}
+
+	hotk_input_dev = hotkey->input_dev;
+	pcc_keyinput   = input_get_drvdata(hotk_input_dev);
+
+	input_unregister_device(hotk_input_dev);
+
+	kfree(pcc_keyinput);
+}
+
+/* -------------------------------------------------------------------------
+   ACPI
+   ------------------------------------------------------------------------- */
+static int __devinit acpi_pcc_hotkey_add(struct acpi_device *device)
+{
+	acpi_status		status = AE_OK;
+	struct acpi_hotkey	*hotkey = NULL;
+	int sifr_status, num_sifr, result;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
+
+	if (device == NULL)
+		return_VALUE(-EINVAL);
+
+	sifr_status = acpi_pcc_get_sqty(device);
+
+	if (sifr_status > 255) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large"));
+		return_VALUE(-ENODEV);
+	}
+
+	if (sifr_status < 0) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SQTY not supported"));
+		num_sifr = 0;
+	} else {
+		num_sifr = sifr_status;
+	}
+
+	hotkey = kcalloc(1, sizeof(struct acpi_hotkey), GFP_KERNEL);
+	if (hotkey == NULL) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't allocate mem for hotkey"));
+		return_VALUE(-ENOMEM);
+	}
+
+	hotkey->device   = device;
+	hotkey->handle   = device->handle;
+	hotkey->num_sifr = num_sifr;
+	acpi_driver_data(device) = hotkey;
+	strcpy(acpi_device_name(device),  ACPI_PCC_DEVICE_NAME);
+	strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
+
+	status = acpi_install_notify_handler(hotkey->handle,
+					     ACPI_DEVICE_NOTIFY,
+					     acpi_pcc_hotkey_notify,
+					     hotkey);
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Error installing notify handler\n"));
+		kfree(hotkey);
+		return_VALUE(-ENODEV);
+	}
+
+	result = acpi_pcc_init_input(hotkey);
+	if (result != 0) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Error installing keyinput handler\n"));
+		kfree(hotkey);
+		return_VALUE(result);
+	}
+
+	return_VALUE(acpi_pcc_proc_init(device));
+}
+
+static int __devexit acpi_pcc_hotkey_remove(struct acpi_device *device,
+					    int type)
+{
+	acpi_status		status = AE_OK;
+	struct acpi_hotkey	*hotkey = acpi_driver_data(device);
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
+
+	if (!device || !hotkey)
+		return_VALUE(-EINVAL);
+
+	if (hotkey->proc_dir_entry) {
+		acpi_pcc_remove_device(device, acpi_pcc_proc_items_sifr,
+				       hotkey->num_sifr);
+		acpi_pcc_remove_device(device, acpi_pcc_proc_items,
+				       ARRAY_SIZE(acpi_pcc_proc_items));
+		remove_proc_entry(PROC_PCC, acpi_root_dir);
+	}
+
+	status = acpi_remove_notify_handler(hotkey->handle,
+		    ACPI_DEVICE_NOTIFY, acpi_pcc_hotkey_notify);
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Error removing notify handler\n"));
+	}
+
+	acpi_pcc_remove_input(hotkey);
+	kfree(hotkey);
+	return_VALUE(status == AE_OK);
+}
+
+/* *********************************************************************
+   Module entry point
+   ********************************************************************* */
+static int __init acpi_pcc_init(void)
+{
+	int result;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_init");
+
+	printk(KERN_INFO LOGPREFIX "loading...\n");
+
+	if (acpi_disabled) {
+		printk(KERN_INFO LOGPREFIX "ACPI disabled.\n");
+		return_VALUE(-ENODEV);
+	}
+
+	result = acpi_bus_register_driver(&acpi_pcc_driver);
+	if (result < 0) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Error registering hotkey driver\n"));
+		return_VALUE(-ENODEV);
+	}
+
+	return_VALUE(result);
+}
+
+static void __exit acpi_pcc_exit(void)
+{
+	ACPI_FUNCTION_TRACE("acpi_pcc_exit");
+
+	printk(KERN_INFO LOGPREFIX "unloading...\n");
+
+	acpi_bus_unregister_driver(&acpi_pcc_driver);
+
+	return_VOID;
+}
+
+module_init(acpi_pcc_init);
+module_exit(acpi_pcc_exit);
diff --git a/drivers/staging/poch/Kconfig b/drivers/staging/poch/Kconfig
new file mode 100644
index 0000000..b3b33b9
--- /dev/null
+++ b/drivers/staging/poch/Kconfig
@@ -0,0 +1,6 @@
+config POCH
+	tristate "Redrapids Pocket Change CardBus support"
+	depends on PCI && UIO
+	default n
+	---help---
+	  Enable support for Redrapids Pocket Change CardBus devices.
diff --git a/drivers/staging/poch/Makefile b/drivers/staging/poch/Makefile
new file mode 100644
index 0000000..d2b9680
--- /dev/null
+++ b/drivers/staging/poch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POCH)		+= poch.o
diff --git a/drivers/staging/poch/README b/drivers/staging/poch/README
new file mode 100644
index 0000000..f65e979
--- /dev/null
+++ b/drivers/staging/poch/README
@@ -0,0 +1,7 @@
+TODO:
+	- fix transmit overflows
+	- audit userspace interfaces
+	- get reserved major/minor if needed
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+Vijay Kumar <vijaykumar@bravegnu.org> and Jaya Kumar <jayakumar.lkml@gmail.com>
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
new file mode 100644
index 0000000..0e113f9
--- /dev/null
+++ b/drivers/staging/poch/poch.c
@@ -0,0 +1,1425 @@
+/*
+ * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
+ *
+ * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
+ *
+ * Licensed under GPL version 2 only.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+#include <linux/spinlock.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/io.h>
+
+#include "poch.h"
+
+#include <asm/cacheflush.h>
+
+#ifndef PCI_VENDOR_ID_RRAPIDS
+#define PCI_VENDOR_ID_RRAPIDS 0x17D2
+#endif
+
+#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
+#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
+#endif
+
+#define POCH_NCHANNELS 2
+
+#define MAX_POCH_CARDS 8
+#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
+
+#define DRV_NAME "poch"
+#define PFX      DRV_NAME ": "
+
+/*
+ * BAR0 Bridge Register Definitions
+ */
+
+#define BRIDGE_REV_REG			0x0
+#define BRIDGE_INT_MASK_REG		0x4
+#define BRIDGE_INT_STAT_REG		0x8
+
+#define BRIDGE_INT_ACTIVE		(0x1 << 31)
+#define BRIDGE_INT_FPGA		        (0x1 << 2)
+#define BRIDGE_INT_TEMP_FAIL		(0x1 << 1)
+#define	BRIDGE_INT_TEMP_WARN		(0x1 << 0)
+
+#define BRIDGE_FPGA_RESET_REG		0xC
+
+#define BRIDGE_CARD_POWER_REG		0x10
+#define BRIDGE_CARD_POWER_EN            (0x1 << 0)
+#define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
+
+#define BRIDGE_JTAG_REG			0x14
+#define BRIDGE_DMA_GO_REG		0x18
+#define BRIDGE_STAT_0_REG		0x1C
+#define BRIDGE_STAT_1_REG		0x20
+#define BRIDGE_STAT_2_REG		0x24
+#define BRIDGE_STAT_3_REG		0x28
+			     /* 4 = the R1 only handles SINF_AC_CUR_BRIGHT
+			      * as SINF_CUR_BRIGHT and does not know the AC state */
+#define BRIDGE_EEPROM_REVSEL_REG	0x34
+#define BRIDGE_CIS_STRUCT_REG		0x100
+#define BRIDGE_BOARDREV_REG		0x124
+
+/*
+ * BAR1 FPGA Register Definitions
+ */
+
+#define FPGA_IFACE_REV_REG		0x0
+#define FPGA_RX_BLOCK_SIZE_REG		0x8
+#define FPGA_TX_BLOCK_SIZE_REG		0xC
+#define FPGA_RX_BLOCK_COUNT_REG		0x10
+#define FPGA_TX_BLOCK_COUNT_REG		0x14
+#define FPGA_RX_CURR_DMA_BLOCK_REG	0x18
+#define FPGA_TX_CURR_DMA_BLOCK_REG	0x1C
+#define FPGA_RX_GROUP_COUNT_REG		0x20
+#define FPGA_TX_GROUP_COUNT_REG		0x24
+#define FPGA_RX_CURR_GROUP_REG		0x28
+#define FPGA_TX_CURR_GROUP_REG		0x2C
+#define FPGA_RX_CURR_PCI_REG		0x38
+#define FPGA_TX_CURR_PCI_REG		0x3C
+#define FPGA_RX_GROUP0_START_REG	0x40
+#define FPGA_TX_GROUP0_START_REG	0xC0
+#define FPGA_DMA_DESC_1_REG		0x140
+#define FPGA_DMA_DESC_2_REG		0x144
+#define FPGA_DMA_DESC_3_REG		0x148
+#define FPGA_DMA_DESC_4_REG		0x14C
+
+#define FPGA_DMA_INT_STAT_REG		0x150
+#define FPGA_DMA_INT_MASK_REG		0x154
+#define FPGA_DMA_INT_RX		(1 << 0)
+#define FPGA_DMA_INT_TX		(1 << 1)
+
+#define FPGA_RX_GROUPS_PER_INT_REG	0x158
+#define FPGA_TX_GROUPS_PER_INT_REG	0x15C
+#define FPGA_DMA_ADR_PAGE_REG		0x160
+#define FPGA_FPGA_REV_REG		0x200
+
+#define FPGA_ADC_CLOCK_CTL_REG		0x204
+#define FPGA_ADC_CLOCK_CTL_OSC_EN	(0x1 << 3)
+#define FPGA_ADC_CLOCK_LOCAL_CLK	(0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
+#define FPGA_ADC_CLOCK_EXT_SAMP_CLK	0X0
+
+#define FPGA_ADC_DAC_EN_REG		0x208
+#define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
+#define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
+
+#define FPGA_INT_STAT_REG		0x20C
+#define FPGA_INT_MASK_REG		0x210
+#define FPGA_INT_PLL_UNLOCKED		(0x1 << 9)
+#define FPGA_INT_DMA_CORE		(0x1 << 8)
+#define FPGA_INT_TX_FF_EMPTY		(0x1 << 7)
+#define FPGA_INT_RX_FF_EMPTY		(0x1 << 6)
+#define FPGA_INT_TX_FF_OVRFLW		(0x1 << 3)
+#define FPGA_INT_RX_FF_OVRFLW		(0x1 << 2)
+#define FPGA_INT_TX_ACQ_DONE		(0x1 << 1)
+#define FPGA_INT_RX_ACQ_DONE		(0x1)
+
+#define FPGA_RX_ADC_CTL_REG		0x214
+#define FPGA_RX_ADC_CTL_CONT_CAP	(0x0)
+#define FPGA_RX_ADC_CTL_SNAP_CAP	(0x1)
+
+#define FPGA_RX_ARM_REG			0x21C
+
+#define FPGA_DOM_REG			0x224
+#define	FPGA_DOM_DCM_RESET		(0x1 << 5)
+#define FPGA_DOM_SOFT_RESET		(0x1 << 4)
+#define FPGA_DOM_DUAL_M_SG_DMA		(0x0)
+#define FPGA_DOM_TARGET_ACCESS		(0x1)
+
+#define FPGA_TX_CTL_REG			0x228
+#define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
+#define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
+#define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
+#define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
+#define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
+#define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
+
+#define FPGA_ENDIAN_MODE_REG		0x22C
+#define FPGA_RX_FIFO_COUNT_REG		0x28C
+#define FPGA_TX_ENABLE_REG		0x298
+#define FPGA_TX_TRIGGER_REG		0x29C
+#define FPGA_TX_DATAMEM_COUNT_REG	0x2A8
+#define FPGA_CAP_FIFO_REG		0x300
+#define FPGA_TX_SNAPSHOT_REG		0x8000
+
+/*
+ * Channel Index Definitions
+ */
+
+enum {
+	CHNO_RX_CHANNEL,
+	CHNO_TX_CHANNEL,
+};
+
+struct poch_dev;
+
+enum channel_dir {
+	CHANNEL_DIR_RX,
+	CHANNEL_DIR_TX,
+};
+
+struct poch_group_info {
+	struct page *pg;
+	dma_addr_t dma_addr;
+	unsigned long user_offset;
+};
+
+struct channel_info {
+	unsigned int chno;
+
+	atomic_t sys_block_size;
+	atomic_t sys_group_size;
+	atomic_t sys_group_count;
+
+	enum channel_dir dir;
+
+	unsigned long block_size;
+	unsigned long group_size;
+	unsigned long group_count;
+
+	/* Contains the DMA address and VM offset of each group. */
+	struct poch_group_info *groups;
+
+	/* Contains the header and circular buffer exported to userspace. */
+	spinlock_t group_offsets_lock;
+	struct poch_cbuf_header *header;
+	struct page *header_pg;
+	unsigned long header_size;
+
+	/* Last group indicated as 'complete' to user space. */
+	unsigned int transfer;
+
+	wait_queue_head_t wq;
+
+	union {
+		unsigned int data_available;
+		unsigned int space_available;
+	};
+
+	void __iomem *bridge_iomem;
+	void __iomem *fpga_iomem;
+	spinlock_t *iomem_lock;
+
+	atomic_t free;
+	atomic_t inited;
+
+	/* Error counters */
+	struct poch_counters counters;
+	spinlock_t counters_lock;
+
+	struct device *dev;
+};
+
+struct poch_dev {
+	struct uio_info uio;
+	struct pci_dev *pci_dev;
+	unsigned int nchannels;
+	struct channel_info channels[POCH_NCHANNELS];
+	struct cdev cdev;
+
+	/* Counts the no. of channels that have been opened. On first
+	 * open, the card is powered on. On last channel close, the
+	 * card is powered off.
+	 */
+	atomic_t usage;
+
+	void __iomem *bridge_iomem;
+	void __iomem *fpga_iomem;
+	spinlock_t iomem_lock;
+
+	struct device *dev;
+};
+
+static dev_t poch_first_dev;
+static struct class *poch_cls;
+static DEFINE_IDR(poch_ids);
+
+static ssize_t store_block_size(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct channel_info *channel = dev_get_drvdata(dev);
+	unsigned long block_size;
+
+	sscanf(buf, "%lu", &block_size);
+	atomic_set(&channel->sys_block_size, block_size);
+
+	return count;
+}
+static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
+
+static ssize_t store_group_size(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct channel_info *channel = dev_get_drvdata(dev);
+	unsigned long group_size;
+
+	sscanf(buf, "%lu", &group_size);
+	atomic_set(&channel->sys_group_size, group_size);
+
+	return count;
+}
+static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
+
+static ssize_t store_group_count(struct device *dev,
+				struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct channel_info *channel = dev_get_drvdata(dev);
+	unsigned long group_count;
+
+	sscanf(buf, "%lu", &group_count);
+	atomic_set(&channel->sys_group_count, group_count);
+
+	return count;
+}
+static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
+
+static ssize_t show_direction(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct channel_info *channel = dev_get_drvdata(dev);
+	int len;
+
+	len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
+	return len;
+}
+static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
+
+static ssize_t show_mmap_size(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct channel_info *channel = dev_get_drvdata(dev);
+	int len;
+	unsigned long mmap_size;
+	unsigned long group_pages;
+	unsigned long header_pages;
+	unsigned long total_group_pages;
+
+	/* FIXME: We do not have to add 1 if group_size is a multiple of
+	   PAGE_SIZE. */
+	group_pages = (channel->group_size / PAGE_SIZE) + 1;
+	header_pages = (channel->header_size / PAGE_SIZE) + 1;
+	total_group_pages = group_pages * channel->group_count;
+
+	mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
+	len = sprintf(buf, "%lu\n", mmap_size);
+	return len;
+}
+static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
+
+static struct device_attribute *poch_class_attrs[] = {
+	&dev_attr_block_size,
+	&dev_attr_group_size,
+	&dev_attr_group_count,
+	&dev_attr_dir,
+	&dev_attr_mmap_size,
+};
+
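The three writable attributes above latch the channel geometry that channel_latch_attr() later copies into the channel. A hypothetical userspace sketch that sets them before mapping a channel; the sysfs directory is an assumption, since the real location depends on the class and device names registered at probe time:

#include <stdio.h>

/* Hypothetical helper: write one numeric channel attribute. */
static int poch_set_attr(const char *sysfs_dir, const char *attr,
			 unsigned long val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", sysfs_dir, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lu\n", val);
	return fclose(f);
}

/*
 * Example use (directory name is an assumption):
 *   poch_set_attr(dir, "block_size", 4096);
 *   poch_set_attr(dir, "group_size", 65536);
 *   poch_set_attr(dir, "group_count", 32);
 */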
+static void poch_channel_free_groups(struct channel_info *channel)
+{
+	unsigned long i;
+
+	for (i = 0; i < channel->group_count; i++) {
+		struct poch_group_info *group;
+		unsigned int order;
+
+		group = &channel->groups[i];
+		order = get_order(channel->group_size);
+		if (group->pg)
+			__free_pages(group->pg, order);
+	}
+}
+
+static int poch_channel_alloc_groups(struct channel_info *channel)
+{
+	unsigned long i;
+	unsigned long group_pages;
+	unsigned long header_pages;
+
+	group_pages = (channel->group_size / PAGE_SIZE) + 1;
+	header_pages = (channel->header_size / PAGE_SIZE) + 1;
+
+	for (i = 0; i < channel->group_count; i++) {
+		struct poch_group_info *group;
+		unsigned int order;
+		gfp_t gfp_mask;
+
+		group = &channel->groups[i];
+		order = get_order(channel->group_size);
+
+		/*
+		 * __GFP_COMP is required here since we are going to
+		 * perform non-linear mapping to userspace. For more
+		 * information read the vm_insert_page() function
+		 * comments.
+		 */
+
+		gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_COMP | __GFP_ZERO;
+		group->pg = alloc_pages(gfp_mask, order);
+		if (!group->pg) {
+			poch_channel_free_groups(channel);
+			return -ENOMEM;
+		}
+
+		/* FIXME: This is the physical address not the bus
+		 * address!  This won't work in architectures that
+		 * have an IOMMU. Can we use pci_map_single() for
+		 * this?
+		 */
+		group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
+		group->user_offset =
+			(header_pages + (i * group_pages)) * PAGE_SIZE;
+
+		printk(KERN_INFO PFX "%ld: user_offset: 0x%lx dma: 0x%x\n", i,
+		       group->user_offset, group->dma_addr);
+	}
+
+	return 0;
+}
+
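The FIXME above notes that page_to_pfn() * PAGE_SIZE is a CPU physical address, not necessarily a bus address. A minimal sketch of the DMA-API route it suggests (hypothetical helper, not what the driver does; PCI_DMA_FROMDEVICE is assumed for the RX direction, and the driver's existing includes are assumed):

static int poch_map_group(struct pci_dev *pdev, struct poch_group_info *group,
			  unsigned long group_size)
{
	group->dma_addr = pci_map_single(pdev, page_address(group->pg),
					 group_size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, group->dma_addr))
		return -EIO;
	return 0;
}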
+static void channel_latch_attr(struct channel_info *channel)
+{
+	channel->group_count = atomic_read(&channel->sys_group_count);
+	channel->group_size = atomic_read(&channel->sys_group_size);
+	channel->block_size = atomic_read(&channel->sys_block_size);
+}
+
+/*
+ * Configure DMA group registers
+ */
+static void channel_dma_init(struct channel_info *channel)
+{
+	void __iomem *fpga = channel->fpga_iomem;
+	u32 group_regs_base;
+	u32 group_reg;
+	unsigned int page;
+	unsigned int group_in_page;
+	unsigned long i;
+	u32 block_size_reg;
+	u32 block_count_reg;
+	u32 group_count_reg;
+	u32 groups_per_int_reg;
+	u32 curr_pci_reg;
+
+	if (channel->chno == CHNO_RX_CHANNEL) {
+		group_regs_base = FPGA_RX_GROUP0_START_REG;
+		block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
+		block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
+		group_count_reg = FPGA_RX_GROUP_COUNT_REG;
+		groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
+		curr_pci_reg = FPGA_RX_CURR_PCI_REG;
+	} else {
+		group_regs_base = FPGA_TX_GROUP0_START_REG;
+		block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
+		block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
+		group_count_reg = FPGA_TX_GROUP_COUNT_REG;
+		groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
+		curr_pci_reg = FPGA_TX_CURR_PCI_REG;
+	}
+
+	printk(KERN_WARNING "block_size, group_size, group_count\n");
+	iowrite32(channel->block_size, fpga + block_size_reg);
+	iowrite32(channel->group_size / channel->block_size,
+		  fpga + block_count_reg);
+	iowrite32(channel->group_count, fpga + group_count_reg);
+	/* FIXME: Hardcoded groups per int. Get it from sysfs? */
+	iowrite32(1, fpga + groups_per_int_reg);
+
+	/* Unlock PCI address? Not defined in the data sheet, but used
+	 * in the reference code by Redrapids.
+	 */
+	iowrite32(0x1, fpga + curr_pci_reg);
+
+	/* The DMA address page register is shared between the RX and
+	 * TX channels, so acquire lock.
+	 */
+	spin_lock(channel->iomem_lock);
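+	/*
+	 * The group start-address registers are exposed in banks of 32;
+	 * FPGA_DMA_ADR_PAGE_REG selects the bank (page) that the write
+	 * below lands in, hence the i / 32 and i % 32 split.
+	 */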
+	for (i = 0; i < channel->group_count; i++) {
+		page = i / 32;
+		group_in_page = i % 32;
+
+		group_reg = group_regs_base + (group_in_page * 4);
+
+		iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
+		iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
+	}
+	for (i = 0; i < channel->group_count; i++) {
+		page = i / 32;
+		group_in_page = i % 32;
+
+		group_reg = group_regs_base + (group_in_page * 4);
+
+		iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
+		printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
+		       ioread32(fpga + group_reg));
+	}
+	spin_unlock(channel->iomem_lock);
+
+}
+
+static int poch_channel_alloc_header(struct channel_info *channel)
+{
+	struct poch_cbuf_header *header = channel->header;
+	unsigned long group_offset_size;
+	unsigned long tot_group_offsets_size;
+
+	/* Allocate memory to hold the header exported to userspace */
+	group_offset_size = sizeof(header->group_offsets[0]);
+	tot_group_offsets_size = group_offset_size * channel->group_count;
+	channel->header_size = sizeof(*header) + tot_group_offsets_size;
+	channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_COMP,
+					 get_order(channel->header_size));
+	if (!channel->header_pg)
+		return -ENOMEM;
+
+	channel->header = page_address(channel->header_pg);
+
+	return 0;
+}
+
+static void poch_channel_free_header(struct channel_info *channel)
+{
+	unsigned int order;
+
+	order = get_order(channel->header_size);
+	__free_pages(channel->header_pg, order);
+}
+
+static void poch_channel_init_header(struct channel_info *channel)
+{
+	int i;
+	struct poch_group_info *groups;
+	s32 *group_offsets;
+
+	channel->header->group_size_bytes = channel->group_size;
+	channel->header->group_count = channel->group_count;
+
+	spin_lock_init(&channel->group_offsets_lock);
+
+	group_offsets = channel->header->group_offsets;
+	groups = channel->groups;
+
+	for (i = 0; i < channel->group_count; i++) {
+		if (channel->dir == CHANNEL_DIR_RX)
+			group_offsets[i] = -1;
+		else
+			group_offsets[i] = groups[i].user_offset;
+	}
+}
+
+static void __poch_channel_clear_counters(struct channel_info *channel)
+{
+	channel->counters.pll_unlock = 0;
+	channel->counters.fifo_empty = 0;
+	channel->counters.fifo_overflow = 0;
+}
+
+static int poch_channel_init(struct channel_info *channel,
+			     struct poch_dev *poch_dev)
+{
+	struct pci_dev *pdev = poch_dev->pci_dev;
+	struct device *dev = &pdev->dev;
+	unsigned long alloc_size;
+	int ret;
+
+	printk(KERN_WARNING "channel_latch_attr\n");
+
+	channel_latch_attr(channel);
+
+	channel->transfer = 0;
+
+	/* Allocate memory to hold group information. */
+	alloc_size = channel->group_count * sizeof(struct poch_group_info);
+	channel->groups = kzalloc(alloc_size, GFP_KERNEL);
+	if (!channel->groups) {
+		dev_err(dev, "error allocating memory for group info\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	printk(KERN_WARNING "poch_channel_alloc_groups\n");
+
+	ret = poch_channel_alloc_groups(channel);
+	if (ret) {
+		dev_err(dev, "error allocating groups of order %d\n",
+			get_order(channel->group_size));
+		goto out_free_group_info;
+	}
+
+	ret = poch_channel_alloc_header(channel);
+	if (ret) {
+		dev_err(dev, "error allocating user space header\n");
+		goto out_free_groups;
+	}
+
+	channel->fpga_iomem = poch_dev->fpga_iomem;
+	channel->bridge_iomem = poch_dev->bridge_iomem;
+	channel->iomem_lock = &poch_dev->iomem_lock;
+	spin_lock_init(&channel->counters_lock);
+
+	__poch_channel_clear_counters(channel);
+
+	printk(KERN_WARNING "poch_channel_init_header\n");
+
+	poch_channel_init_header(channel);
+
+	return 0;
+
+ out_free_header:
+	poch_channel_free_header(channel);
+ out_free_group_info:
+	kfree(channel->groups);
+ out:
+	return ret;
+}
+
+static int poch_wait_fpga_prog(void __iomem *bridge)
+{
+	unsigned long total_wait;
+	const unsigned long wait_period = 100;
+	/* FIXME: Get the actual timeout */
+	const unsigned long prog_timeo = 10000; /* 10 Seconds */
+	u32 card_power;
+
+	printk(KERN_WARNING "poch_wait_fpg_prog\n");
+
+	printk(KERN_INFO PFX "programming fpga ...\n");
+	total_wait = 0;
+	while (1) {
+		msleep(wait_period);
+		total_wait += wait_period;
+
+		card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
+		if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
+			printk(KERN_INFO PFX "programming done\n");
+			return 0;
+		}
+		if (total_wait > prog_timeo) {
+			printk(KERN_ERR PFX
+			       "timed out while programming FPGA\n");
+			return -EIO;
+		}
+	}
+}
+
+static void poch_card_power_off(struct poch_dev *poch_dev)
+{
+	void __iomem *bridge = poch_dev->bridge_iomem;
+	u32 card_power;
+
+	iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
+	iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
+
+	card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
+	iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
+		  bridge + BRIDGE_CARD_POWER_REG);
+}
+
+enum clk_src {
+	CLK_SRC_ON_BOARD,
+	CLK_SRC_EXTERNAL
+};
+
+static void poch_card_clock_on(void __iomem *fpga)
+{
+	/* FIXME: Get this data through sysfs? */
+	enum clk_src clk_src = CLK_SRC_ON_BOARD;
+
+	if (clk_src == CLK_SRC_ON_BOARD) {
+		iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
+			  fpga + FPGA_ADC_CLOCK_CTL_REG);
+	} else if (clk_src == CLK_SRC_EXTERNAL) {
+		iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
+			  fpga + FPGA_ADC_CLOCK_CTL_REG);
+	}
+}
+
+static int poch_card_power_on(struct poch_dev *poch_dev)
+{
+	void __iomem *bridge = poch_dev->bridge_iomem;
+	void __iomem *fpga = poch_dev->fpga_iomem;
+
+	iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
+
+	if (poch_wait_fpga_prog(bridge) != 0) {
+		poch_card_power_off(poch_dev);
+		return -EIO;
+	}
+
+	poch_card_clock_on(fpga);
+
+	/* Sync to new clock, reset state machines, set DMA mode. */
+	iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
+		  | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
+
+	/* FIXME: The time required for sync. needs to be tuned. */
+	msleep(1000);
+
+	return 0;
+}
+
+static void poch_channel_analog_on(struct channel_info *channel)
+{
+	void __iomem *fpga = channel->fpga_iomem;
+	u32 adc_dac_en;
+
+	spin_lock(channel->iomem_lock);
+	adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
+	switch (channel->chno) {
+	case CHNO_RX_CHANNEL:
+		iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
+			  fpga + FPGA_ADC_DAC_EN_REG);
+		break;
+	case CHNO_TX_CHANNEL:
+		iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
+			  fpga + FPGA_ADC_DAC_EN_REG);
+		break;
+	}
+	spin_unlock(channel->iomem_lock);
+}
+
+static int poch_open(struct inode *inode, struct file *filp)
+{
+	struct poch_dev *poch_dev;
+	struct channel_info *channel;
+	void __iomem *bridge;
+	void __iomem *fpga;
+	int chno;
+	int usage;
+	int ret;
+
+	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
+	bridge = poch_dev->bridge_iomem;
+	fpga = poch_dev->fpga_iomem;
+
+	chno = iminor(inode) % poch_dev->nchannels;
+	channel = &poch_dev->channels[chno];
+
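+	/*
+	 * channel->free starts at 1 (see poch_pci_probe), so the
+	 * atomic_dec_and_test() below grants exclusive access to the
+	 * first opener; everybody else gets -EBUSY.
+	 */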
+	if (!atomic_dec_and_test(&channel->free)) {
+		atomic_inc(&channel->free);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	usage = atomic_inc_return(&poch_dev->usage);
+
+	printk(KERN_WARNING "poch_card_power_on\n");
+
+	if (usage == 1) {
+		ret = poch_card_power_on(poch_dev);
+		if (ret)
+			goto out_dec_usage;
+	}
+
+	printk(KERN_INFO "CardBus Bridge Revision: %x\n",
+	       ioread32(bridge + BRIDGE_REV_REG));
+	printk(KERN_INFO "CardBus Interface Revision: %x\n",
+	       ioread32(fpga + FPGA_IFACE_REV_REG));
+
+	channel->chno = chno;
+	filp->private_data = channel;
+
+	printk(KERN_WARNING "poch_channel_init\n");
+
+	ret = poch_channel_init(channel, poch_dev);
+	if (ret)
+		goto out_power_off;
+
+	printk(KERN_WARNING "poch_channel_analog_on\n");
+
+	poch_channel_analog_on(channel);
+
+	printk(KERN_WARNING "channel_dma_init\n");
+
+	channel_dma_init(channel);
+
+	if (usage == 1) {
+		printk(KERN_WARNING "setting up DMA\n");
+
+		/* Initialize DMA Controller. */
+		iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
+		iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
+
+		ioread32(fpga + FPGA_DMA_INT_STAT_REG);
+		ioread32(fpga + FPGA_INT_STAT_REG);
+		ioread32(bridge + BRIDGE_INT_STAT_REG);
+
+		/* Initialize interrupts. FIXME: Enable temperature
+		 * handling. We are enabling both Tx and Rx channel
+		 * interrupts here. Do we need to enable interrupts
+		 * only for the current channel? Anyway, we won't get
+		 * the interrupt unless the DMA is activated.
+		 */
+		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
+		iowrite32(FPGA_INT_DMA_CORE
+			  | FPGA_INT_PLL_UNLOCKED
+			  | FPGA_INT_TX_FF_EMPTY
+			  | FPGA_INT_RX_FF_EMPTY
+			  | FPGA_INT_TX_FF_OVRFLW
+			  | FPGA_INT_RX_FF_OVRFLW,
+			  fpga + FPGA_INT_MASK_REG);
+		iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
+			  fpga + FPGA_DMA_INT_MASK_REG);
+	}
+
+	if (channel->dir == CHANNEL_DIR_TX) {
+		/* Flush TX FIFO and output data from cardbus. */
+		iowrite32(FPGA_TX_CTL_FIFO_FLUSH
+			  | FPGA_TX_CTL_OUTPUT_CARDBUS,
+			  fpga + FPGA_TX_CTL_REG);
+	}
+
+	atomic_inc(&channel->inited);
+
+	return 0;
+
+ out_power_off:
+	if (usage == 1)
+		poch_card_power_off(poch_dev);
+ out_dec_usage:
+	atomic_dec(&poch_dev->usage);
+	atomic_inc(&channel->free);
+ out:
+	return ret;
+}
+
+static int poch_release(struct inode *inode, struct file *filp)
+{
+	struct channel_info *channel = filp->private_data;
+	struct poch_dev *poch_dev;
+	int usage;
+
+	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
+
+	usage = atomic_dec_return(&poch_dev->usage);
+	if (usage == 0) {
+		printk(KERN_WARNING "poch_card_power_off\n");
+		poch_card_power_off(poch_dev);
+	}
+
+	atomic_dec(&channel->inited);
+	poch_channel_free_header(channel);
+	poch_channel_free_groups(channel);
+	kfree(channel->groups);
+	atomic_inc(&channel->free);
+
+	return 0;
+}
+
+/*
+ * Map the header and the group buffers, to user space.
+ */
+static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct channel_info *channel = filp->private_data;
+
+	unsigned long start;
+	unsigned long size;
+
+	unsigned long group_pages;
+	unsigned long header_pages;
+	unsigned long total_group_pages;
+
+	int pg_num;
+	struct page *pg;
+
+	int i;
+	int ret;
+
+	printk(KERN_WARNING "poch_mmap\n");
+
+	if (vma->vm_pgoff) {
+		printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
+		return -EINVAL;
+	}
+
+	group_pages = (channel->group_size / PAGE_SIZE) + 1;
+	header_pages = (channel->header_size / PAGE_SIZE) + 1;
+	total_group_pages = group_pages * channel->group_count;
+
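+	/*
+	 * Userspace layout: header_pages pages for the channel header,
+	 * followed by group_count groups of group_pages pages each, in
+	 * group order.  This must match what show_mmap_size() reports.
+	 */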
+	size = vma->vm_end - vma->vm_start;
+	if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
+		printk(KERN_WARNING PFX "required %lu bytes\n", size);
+		return -EINVAL;
+	}
+
+	start = vma->vm_start;
+
+	/* FIXME: Cleanup required on failure? */
+	pg = channel->header_pg;
+	for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
+		printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
+		printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
+		ret = vm_insert_page(vma, start, pg);
+		if (ret) {
+			printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
+			return ret;
+		}
+		start += PAGE_SIZE;
+	}
+
+	for (i = 0; i < channel->group_count; i++) {
+		pg = channel->groups[i].pg;
+		for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
+			printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
+			       pg_num, i, start);
+			ret = vm_insert_page(vma, start, pg);
+			if (ret) {
+				printk(KERN_DEBUG PFX
+				       "vm_insert 2 failed at %d\n", pg_num);
+				return ret;
+			}
+			start += PAGE_SIZE;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Check whether there is some group that the user space has not
+ * consumed yet. When the user space consumes a group, it sets it to
+ * -1. Consuming means reading data in the RX case and filling a
+ * buffer in the TX case.
+ */
+static int poch_channel_available(struct channel_info *channel)
+{
+	int i;
+
+	spin_lock_irq(&channel->group_offsets_lock);
+
+	for (i = 0; i < channel->group_count; i++) {
+		if (channel->dir == CHANNEL_DIR_RX
+		    && channel->header->group_offsets[i] == -1) {
+			spin_unlock_irq(&channel->group_offsets_lock);
+			return 1;
+		}
+
+		if (channel->dir == CHANNEL_DIR_TX
+		    && channel->header->group_offsets[i] != -1) {
+			spin_unlock_irq(&channel->group_offsets_lock);
+			return 1;
+		}
+	}
+
+	spin_unlock_irq(&channel->group_offsets_lock);
+
+	return 0;
+}
+
+static unsigned int poch_poll(struct file *filp, poll_table *pt)
+{
+	struct channel_info *channel = filp->private_data;
+	unsigned int ret = 0;
+
+	poll_wait(filp, &channel->wq, pt);
+
+	if (poch_channel_available(channel)) {
+		if (channel->dir == CHANNEL_DIR_RX)
+			ret = POLLIN | POLLRDNORM;
+		else
+			ret = POLLOUT | POLLWRNORM;
+	}
+
+	return ret;
+}
+
+static int poch_ioctl(struct inode *inode, struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct channel_info *channel = filp->private_data;
+	void __iomem *fpga = channel->fpga_iomem;
+	void __iomem *bridge = channel->bridge_iomem;
+	void __user *argp = (void __user *)arg;
+	struct vm_area_struct *vms;
+	struct poch_counters counters;
+	int ret;
+
+	switch (cmd) {
+	case POCH_IOC_TRANSFER_START:
+		switch (channel->chno) {
+		case CHNO_TX_CHANNEL:
+			printk(KERN_INFO PFX "ioctl: Tx start\n");
+			iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
+			iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
+
+			/* FIXME: Does it make sense to do a DMA GO
+			 * twice, once in Tx and once in Rx?
+			 */
+			iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
+			break;
+		case CHNO_RX_CHANNEL:
+			printk(KERN_INFO PFX "ioctl: Rx start\n");
+			iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
+			iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
+			break;
+		}
+		break;
+	case POCH_IOC_TRANSFER_STOP:
+		switch (channel->chno) {
+		case CHNO_TX_CHANNEL:
+			printk(KERN_INFO PFX "ioctl: Tx stop\n");
+			iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
+			iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
+			iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
+			break;
+		case CHNO_RX_CHANNEL:
+			printk(KERN_INFO PFX "ioctl: Rx stop\n");
+			iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
+			iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
+			break;
+		}
+		break;
+	case POCH_IOC_GET_COUNTERS:
+		if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
+			return -EFAULT;
+
+		spin_lock_irq(&channel->counters_lock);
+		counters = channel->counters;
+		__poch_channel_clear_counters(channel);
+		spin_unlock_irq(&channel->counters_lock);
+
+		ret = copy_to_user(argp, &counters,
+				   sizeof(struct poch_counters));
+		if (ret)
+			return -EFAULT;
+
+		break;
+	case POCH_IOC_SYNC_GROUP_FOR_USER:
+	case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
+		/* find_vma() must be called with mmap_sem held. */
+		down_read(&current->mm->mmap_sem);
+		vms = find_vma(current->mm, arg);
+		if (!vms || vms->vm_file != filp) {
+			/* Address not mapped, or mapped from a
+			 * different device/file.
+			 */
+			up_read(&current->mm->mmap_sem);
+			return -EINVAL;
+		}
+
+		flush_cache_range(vms, arg, arg + channel->group_size);
+		up_read(&current->mm->mmap_sem);
+		break;
+	}
+	return 0;
+}
+
+static struct file_operations poch_fops = {
+	.owner = THIS_MODULE,
+	.open = poch_open,
+	.release = poch_release,
+	.ioctl = poch_ioctl,
+	.poll = poch_poll,
+	.mmap = poch_mmap
+};
+
+static void poch_irq_dma(struct channel_info *channel)
+{
+	u32 prev_transfer;
+	u32 curr_transfer;
+	long groups_done;
+	unsigned long i, j;
+	struct poch_group_info *groups;
+	s32 *group_offsets;
+	u32 curr_group_reg;
+
+	if (!atomic_read(&channel->inited))
+		return;
+
+	prev_transfer = channel->transfer;
+
+	if (channel->chno == CHNO_RX_CHANNEL)
+		curr_group_reg = FPGA_RX_CURR_GROUP_REG;
+	else
+		curr_group_reg = FPGA_TX_CURR_GROUP_REG;
+
+	curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
+
+	groups_done = curr_transfer - prev_transfer;
+	/* Check wrap over, and handle it. */
+	if (groups_done <= 0)
+		groups_done += channel->group_count;
+
+	group_offsets = channel->header->group_offsets;
+	groups = channel->groups;
+
+	spin_lock(&channel->group_offsets_lock);
+
+	for (i = 0; i < groups_done; i++) {
+		j = (prev_transfer + i) % channel->group_count;
+		if (channel->dir == CHANNEL_DIR_RX)
+			group_offsets[j] = -1;
+		else
+			group_offsets[j] = groups[j].user_offset;
+	}
+
+	spin_unlock(&channel->group_offsets_lock);
+
+	channel->transfer = curr_transfer;
+
+	wake_up_interruptible(&channel->wq);
+}
+
+static irqreturn_t poch_irq_handler(int irq, void *p)
+{
+	struct poch_dev *poch_dev = p;
+	void __iomem *bridge = poch_dev->bridge_iomem;
+	void __iomem *fpga = poch_dev->fpga_iomem;
+	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
+	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
+	u32 bridge_stat;
+	u32 fpga_stat;
+	u32 dma_stat;
+
+	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
+	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
+	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
+
+	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
+	ioread32(fpga + FPGA_INT_STAT_REG);
+	ioread32(bridge + BRIDGE_INT_STAT_REG);
+
+	if (bridge_stat & BRIDGE_INT_FPGA) {
+		if (fpga_stat & FPGA_INT_DMA_CORE) {
+			if (dma_stat & FPGA_DMA_INT_RX)
+				poch_irq_dma(channel_rx);
+			if (dma_stat & FPGA_DMA_INT_TX)
+				poch_irq_dma(channel_tx);
+		}
+		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
+			channel_tx->counters.pll_unlock++;
+			channel_rx->counters.pll_unlock++;
+			if (printk_ratelimit())
+				printk(KERN_WARNING PFX "PLL unlocked\n");
+		}
+		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
+			channel_tx->counters.fifo_empty++;
+		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
+			channel_tx->counters.fifo_overflow++;
+		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
+			channel_rx->counters.fifo_empty++;
+		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
+			channel_rx->counters.fifo_overflow++;
+
+		/*
+		 * FIXME: These errors should be notified through the
+		 * poll interface as POLLERR.
+		 */
+
+		/* Re-enable interrupts. */
+		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
+
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
+{
+	int i, j;
+	int nattrs;
+	struct channel_info *channel;
+	dev_t devno;
+
+	if (poch_dev->dev == NULL)
+		return;
+
+	for (i = 0; i < poch_dev->nchannels; i++) {
+		channel = &poch_dev->channels[i];
+		devno = poch_first_dev + (id * poch_dev->nchannels) + i;
+
+		if (!channel->dev)
+			continue;
+
+		nattrs = ARRAY_SIZE(poch_class_attrs);
+		for (j = 0; j < nattrs; j++)
+			device_remove_file(channel->dev, poch_class_attrs[j]);
+
+		device_unregister(channel->dev);
+	}
+
+	device_unregister(poch_dev->dev);
+}
+
+static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
+					     int id)
+{
+	struct device *dev = &poch_dev->pci_dev->dev;
+	int i, j;
+	int nattrs;
+	int ret;
+	struct channel_info *channel;
+	dev_t devno;
+
+	poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
+				      MKDEV(0, 0), NULL, "poch%d", id);
+	if (IS_ERR(poch_dev->dev)) {
+		dev_err(dev, "error creating parent class device");
+		ret = PTR_ERR(poch_dev->dev);
+		poch_dev->dev = NULL;
+		return ret;
+	}
+
+	for (i = 0; i < poch_dev->nchannels; i++) {
+		channel = &poch_dev->channels[i];
+
+		devno = poch_first_dev + (id * poch_dev->nchannels) + i;
+		channel->dev = device_create(poch_cls, poch_dev->dev, devno,
+					     NULL, "ch%d", i);
+		if (IS_ERR(channel->dev)) {
+			dev_err(dev, "error creating channel class device");
+			ret = PTR_ERR(channel->dev);
+			channel->dev = NULL;
+			poch_class_dev_unregister(poch_dev, id);
+			return ret;
+		}
+
+		dev_set_drvdata(channel->dev, channel);
+		nattrs = ARRAY_SIZE(poch_class_attrs);
+		for (j = 0; j < nattrs; j++) {
+			ret = device_create_file(channel->dev,
+						 poch_class_attrs[j]);
+			if (ret) {
+				dev_err(dev, "error creating attribute file");
+				poch_class_dev_unregister(poch_dev, id);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int __devinit poch_pci_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *pci_id)
+{
+	struct device *dev = &pdev->dev;
+	struct poch_dev *poch_dev;
+	struct uio_info *uio;
+	int ret;
+	int id;
+	int i;
+
+	poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
+	if (!poch_dev) {
+		dev_err(dev, "error allocating priv. data memory\n");
+		return -ENOMEM;
+	}
+
+	poch_dev->pci_dev = pdev;
+	uio = &poch_dev->uio;
+
+	pci_set_drvdata(pdev, poch_dev);
+
+	spin_lock_init(&poch_dev->iomem_lock);
+
+	poch_dev->nchannels = POCH_NCHANNELS;
+	poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
+	poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
+
+	for (i = 0; i < poch_dev->nchannels; i++) {
+		init_waitqueue_head(&poch_dev->channels[i].wq);
+		atomic_set(&poch_dev->channels[i].free, 1);
+		atomic_set(&poch_dev->channels[i].inited, 0);
+	}
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(dev, "error enabling device\n");
+		goto out_free;
+	}
+
+	ret = pci_request_regions(pdev, "poch");
+	if (ret) {
+		dev_err(dev, "error requesting resources\n");
+		goto out_disable;
+	}
+
+	uio->mem[0].addr = pci_resource_start(pdev, 1);
+	if (!uio->mem[0].addr) {
+		dev_err(dev, "invalid BAR1\n");
+		ret = -ENODEV;
+		goto out_release;
+	}
+
+	uio->mem[0].size = pci_resource_len(pdev, 1);
+	uio->mem[0].memtype = UIO_MEM_PHYS;
+
+	uio->name = "poch";
+	uio->version = "0.0.1";
+	uio->irq = -1;
+	ret = uio_register_device(dev, uio);
+	if (ret) {
+		dev_err(dev, "error register UIO device: %d\n", ret);
+		goto out_release;
+	}
+
+	poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
+					 pci_resource_len(pdev, 0));
+	if (poch_dev->bridge_iomem == NULL) {
+		dev_err(dev, "error mapping bridge (bar0) registers\n");
+		ret = -ENOMEM;
+		goto out_uio_unreg;
+	}
+
+	poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
+				       pci_resource_len(pdev, 1));
+	if (poch_dev->fpga_iomem == NULL) {
+		dev_err(dev, "error mapping fpga (bar1) registers\n");
+		ret = -ENOMEM;
+		goto out_bar0_unmap;
+	}
+
+	ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
+			  dev->bus_id, poch_dev);
+	if (ret) {
+		dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
+		goto out_bar1_unmap;
+	}
+
+	if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
+		dev_err(dev, "error allocating memory ids\n");
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+	ret = idr_get_new(&poch_ids, poch_dev, &id);
+	if (ret) {
+		dev_err(dev, "error allocating id\n");
+		goto out_free_irq;
+	}
+
+	if (id >= MAX_POCH_CARDS) {
+		dev_err(dev, "minors exhausted\n");
+		ret = -EBUSY;
+		goto out_idr_remove;
+	}
+
+	cdev_init(&poch_dev->cdev, &poch_fops);
+	poch_dev->cdev.owner = THIS_MODULE;
+	ret = cdev_add(&poch_dev->cdev,
+		       poch_first_dev + (id * poch_dev->nchannels),
+		       poch_dev->nchannels);
+	if (ret) {
+		dev_err(dev, "error register character device\n");
+		goto out_idr_remove;
+	}
+
+	ret = poch_class_dev_register(poch_dev, id);
+	if (ret)
+		goto out_cdev_del;
+
+	return 0;
+
+ out_cdev_del:
+	cdev_del(&poch_dev->cdev);
+ out_idr_remove:
+	idr_remove(&poch_ids, id);
+ out_free_irq:
+	free_irq(pdev->irq, poch_dev);
+ out_bar1_unmap:
+	iounmap(poch_dev->fpga_iomem);
+ out_bar0_unmap:
+	iounmap(poch_dev->bridge_iomem);
+ out_uio_unreg:
+	uio_unregister_device(uio);
+ out_release:
+	pci_release_regions(pdev);
+ out_disable:
+	pci_disable_device(pdev);
+ out_free:
+	kfree(poch_dev);
+	return ret;
+}
+
+/*
+ * FIXME: We are yet to handle the hot unplug case.
+ */
+static void poch_pci_remove(struct pci_dev *pdev)
+{
+	struct poch_dev *poch_dev = pci_get_drvdata(pdev);
+	struct uio_info *uio = &poch_dev->uio;
+	unsigned int minor = MINOR(poch_dev->cdev.dev);
+	unsigned int id = minor / poch_dev->nchannels;
+
+	/* FIXME: unmap fpga_iomem and bridge_iomem */
+
+	poch_class_dev_unregister(poch_dev, id);
+	cdev_del(&poch_dev->cdev);
+	idr_remove(&poch_ids, id);
+	free_irq(pdev->irq, poch_dev);
+	uio_unregister_device(uio);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	iounmap(uio->mem[0].internal_addr);
+
+	kfree(poch_dev);
+}
+
+static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
+		     PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
+	{ 0, }
+};
+
+static struct pci_driver poch_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = poch_pci_ids,
+	.probe = poch_pci_probe,
+	.remove = poch_pci_remove,
+};
+
+static int __init poch_init_module(void)
+{
+	int ret = 0;
+
+	ret = alloc_chrdev_region(&poch_first_dev, 0,
+				  MAX_POCH_DEVICES, DRV_NAME);
+	if (ret) {
+		printk(KERN_ERR PFX "error allocating device no.");
+		return ret;
+	}
+
+	poch_cls = class_create(THIS_MODULE, "pocketchange");
+	if (IS_ERR(poch_cls)) {
+		ret = PTR_ERR(poch_cls);
+		goto out_unreg_chrdev;
+	}
+
+	ret = pci_register_driver(&poch_pci_driver);
+	if (ret) {
+		printk(KERN_ERR PFX "error register PCI device");
+		goto out_class_destroy;
+	}
+
+	return 0;
+
+ out_class_destroy:
+	class_destroy(poch_cls);
+
+ out_unreg_chrdev:
+	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
+
+	return ret;
+}
+
+static void __exit poch_exit_module(void)
+{
+	pci_unregister_driver(&poch_pci_driver);
+	class_destroy(poch_cls);
+	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
+}
+
+module_init(poch_init_module);
+module_exit(poch_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/poch/poch.h b/drivers/staging/poch/poch.h
new file mode 100644
index 0000000..51a2d14
--- /dev/null
+++ b/drivers/staging/poch/poch.h
@@ -0,0 +1,29 @@
+/*
+ * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
+ *
+ * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
+ *
+ * Part of userspace API. Should be moved to a header file in
+ * include/linux for final version.
+ *
+ */
+struct poch_cbuf_header {
+	__s32 group_size_bytes;
+	__s32 group_count;
+	__s32 group_offsets[0];
+};
+
+struct poch_counters {
+	__u32 fifo_empty;
+	__u32 fifo_overflow;
+	__u32 pll_unlock;
+};
+
+#define POCH_IOC_NUM			'9'
+
+#define POCH_IOC_TRANSFER_START		_IO(POCH_IOC_NUM, 0)
+#define POCH_IOC_TRANSFER_STOP		_IO(POCH_IOC_NUM, 1)
+#define POCH_IOC_GET_COUNTERS		_IOR(POCH_IOC_NUM, 2, \
+					     struct poch_counters)
+#define POCH_IOC_SYNC_GROUP_FOR_USER	_IO(POCH_IOC_NUM, 3)
+#define POCH_IOC_SYNC_GROUP_FOR_DEVICE	_IO(POCH_IOC_NUM, 4)
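+
+/*
+ * Illustrative usage sketch (not part of the driver): a minimal
+ * userspace client might drive a channel roughly as follows.  The
+ * device node name below is an assumption -- the nodes come from the
+ * "poch%d"/"ch%d" class devices -- and the mapping length should be
+ * read from the channel's mmap_size sysfs attribute.
+ *
+ *	int fd = open("/dev/ch0", O_RDWR);	// assumed node name
+ *	size_t len = ...;			// from sysfs "mmap_size"
+ *	struct poch_cbuf_header *hdr =
+ *		mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
+ *
+ *	ioctl(fd, POCH_IOC_TRANSFER_START);
+ *	poll(&pfd, 1, -1);
+ *	// consume/fill groups via hdr->group_offsets[], then
+ *	// POCH_IOC_SYNC_GROUP_FOR_USER / _FOR_DEVICE as needed
+ *	ioctl(fd, POCH_IOC_TRANSFER_STOP);
+ */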
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index b61ac4b..8fa9490 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -54,7 +54,6 @@
  *       IS-NIC driver.
  */
 
-#include <linux/version.h>
 
 #define SLIC_DUMP_ENABLED               0
 #define KLUDGE_FOR_4GB_BOUNDARY         1
@@ -96,17 +95,9 @@
 #include <linux/moduleparam.h>
 
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/if_vlan.h>
-#include <linux/skbuff.h>
-#include <linux/string.h>
 #include <asm/unaligned.h>
 
 #include <linux/ethtool.h>
@@ -275,7 +266,6 @@
 			  card->reg_value[i], card->reg_valueh[i]);
 	}
 }
-}
 #endif
 
 static void slic_init_adapter(struct net_device *netdev,
@@ -606,6 +596,7 @@
 	uint mmio_len = 0;
 	struct adapter *adapter = (struct adapter *) netdev_priv(dev);
 	struct sliccard *card;
+	struct mcast_address *mcaddr, *mlist;
 
 	ASSERT(adapter);
 	DBG_MSG("slicoss: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
@@ -625,6 +616,13 @@
 	DBG_MSG("slicoss: %s iounmap dev->base_addr[%x]\n", __func__,
 		(uint) dev->base_addr);
 	iounmap((void __iomem *)dev->base_addr);
+	/* free multicast addresses */
+	mlist = adapter->mcastaddrs;
+	while (mlist) {
+		mcaddr = mlist;
+		mlist = mlist->next;
+		kfree(mcaddr);
+	}
 	ASSERT(adapter->card);
 	card = adapter->card;
 	ASSERT(card->adapters_allocated);
diff --git a/drivers/staging/sxg/Kconfig b/drivers/staging/sxg/Kconfig
index 1ae3508..6e6cf0b 100644
--- a/drivers/staging/sxg/Kconfig
+++ b/drivers/staging/sxg/Kconfig
@@ -1,6 +1,7 @@
 config SXG
 	tristate "Alacritech SLIC Technology Non-Accelerated 10Gbe support"
 	depends on PCI && NETDEV_10000
+	depends on X86
 	default n
 	help
 	  This driver supports the Alacritech SLIC Technology Non-Accelerated
diff --git a/drivers/staging/sxg/README b/drivers/staging/sxg/README
index 4d1ddbe..d514d18 100644
--- a/drivers/staging/sxg/README
+++ b/drivers/staging/sxg/README
@@ -7,6 +7,7 @@
 	- remove wrappers
 	- checkpatch.pl cleanups
 	- new functionality that the card needs
+	- remove reliance on x86
 
 Please send patches to:
         Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 6ccbee8..5272a18 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -112,12 +112,16 @@
 static struct net_device_stats *sxg_get_stats(p_net_device dev);
 #endif
 
+#define XXXTODO 0
+
+#if XXXTODO
 static int sxg_mac_set_address(p_net_device dev, void *ptr);
+static void sxg_mcast_set_list(p_net_device dev);
+#endif
 
 static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
 
 static void sxg_unmap_mmio_space(p_adapter_t adapter);
-static void sxg_mcast_set_mask(p_adapter_t adapter);
 
 static int sxg_initialize_adapter(p_adapter_t adapter);
 static void sxg_stock_rcv_buffers(p_adapter_t adapter);
@@ -132,9 +136,6 @@
 			      u32 DevAddr, u32 RegAddr, u32 Value);
 static int sxg_read_mdio_reg(p_adapter_t adapter,
 			     u32 DevAddr, u32 RegAddr, u32 *pValue);
-static void sxg_mcast_set_list(p_net_device dev);
-
-#define XXXTODO 0
 
 static unsigned int sxg_first_init = 1;
 static char *sxg_banner =
@@ -202,7 +203,7 @@
 {
 	if (sxg_first_init) {
 		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
-			  __FUNCTION__, jiffies);
+			  __func__, jiffies);
 		sxg_first_init = 0;
 		spin_lock_init(&sxg_global.driver_lock);
 	}
@@ -223,7 +224,7 @@
 	return;
 }
 
-// SXG Globals
+/* SXG Globals */
 static SXG_DRIVER SxgDriver;
 
 #ifdef  ATKDBG
@@ -250,7 +251,7 @@
 	u32 ThisSectionSize;
 	u32 *Instruction = NULL;
 	u32 BaseAddress, AddressOffset, Address;
-//      u32                         Failure;
+/*      u32                         Failure; */
 	u32 ValueRead;
 	u32 i;
 	u32 numSections = 0;
@@ -259,10 +260,10 @@
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
 		  adapter, 0, 0, 0);
-	DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s ENTER\n", __func__);
 
 	switch (UcodeSel) {
-	case SXG_UCODE_SAHARA:	// Sahara operational ucode
+	case SXG_UCODE_SAHARA:	/* Sahara operational ucode */
 		numSections = SNumSections;
 		for (i = 0; i < numSections; i++) {
 			sectionSize[i] = SSectionSize[i];
@@ -276,13 +277,13 @@
 	}
 
 	DBG_ERROR("sxg: RESET THE CARD\n");
-	// First, reset the card
+	/* First, reset the card */
 	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
 
-	// Download each section of the microcode as specified in
-	// its download file.  The *download.c file is generated using
-	// the saharaobjtoc facility which converts the metastep .obj
-	// file to a .c file which contains a two dimentional array.
+	/* Download each section of the microcode as specified in */
+	/* its download file.  The *download.c file is generated using */
+	/* the saharaobjtoc facility which converts the metastep .obj */
+	/* file to a .c file which contains a two dimensional array. */
 	for (Section = 0; Section < numSections; Section++) {
 		DBG_ERROR("sxg: SECTION # %d\n", Section);
 		switch (UcodeSel) {
@@ -294,35 +295,35 @@
 			break;
 		}
 		BaseAddress = sectionStart[Section];
-		ThisSectionSize = sectionSize[Section] / 12;	// Size in instructions
+		ThisSectionSize = sectionSize[Section] / 12;	/* Size in instructions */
 		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
 		     AddressOffset++) {
 			Address = BaseAddress + AddressOffset;
 			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
-			// Write instruction bits 31 - 0
+			/* Write instruction bits 31 - 0 */
 			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
-			// Write instruction bits 63-32
+			/* Write instruction bits 63-32 */
 			WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
 				  FLUSH);
-			// Write instruction bits 95-64
+			/* Write instruction bits 95-64 */
 			WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
 				  FLUSH);
-			// Write instruction address with the WRITE bit set
+			/* Write instruction address with the WRITE bit set */
 			WRITE_REG(HwRegs->UcodeAddr,
 				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
-			// Sahara bug in the ucode download logic - the write to DataLow
-			// for the next instruction could get corrupted.  To avoid this,
-			// write to DataLow again for this instruction (which may get
-			// corrupted, but it doesn't matter), then increment the address
-			// and write the data for the next instruction to DataLow.  That
-			// write should succeed.
+			/* Sahara bug in the ucode download logic - the write to DataLow */
+			/* for the next instruction could get corrupted.  To avoid this, */
+			/* write to DataLow again for this instruction (which may get */
+			/* corrupted, but it doesn't matter), then increment the address */
+			/* and write the data for the next instruction to DataLow.  That */
+			/* write should succeed. */
 			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
-			// Advance 3 u32S to start of next instruction
+			/* Advance 3 u32S to start of next instruction */
 			Instruction += 3;
 		}
 	}
-	// Now repeat the entire operation reading the instruction back and
-	// checking for parity errors
+	/* Now repeat the entire operation reading the instruction back and */
+	/* checking for parity errors */
 	for (Section = 0; Section < numSections; Section++) {
 		DBG_ERROR("sxg: check SECTION # %d\n", Section);
 		switch (UcodeSel) {
@@ -334,74 +335,74 @@
 			break;
 		}
 		BaseAddress = sectionStart[Section];
-		ThisSectionSize = sectionSize[Section] / 12;	// Size in instructions
+		ThisSectionSize = sectionSize[Section] / 12;	/* Size in instructions */
 		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
 		     AddressOffset++) {
 			Address = BaseAddress + AddressOffset;
-			// Write the address with the READ bit set
+			/* Write the address with the READ bit set */
 			WRITE_REG(HwRegs->UcodeAddr,
 				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
-			// Read it back and check parity bit.
+			/* Read it back and check parity bit. */
 			READ_REG(HwRegs->UcodeAddr, ValueRead);
 			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
 				DBG_ERROR("sxg: %s PARITY ERROR\n",
-					  __FUNCTION__);
+					  __func__);
 
-				return (FALSE);	// Parity error
+				return (FALSE);	/* Parity error */
 			}
 			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
-			// Read the instruction back and compare
+			/* Read the instruction back and compare */
 			READ_REG(HwRegs->UcodeDataLow, ValueRead);
 			if (ValueRead != *Instruction) {
 				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
-					  __FUNCTION__);
-				return (FALSE);	// Miscompare
+					  __func__);
+				return (FALSE);	/* Miscompare */
 			}
 			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
 			if (ValueRead != *(Instruction + 1)) {
 				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
-					  __FUNCTION__);
-				return (FALSE);	// Miscompare
+					  __func__);
+				return (FALSE);	/* Miscompare */
 			}
 			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
 			if (ValueRead != *(Instruction + 2)) {
 				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
-					  __FUNCTION__);
-				return (FALSE);	// Miscompare
+					  __func__);
+				return (FALSE);	/* Miscompare */
 			}
-			// Advance 3 u32S to start of next instruction
+			/* Advance 3 u32S to start of next instruction */
 			Instruction += 3;
 		}
 	}
 
-	// Everything OK, Go.
+	/* Everything OK, Go. */
 	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
 
-	// Poll the CardUp register to wait for microcode to initialize
-	// Give up after 10,000 attemps (500ms).
+	/* Poll the CardUp register to wait for microcode to initialize */
+	/* Give up after 10,000 attempts (500ms). */
 	for (i = 0; i < 10000; i++) {
 		udelay(50);
 		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
 		if (ValueRead == 0xCAFE) {
-			DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__);
+			DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
 			break;
 		}
 	}
 	if (i == 10000) {
-		DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__);
+		DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
 
-		return (FALSE);	// Timeout
+		return (FALSE);	/* Timeout */
 	}
-	// Now write the LoadSync register.  This is used to
-	// synchronize with the card so it can scribble on the memory
-	// that contained 0xCAFE from the "CardUp" step above
+	/* Now write the LoadSync register.  This is used to */
+	/* synchronize with the card so it can scribble on the memory */
+	/* that contained 0xCAFE from the "CardUp" step above */
 	if (UcodeSel == SXG_UCODE_SAHARA) {
 		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
 	}
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
 		  adapter, 0, 0, 0);
-	DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s EXIT\n", __func__);
 
 	return (TRUE);
 }
@@ -420,29 +421,29 @@
 	int status;
 	u32 i;
 	u32 RssIds, IsrCount;
-//      PSXG_XMT_RING                                   XmtRing;
-//      PSXG_RCV_RING                                   RcvRing;
+/*      PSXG_XMT_RING                                   XmtRing; */
+/*      PSXG_RCV_RING                                   RcvRing; */
 
-	DBG_ERROR("%s ENTER\n", __FUNCTION__);
+	DBG_ERROR("%s ENTER\n", __func__);
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
 		  adapter, 0, 0, 0);
 
-	// Windows tells us how many CPUs it plans to use for
-	// RSS
+	/* Windows tells us how many CPUs it plans to use for */
+	/* RSS */
 	RssIds = SXG_RSS_CPU_COUNT(adapter);
 	IsrCount = adapter->MsiEnabled ? RssIds : 1;
 
-	DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__);
+	DBG_ERROR("%s Setup the spinlocks\n", __func__);
 
-	// Allocate spinlocks and initialize listheads first.
+	/* Allocate spinlocks and initialize listheads first. */
 	spin_lock_init(&adapter->RcvQLock);
 	spin_lock_init(&adapter->SglQLock);
 	spin_lock_init(&adapter->XmtZeroLock);
 	spin_lock_init(&adapter->Bit64RegLock);
 	spin_lock_init(&adapter->AdapterLock);
 
-	DBG_ERROR("%s Setup the lists\n", __FUNCTION__);
+	DBG_ERROR("%s Setup the lists\n", __func__);
 
 	InitializeListHead(&adapter->FreeRcvBuffers);
 	InitializeListHead(&adapter->FreeRcvBlocks);
@@ -450,39 +451,39 @@
 	InitializeListHead(&adapter->FreeSglBuffers);
 	InitializeListHead(&adapter->AllSglBuffers);
 
-	// Mark these basic allocations done.  This flags essentially
-	// tells the SxgFreeResources routine that it can grab spinlocks
-	// and reference listheads.
+	/* Mark these basic allocations done.  This flags essentially */
+	/* tells the SxgFreeResources routine that it can grab spinlocks */
+	/* and reference listheads. */
 	adapter->BasicAllocations = TRUE;
-	// Main allocation loop.  Start with the maximum supported by
-	// the microcode and back off if memory allocation
-	// fails.  If we hit a minimum, fail.
+	/* Main allocation loop.  Start with the maximum supported by */
+	/* the microcode and back off if memory allocation */
+	/* fails.  If we hit a minimum, fail. */
 
 	for (;;) {
-		DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __FUNCTION__,
-			  (sizeof(SXG_XMT_RING) * 1));
+		DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
+			  (unsigned int)(sizeof(SXG_XMT_RING) * 1));
 
-		// Start with big items first - receive and transmit rings.  At the moment
-		// I'm going to keep the ring size fixed and adjust the number of
-		// TCBs if we fail.  Later we might consider reducing the ring size as well..
+		/* Start with big items first - receive and transmit rings.  At the moment */
+		/* I'm going to keep the ring size fixed and adjust the number of */
+		/* TCBs if we fail.  Later we might consider reducing the ring size as well.. */
 		adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
 							 sizeof(SXG_XMT_RING) *
 							 1,
 							 &adapter->PXmtRings);
-		DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings);
+		DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
 
 		if (!adapter->XmtRings) {
 			goto per_tcb_allocation_failed;
 		}
 		memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
 
-		DBG_ERROR("%s Allocate RcvRings size[%lx]\n", __FUNCTION__,
-			  (sizeof(SXG_RCV_RING) * 1));
+		DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
+			  (unsigned int)(sizeof(SXG_RCV_RING) * 1));
 		adapter->RcvRings =
 		    pci_alloc_consistent(adapter->pcidev,
 					 sizeof(SXG_RCV_RING) * 1,
 					 &adapter->PRcvRings);
-		DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings);
+		DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
 		if (!adapter->RcvRings) {
 			goto per_tcb_allocation_failed;
 		}
@@ -490,7 +491,7 @@
 		break;
 
 	      per_tcb_allocation_failed:
-		// an allocation failed.  Free any successful allocations.
+		/* an allocation failed.  Free any successful allocations. */
 		if (adapter->XmtRings) {
 			pci_free_consistent(adapter->pcidev,
 					    sizeof(SXG_XMT_RING) * 4096,
@@ -505,22 +506,22 @@
 					    adapter->PRcvRings);
 			adapter->RcvRings = NULL;
 		}
-		// Loop around and try again....
+		/* Loop around and try again.... */
 	}
 
-	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__);
-	// Initialize rcv zero and xmt zero rings
+	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
+	/* Initialize rcv zero and xmt zero rings */
 	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
 	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
 
-	// Sanity check receive data structure format
+	/* Sanity check receive data structure format */
 	ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
 	       (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
 	ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
 	       SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
 
-	// Allocate receive data buffers.  We allocate a block of buffers and
-	// a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
+	/* Allocate receive data buffers.  We allocate a block of buffers and */
+	/* a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK */
 	for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
 	     i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
 		sxg_allocate_buffer_memory(adapter,
@@ -528,8 +529,8 @@
 							      ReceiveBufferSize),
 					   SXG_BUFFER_TYPE_RCV);
 	}
-	// NBL resource allocation can fail in the 'AllocateComplete' routine, which
-	// doesn't return status.  Make sure we got the number of buffers we requested
+	/* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
+	/* doesn't return status.  Make sure we got the number of buffers we requested */
 	if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
 			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -537,17 +538,17 @@
 		return (STATUS_RESOURCES);
 	}
 
-	DBG_ERROR("%s Allocate EventRings size[%lx]\n", __FUNCTION__,
-		  (sizeof(SXG_EVENT_RING) * RssIds));
+	DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
+		  (unsigned int)(sizeof(SXG_EVENT_RING) * RssIds));
 
-	// Allocate event queues.
+	/* Allocate event queues. */
 	adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
 						   sizeof(SXG_EVENT_RING) *
 						   RssIds,
 						   &adapter->PEventRings);
 
 	if (!adapter->EventRings) {
-		// Caller will call SxgFreeAdapter to clean up above allocations
+		/* Caller will call SxgFreeAdapter to clean up above allocations */
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
 			  adapter, SXG_MAX_ENTRIES, 0, 0);
 		status = STATUS_RESOURCES;
@@ -555,12 +556,12 @@
 	}
 	memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
 
-	DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount);
-	// Allocate ISR
+	DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
+	/* Allocate ISR */
 	adapter->Isr = pci_alloc_consistent(adapter->pcidev,
 					    IsrCount, &adapter->PIsr);
 	if (!adapter->Isr) {
-		// Caller will call SxgFreeAdapter to clean up above allocations
+		/* Caller will call SxgFreeAdapter to clean up above allocations */
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
 			  adapter, SXG_MAX_ENTRIES, 0, 0);
 		status = STATUS_RESOURCES;
@@ -568,10 +569,10 @@
 	}
 	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
 
-	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n",
-		  __FUNCTION__, sizeof(u32));
+	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
+		  __func__, (unsigned int)sizeof(u32));
 
-	// Allocate shared XMT ring zero index location
+	/* Allocate shared XMT ring zero index location */
 	adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
 							 sizeof(u32),
 							 &adapter->
@@ -587,7 +588,7 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
 		  adapter, SXG_MAX_ENTRIES, 0, 0);
 
-	DBG_ERROR("%s EXIT\n", __FUNCTION__);
+	DBG_ERROR("%s EXIT\n", __func__);
 	return (STATUS_SUCCESS);
 }
 
@@ -606,17 +607,17 @@
 	u16 new_command;
 
 	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
-	DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __FUNCTION__, pci_command);
-	// Set the command register
-	new_command = pci_command | (PCI_COMMAND_MEMORY |	// Memory Space Enable
-				     PCI_COMMAND_MASTER |	// Bus master enable
-				     PCI_COMMAND_INVALIDATE |	// Memory write and invalidate
-				     PCI_COMMAND_PARITY |	// Parity error response
-				     PCI_COMMAND_SERR |	// System ERR
-				     PCI_COMMAND_FAST_BACK);	// Fast back-to-back
+	DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
+	/* Set the command register */
+	new_command = pci_command | (PCI_COMMAND_MEMORY |	/* Memory Space Enable */
+				     PCI_COMMAND_MASTER |	/* Bus master enable */
+				     PCI_COMMAND_INVALIDATE |	/* Memory write and invalidate */
+				     PCI_COMMAND_PARITY |	/* Parity error response */
+				     PCI_COMMAND_SERR |	/* System ERR */
+				     PCI_COMMAND_FAST_BACK);	/* Fast back-to-back */
 	if (pci_command != new_command) {
 		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
-			  __FUNCTION__, pci_command, new_command);
+			  __func__, pci_command, new_command);
 		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
 	}
 }
@@ -634,9 +635,9 @@
 	ulong mmio_len = 0;
 
 	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
-		  __FUNCTION__, jiffies, smp_processor_id());
+		  __func__, jiffies, smp_processor_id());
 
-	// Initialize trace buffer
+	/* Initialize trace buffer */
 #ifdef ATKDBG
 	SxgTraceBuffer = &LSxgTraceBuffer;
 	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
@@ -701,11 +702,11 @@
 		  mmio_start, mmio_len);
 
 	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
-	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
 		  memmapped_ioaddr);
 	if (!memmapped_ioaddr) {
 		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
-			  __FUNCTION__, mmio_len, mmio_start);
+			  __func__, mmio_len, mmio_start);
 		goto err_out_free_mmio_region;
 	}
 
@@ -727,7 +728,7 @@
 		  memmapped_ioaddr);
 	if (!memmapped_ioaddr) {
 		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
-			  __FUNCTION__, mmio_len, mmio_start);
+			  __func__, mmio_len, mmio_start);
 		goto err_out_free_mmio_region;
 	}
 
@@ -738,13 +739,13 @@
 	adapter->UcodeRegs = (void *)memmapped_ioaddr;
 
 	adapter->State = SXG_STATE_INITIALIZING;
-	// Maintain a list of all adapters anchored by
-	// the global SxgDriver structure.
+	/* Maintain a list of all adapters anchored by */
+	/* the global SxgDriver structure. */
 	adapter->Next = SxgDriver.Adapters;
 	SxgDriver.Adapters = adapter;
 	adapter->AdapterID = ++SxgDriver.AdapterID;
 
-	// Initialize CRC table used to determine multicast hash
+	/* Initialize CRC table used to determine multicast hash */
 	sxg_mcast_init_crc32();
 
 	adapter->JumboEnabled = FALSE;
@@ -757,18 +758,18 @@
 		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
 	}
 
-//    status = SXG_READ_EEPROM(adapter);
-//    if (!status) {
-//        goto sxg_init_bad;
-//    }
+/*    status = SXG_READ_EEPROM(adapter); */
+/*    if (!status) { */
+/*        goto sxg_init_bad; */
+/*    } */
 
-	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
 	sxg_config_pci(pcidev);
-	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
 
-	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
 	sxg_init_driver();
-	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
 
 	adapter->vendid = pci_tbl_entry->vendor;
 	adapter->devid = pci_tbl_entry->device;
@@ -780,23 +781,23 @@
 	adapter->irq = pcidev->irq;
 	adapter->next_netdevice = head_netdevice;
 	head_netdevice = netdev;
-//      adapter->chipid = chip_idx;
-	adapter->port = 0;	//adapter->functionnumber;
+/*      adapter->chipid = chip_idx; */
+	adapter->port = 0;	/*adapter->functionnumber; */
 	adapter->cardindex = adapter->port;
 
-	// Allocate memory and other resources
-	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__);
+	/* Allocate memory and other resources */
+	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
 	status = sxg_allocate_resources(adapter);
 	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
-		  __FUNCTION__, status);
+		  __func__, status);
 	if (status != STATUS_SUCCESS) {
 		goto err_out_unmap;
 	}
 
-	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
 	if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
 		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
-			  __FUNCTION__);
+			  __func__);
 		sxg_adapter_set_hwaddr(adapter);
 	} else {
 		adapter->state = ADAPT_FAIL;
@@ -819,7 +820,7 @@
 #endif
 
 	strcpy(netdev->name, "eth%d");
-//  strcpy(netdev->name, pci_name(pcidev));
+/*  strcpy(netdev->name, pci_name(pcidev)); */
 	if ((err = register_netdev(netdev))) {
 		DBG_ERROR("Cannot register net device, aborting. %s\n",
 			  netdev->name);
@@ -832,11 +833,11 @@
 	     netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
 	     netdev->dev_addr[4], netdev->dev_addr[5]);
 
-//sxg_init_bad:
+/*sxg_init_bad: */
 	ASSERT(status == FALSE);
-//      sxg_free_adapter(adapter);
+/*      sxg_free_adapter(adapter); */
 
-	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
 		  status, jiffies, smp_processor_id());
 	return status;
 
@@ -848,7 +849,7 @@
 
       err_out_exit_sxg_probe:
 
-	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies,
+	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
 		  smp_processor_id());
 
 	return -ENODEV;
@@ -874,12 +875,12 @@
 {
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
 		  adapter, adapter->InterruptsEnabled, 0, 0);
-	// For now, RSS is disabled with line based interrupts
+	/* For now, RSS is disabled with line based interrupts */
 	ASSERT(adapter->RssEnabled == FALSE);
 	ASSERT(adapter->MsiEnabled == FALSE);
-	//
-	// Turn off interrupts by writing to the icr register.
-	//
+	/* */
+	/* Turn off interrupts by writing to the icr register. */
+	/* */
 	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
 
 	adapter->InterruptsEnabled = 0;
@@ -905,12 +906,12 @@
 {
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
 		  adapter, adapter->InterruptsEnabled, 0, 0);
-	// For now, RSS is disabled with line based interrupts
+	/* For now, RSS is disabled with line based interrupts */
 	ASSERT(adapter->RssEnabled == FALSE);
 	ASSERT(adapter->MsiEnabled == FALSE);
-	//
-	// Turn on interrupts by writing to the icr register.
-	//
+	/* */
+	/* Turn on interrupts by writing to the icr register. */
+	/* */
 	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
 
 	adapter->InterruptsEnabled = 1;
@@ -935,29 +936,29 @@
 {
 	p_net_device dev = (p_net_device) dev_id;
 	p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
-//      u32                 CpuMask = 0, i;
+/*      u32                 CpuMask = 0, i; */
 
 	adapter->Stats.NumInts++;
 	if (adapter->Isr[0] == 0) {
-		// The SLIC driver used to experience a number of spurious interrupts
-		// due to the delay associated with the masking of the interrupt
-		// (we'd bounce back in here).  If we see that again with Sahara,
-		// add a READ_REG of the Icr register after the WRITE_REG below.
+		/* The SLIC driver used to experience a number of spurious interrupts */
+		/* due to the delay associated with the masking of the interrupt */
+		/* (we'd bounce back in here).  If we see that again with Sahara, */
+		/* add a READ_REG of the Icr register after the WRITE_REG below. */
 		adapter->Stats.FalseInts++;
 		return IRQ_NONE;
 	}
-	//
-	// Move the Isr contents and clear the value in
-	// shared memory, and mask interrupts
-	//
+	/* */
+	/* Move the Isr contents and clear the value in */
+	/* shared memory, and mask interrupts */
+	/* */
 	adapter->IsrCopy[0] = adapter->Isr[0];
 	adapter->Isr[0] = 0;
 	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
-//      ASSERT(adapter->IsrDpcsPending == 0);
-#if XXXTODO			// RSS Stuff
-	// If RSS is enabled and the ISR specifies
-	// SXG_ISR_EVENT, then schedule DPC's
-	// based on event queues.
+/*      ASSERT(adapter->IsrDpcsPending == 0); */
+#if XXXTODO			/* RSS Stuff */
+	/* If RSS is enabled and the ISR specifies */
+	/* SXG_ISR_EVENT, then schedule DPC's */
+	/* based on event queues. */
 	if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
 		for (i = 0;
 		     i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -973,8 +974,8 @@
 			}
 		}
 	}
-	// Now, either schedule the CPUs specified by the CpuMask,
-	// or queue default
+	/* Now, either schedule the CPUs specified by the CpuMask, */
+	/* or queue default */
 	if (CpuMask) {
 		*QueueDefault = FALSE;
 	} else {
@@ -983,9 +984,9 @@
 	}
 	*TargetCpus = CpuMask;
 #endif
-	//
-	//  There are no DPCs in Linux, so call the handler now
-	//
+	/* */
+	/*  There are no DPCs in Linux, so call the handler now */
+	/* */
 	sxg_handle_interrupt(adapter);
 
 	return IRQ_HANDLED;
@@ -993,7 +994,7 @@
 
 static void sxg_handle_interrupt(p_adapter_t adapter)
 {
-//    unsigned char           RssId   = 0;
+/*    unsigned char           RssId   = 0; */
 	u32 NewIsr;
 
 	if (adapter->Stats.RcvNoBuffer < 5) {
@@ -1002,32 +1003,32 @@
 	}
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
 		  adapter, adapter->IsrCopy[0], 0, 0);
-	// For now, RSS is disabled with line based interrupts
+	/* For now, RSS is disabled with line based interrupts */
 	ASSERT(adapter->RssEnabled == FALSE);
 	ASSERT(adapter->MsiEnabled == FALSE);
 	ASSERT(adapter->IsrCopy[0]);
-/////////////////////////////
+/*/////////////////////////// */
 
-	// Always process the event queue.
+	/* Always process the event queue. */
 	sxg_process_event_queue(adapter,
 				(adapter->RssEnabled ? /*RssId */ 0 : 0));
 
-#if XXXTODO			// RSS stuff
+#if XXXTODO			/* RSS stuff */
 	if (--adapter->IsrDpcsPending) {
-		// We're done.
+		/* We're done. */
 		ASSERT(adapter->RssEnabled);
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
 			  adapter, 0, 0, 0);
 		return;
 	}
 #endif
-	//
-	// Last (or only) DPC processes the ISR and clears the interrupt.
-	//
+	/* */
+	/* Last (or only) DPC processes the ISR and clears the interrupt. */
+	/* */
 	NewIsr = sxg_process_isr(adapter, 0);
-	//
-	// Reenable interrupts
-	//
+	/* */
+	/* Reenable interrupts */
+	/* */
 	adapter->IsrCopy[0] = 0;
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
 		  adapter, NewIsr, 0, 0);
@@ -1063,75 +1064,75 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
 		  adapter, Isr, 0, 0);
 
-	// Error
+	/* Error */
 	if (Isr & SXG_ISR_ERR) {
 		if (Isr & SXG_ISR_PDQF) {
 			adapter->Stats.PdqFull++;
-			DBG_ERROR("%s: SXG_ISR_ERR  PDQF!!\n", __FUNCTION__);
+			DBG_ERROR("%s: SXG_ISR_ERR  PDQF!!\n", __func__);
 		}
-		// No host buffer
+		/* No host buffer */
 		if (Isr & SXG_ISR_RMISS) {
-			// There is a bunch of code in the SLIC driver which
-			// attempts to process more receive events per DPC
-			// if we start to fall behind.  We'll probably
-			// need to do something similar here, but hold
-			// off for now.  I don't want to make the code more
-			// complicated than strictly needed.
+			/* There is a bunch of code in the SLIC driver which */
+			/* attempts to process more receive events per DPC */
+			/* if we start to fall behind.  We'll probably */
+			/* need to do something similar here, but hold */
+			/* off for now.  I don't want to make the code more */
+			/* complicated than strictly needed. */
 			adapter->Stats.RcvNoBuffer++;
 			if (adapter->Stats.RcvNoBuffer < 5) {
 				DBG_ERROR("%s: SXG_ISR_ERR  RMISS!!\n",
-					  __FUNCTION__);
+					  __func__);
 			}
 		}
-		// Card crash
+		/* Card crash */
 		if (Isr & SXG_ISR_DEAD) {
-			// Set aside the crash info and set the adapter state to RESET
+			/* Set aside the crash info and set the adapter state to RESET */
 			adapter->CrashCpu =
 			    (unsigned char)((Isr & SXG_ISR_CPU) >>
 					    SXG_ISR_CPU_SHIFT);
 			adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
 			adapter->Dead = TRUE;
-			DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__,
+			DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
 				  adapter->CrashLocation, adapter->CrashCpu);
 		}
-		// Event ring full
+		/* Event ring full */
 		if (Isr & SXG_ISR_ERFULL) {
-			// Same issue as RMISS, really.  This means the
-			// host is falling behind the card.  Need to increase
-			// event ring size, process more events per interrupt,
-			// and/or reduce/remove interrupt aggregation.
+			/* Same issue as RMISS, really.  This means the */
+			/* host is falling behind the card.  Need to increase */
+			/* event ring size, process more events per interrupt, */
+			/* and/or reduce/remove interrupt aggregation. */
 			adapter->Stats.EventRingFull++;
 			DBG_ERROR("%s: SXG_ISR_ERR  EVENT RING FULL!!\n",
-				  __FUNCTION__);
+				  __func__);
 		}
-		// Transmit drop - no DRAM buffers or XMT error
+		/* Transmit drop - no DRAM buffers or XMT error */
 		if (Isr & SXG_ISR_XDROP) {
 			adapter->Stats.XmtDrops++;
 			adapter->Stats.XmtErrors++;
-			DBG_ERROR("%s: SXG_ISR_ERR  XDROP!!\n", __FUNCTION__);
+			DBG_ERROR("%s: SXG_ISR_ERR  XDROP!!\n", __func__);
 		}
 	}
-	// Slowpath send completions
+	/* Slowpath send completions */
 	if (Isr & SXG_ISR_SPSEND) {
 		sxg_complete_slow_send(adapter);
 	}
-	// Dump
+	/* Dump */
 	if (Isr & SXG_ISR_UPC) {
-		ASSERT(adapter->DumpCmdRunning);	// Maybe change when debug is added..
+		ASSERT(adapter->DumpCmdRunning);	/* Maybe change when debug is added.. */
 		adapter->DumpCmdRunning = FALSE;
 	}
-	// Link event
+	/* Link event */
 	if (Isr & SXG_ISR_LINK) {
 		sxg_link_event(adapter);
 	}
-	// Debug - breakpoint hit
+	/* Debug - breakpoint hit */
 	if (Isr & SXG_ISR_BREAK) {
-		// At the moment AGDB isn't written to support interactive
-		// debug sessions.  When it is, this interrupt will be used
-		// to signal AGDB that it has hit a breakpoint.  For now, ASSERT.
+		/* At the moment AGDB isn't written to support interactive */
+		/* debug sessions.  When it is, this interrupt will be used */
+		/* to signal AGDB that it has hit a breakpoint.  For now, ASSERT. */
 		ASSERT(0);
 	}
-	// Heartbeat response
+	/* Heartbeat response */
 	if (Isr & SXG_ISR_PING) {
 		adapter->PingOutstanding = FALSE;
 	}
@@ -1171,39 +1172,39 @@
 	       (adapter->State == SXG_STATE_PAUSING) ||
 	       (adapter->State == SXG_STATE_PAUSED) ||
 	       (adapter->State == SXG_STATE_HALTING));
-	// We may still have unprocessed events on the queue if
-	// the card crashed.  Don't process them.
+	/* We may still have unprocessed events on the queue if */
+	/* the card crashed.  Don't process them. */
 	if (adapter->Dead) {
 		return (0);
 	}
-	// In theory there should only be a single processor that
-	// accesses this queue, and only at interrupt-DPC time.  So
-	// we shouldn't need a lock for any of this.
+	/* In theory there should only be a single processor that */
+	/* accesses this queue, and only at interrupt-DPC time.  So */
+	/* we shouldn't need a lock for any of this. */
 	while (Event->Status & EVENT_STATUS_VALID) {
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
 			  Event, Event->Code, Event->Status,
 			  adapter->NextEvent);
 		switch (Event->Code) {
 		case EVENT_CODE_BUFFERS:
-			ASSERT(!(Event->CommandIndex & 0xFF00));	// SXG_RING_INFO Head & Tail == unsigned char
-			//
+			ASSERT(!(Event->CommandIndex & 0xFF00));	/* SXG_RING_INFO Head & Tail == unsigned char */
+			/* */
 			sxg_complete_descriptor_blocks(adapter,
 						       Event->CommandIndex);
-			//
+			/* */
 			break;
 		case EVENT_CODE_SLOWRCV:
 			--adapter->RcvBuffersOnCard;
 			if ((skb = sxg_slow_receive(adapter, Event))) {
 				u32 rx_bytes;
 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
-				// Add it to our indication list
+				/* Add it to our indication list */
 				SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
 						   IndicationList, num_skbs);
-				//  In Linux, we just pass up each skb to the protocol above at this point,
-				//  there is no capability of an indication list.
+				/*  In Linux, we just pass up each skb to the protocol above at this point, */
+				/*  there is no capability of an indication list. */
 #else
-// CHECK            skb_pull(skb, INIC_RCVBUF_HEADSIZE);
-				rx_bytes = Event->Length;	// (rcvbuf->length & IRHDDR_FLEN_MSK);
+/* CHECK            skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
+				rx_bytes = Event->Length;	/* (rcvbuf->length & IRHDDR_FLEN_MSK); */
 				skb_put(skb, rx_bytes);
 				adapter->stats.rx_packets++;
 				adapter->stats.rx_bytes += rx_bytes;
@@ -1218,43 +1219,43 @@
 			break;
 		default:
 			DBG_ERROR("%s: ERROR  Invalid EventCode %d\n",
-				  __FUNCTION__, Event->Code);
-//                      ASSERT(0);
+				  __func__, Event->Code);
+/*                      ASSERT(0); */
 		}
-		// See if we need to restock card receive buffers.
-		// There are two things to note here:
-		//      First - This test is not SMP safe.  The
-		//              adapter->BuffersOnCard field is protected via atomic interlocked calls, but
-		//              we do not protect it with respect to these tests.  The only way to do that
-		//      is with a lock, and I don't want to grab a lock every time we adjust the
-		//      BuffersOnCard count.  Instead, we allow the buffer replenishment to be off
-		//      once in a while.  The worst that can happen is the card is given one
-		//      more-or-less descriptor block than the arbitrary value we've chosen.
-		//      No big deal
-		//      In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted.
-		//      Second - We expect this test to rarely evaluate to true.  We attempt to
-		//      refill descriptor blocks as they are returned to us
-		//      (sxg_complete_descriptor_blocks), so The only time this should evaluate
-		//      to true is when sxg_complete_descriptor_blocks failed to allocate
-		//              receive buffers.
+		/* See if we need to restock card receive buffers. */
+		/* There are two things to note here: */
+		/*      First - This test is not SMP safe.  The */
+		/*              adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
+		/*              we do not protect it with respect to these tests.  The only way to do that */
+		/*      is with a lock, and I don't want to grab a lock every time we adjust the */
+		/*      BuffersOnCard count.  Instead, we allow the buffer replenishment to be off */
+		/*      once in a while.  The worst that can happen is the card is given one */
+		/*      more-or-less descriptor block than the arbitrary value we've chosen. */
+		/*      No big deal */
+		/*      In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
+		/*      Second - We expect this test to rarely evaluate to true.  We attempt to */
+		/*      refill descriptor blocks as they are returned to us */
+		/*      (sxg_complete_descriptor_blocks), so the only time this should evaluate */
+		/*      to true is when sxg_complete_descriptor_blocks failed to allocate */
+		/*              receive buffers. */
 		if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
 			sxg_stock_rcv_buffers(adapter);
 		}
-		// It's more efficient to just set this to zero.
-		// But clearing the top bit saves potential debug info...
+		/* It's more efficient to just set this to zero. */
+		/* But clearing the top bit saves potential debug info... */
 		Event->Status &= ~EVENT_STATUS_VALID;
-		// Advanct to the next event
+		/* Advance to the next event */
 		SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
 		Event = &EventRing->Ring[adapter->NextEvent[RssId]];
 		EventsProcessed++;
 		if (EventsProcessed == EVENT_RING_BATCH) {
-			// Release a batch of events back to the card
+			/* Release a batch of events back to the card */
 			WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
 				  EVENT_RING_BATCH, FALSE);
 			EventsProcessed = 0;
-			// If we've processed our batch limit, break out of the
-			// loop and return SXG_ISR_EVENT to arrange for us to
-			// be called again
+			/* If we've processed our batch limit, break out of the */
+			/* loop and return SXG_ISR_EVENT to arrange for us to */
+			/* be called again */
 			if (Batches++ == EVENT_BATCH_LIMIT) {
 				SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
 					  TRACE_NOISY, "EvtLimit", Batches,
@@ -1265,14 +1266,14 @@
 		}
 	}
 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
-	//
-	// Indicate any received dumb-nic frames
-	//
+	/* */
+	/* Indicate any received dumb-nic frames */
+	/* */
 	SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
 #endif
-	//
-	// Release events back to the card.
-	//
+	/* */
+	/* Release events back to the card. */
+	/* */
 	if (EventsProcessed) {
 		WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
 			  EventsProcessed, FALSE);
@@ -1299,43 +1300,43 @@
 	u32 *ContextType;
 	PSXG_CMD XmtCmd;
 
-	// NOTE - This lock is dropped and regrabbed in this loop.
-	// This means two different processors can both be running
-	// through this loop. Be *very* careful.
+	/* NOTE - This lock is dropped and regrabbed in this loop. */
+	/* This means two different processors can both be running */
+	/* through this loop. Be *very* careful. */
 	spin_lock(&adapter->XmtZeroLock);
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
 		  adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
 
 	while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
-		// Locate the current Cmd (ring descriptor entry), and
-		// associated SGL, and advance the tail
+		/* Locate the current Cmd (ring descriptor entry), and */
+		/* associated SGL, and advance the tail */
 		SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
 		ASSERT(ContextType);
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
 			  XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
-		// Clear the SGL field.
+		/* Clear the SGL field. */
 		XmtCmd->Sgl = 0;
 
 		switch (*ContextType) {
 		case SXG_SGL_DUMB:
 			{
 				struct sk_buff *skb;
-				// Dumb-nic send.  Command context is the dumb-nic SGL
+				/* Dumb-nic send.  Command context is the dumb-nic SGL */
 				skb = (struct sk_buff *)ContextType;
-				// Complete the send
+				/* Complete the send */
 				SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
 					  TRACE_IMPORTANT, "DmSndCmp", skb, 0,
 					  0, 0);
 				ASSERT(adapter->Stats.XmtQLen);
-				adapter->Stats.XmtQLen--;	// within XmtZeroLock
+				adapter->Stats.XmtQLen--;	/* within XmtZeroLock */
 				adapter->Stats.XmtOk++;
-				// Now drop the lock and complete the send back to
-				// Microsoft.  We need to drop the lock because
-				// Microsoft can come back with a chimney send, which
-				// results in a double trip in SxgTcpOuput
+				/* Now drop the lock and complete the send back to */
+				/* Microsoft.  We need to drop the lock because */
+				/* Microsoft can come back with a chimney send, which */
+				/* results in a double trip in SxgTcpOutput */
 				spin_unlock(&adapter->XmtZeroLock);
 				SXG_COMPLETE_DUMB_SEND(adapter, skb);
-				// and reacquire..
+				/* and reacquire.. */
 				spin_lock(&adapter->XmtZeroLock);
 			}
 			break;
@@ -1371,7 +1372,7 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
 		  RcvDataBufferHdr, RcvDataBufferHdr->State,
 		  RcvDataBufferHdr->VirtualAddress);
-	// Drop rcv frames in non-running state
+	/* Drop rcv frames in non-running state */
 	switch (adapter->State) {
 	case SXG_STATE_RUNNING:
 		break;
@@ -1384,12 +1385,12 @@
 		goto drop;
 	}
 
-	// Change buffer state to UPSTREAM
+	/* Change buffer state to UPSTREAM */
 	RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
 	if (Event->Status & EVENT_STATUS_RCVERR) {
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
 			  Event, Event->Status, Event->HostHandle, 0);
-		// XXXTODO - Remove this print later
+		/* XXXTODO - Remove this print later */
 		DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
 			  SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
 		sxg_process_rcv_error(adapter, *(u32 *)
@@ -1397,8 +1398,8 @@
 				      (RcvDataBufferHdr));
 		goto drop;
 	}
-#if XXXTODO			// VLAN stuff
-	// If there's a VLAN tag, extract it and validate it
+#if XXXTODO			/* VLAN stuff */
+	/* If there's a VLAN tag, extract it and validate it */
 	if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
 	    EtherType == ETHERTYPE_VLAN) {
 		if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
@@ -1411,9 +1412,9 @@
 		}
 	}
 #endif
-	//
-	// Dumb-nic frame.  See if it passes our mac filter and update stats
-	//
+	/* */
+	/* Dumb-nic frame.  See if it passes our mac filter and update stats */
+	/* */
 	if (!sxg_mac_filter(adapter, (p_ether_header)
 			    SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
 			    Event->Length)) {
@@ -1427,9 +1428,9 @@
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
 		  RcvDataBufferHdr, Packet, Event->Length, 0);
-	//
-	// Lastly adjust the receive packet length.
-	//
+	/* */
+	/* Lastly adjust the receive packet length. */
+	/* */
 	SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
 
 	return (Packet);
@@ -1541,7 +1542,7 @@
 
 	if (SXG_MULTICAST_PACKET(EtherHdr)) {
 		if (SXG_BROADCAST_PACKET(EtherHdr)) {
-			// broadcast
+			/* broadcast */
 			if (adapter->MacFilter & MAC_BCAST) {
 				adapter->Stats.DumbRcvBcastPkts++;
 				adapter->Stats.DumbRcvBcastBytes += length;
@@ -1550,7 +1551,7 @@
 				return (TRUE);
 			}
 		} else {
-			// multicast
+			/* multicast */
 			if (adapter->MacFilter & MAC_ALLMCAST) {
 				adapter->Stats.DumbRcvMcastPkts++;
 				adapter->Stats.DumbRcvMcastBytes += length;
@@ -1580,9 +1581,9 @@
 			}
 		}
 	} else if (adapter->MacFilter & MAC_DIRECTED) {
-		// Not broadcast or multicast.  Must be directed at us or
-		// the card is in promiscuous mode.  Either way, consider it
-		// ours if MAC_DIRECTED is set
+		/* Not broadcast or multicast.  Must be directed at us or */
+		/* the card is in promiscuous mode.  Either way, consider it */
+		/* ours if MAC_DIRECTED is set */
 		adapter->Stats.DumbRcvUcastPkts++;
 		adapter->Stats.DumbRcvUcastBytes += length;
 		adapter->Stats.DumbRcvPkts++;
@@ -1590,7 +1591,7 @@
 		return (TRUE);
 	}
 	if (adapter->MacFilter & MAC_PROMISC) {
-		// Whatever it is, keep it.
+		/* Whatever it is, keep it. */
 		adapter->Stats.DumbRcvPkts++;
 		adapter->Stats.DumbRcvBytes += length;
 		return (TRUE);
@@ -1606,7 +1607,7 @@
 
 		DBG_ERROR
 		    ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
-		     __FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS);
+		     __func__, adapter, adapter->netdev->irq, NR_IRQS);
 
 		spin_unlock_irqrestore(&sxg_global.driver_lock,
 				       sxg_global.flags);
@@ -1625,18 +1626,18 @@
 		}
 		adapter->intrregistered = 1;
 		adapter->IntRegistered = TRUE;
-		// Disable RSS with line-based interrupts
+		/* Disable RSS with line-based interrupts */
 		adapter->MsiEnabled = FALSE;
 		adapter->RssEnabled = FALSE;
 		DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
-			  __FUNCTION__, adapter, adapter->netdev->irq);
+			  __func__, adapter, adapter->netdev->irq);
 	}
 	return (STATUS_SUCCESS);
 }
 
 static void sxg_deregister_interrupt(p_adapter_t adapter)
 {
-	DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter);
+	DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
 #if XXXTODO
 	slic_init_cleanup(adapter);
 #endif
@@ -1651,7 +1652,7 @@
 	adapter->rcv_broadcasts = 0;
 	adapter->rcv_multicasts = 0;
 	adapter->rcv_unicasts = 0;
-	DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s EXIT\n", __func__);
 }
 
 /*
@@ -1666,7 +1667,7 @@
 	int status = 0;
 
 	DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
-		  __FUNCTION__, adapter->netdev->name,
+		  __func__, adapter->netdev->name,
 		  adapter->queues_initialized, adapter->state,
 		  adapter->linkstate, dev->flags);
 
@@ -1680,7 +1681,7 @@
 	adapter->devflags_prev = dev->flags;
 	adapter->macopts = MAC_DIRECTED;
 	if (dev->flags) {
-		DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__,
+		DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
 			  adapter->netdev->name);
 		if (dev->flags & IFF_BROADCAST) {
 			adapter->macopts |= MAC_BCAST;
@@ -1713,7 +1714,7 @@
 	/*
 	 *    clear any pending events, then enable interrupts
 	 */
-	DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
 
 	return (STATUS_SUCCESS);
 }
@@ -1724,11 +1725,11 @@
 	int status;
 
 	ASSERT(adapter);
-	DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
 		  adapter->activated);
 	DBG_ERROR
 	    ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
-	     __FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(),
+	     __func__, adapter->netdev->name, jiffies, smp_processor_id(),
 	     adapter->netdev, adapter, adapter->port);
 
 	netif_stop_queue(adapter->netdev);
@@ -1738,16 +1739,16 @@
 		sxg_global.num_sxg_ports_active++;
 		adapter->activated = 1;
 	}
-	// Initialize the adapter
-	DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__);
+	/* Initialize the adapter */
+	DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
 	status = sxg_initialize_adapter(adapter);
 	DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
-		  __FUNCTION__, status);
+		  __func__, status);
 
 	if (status == STATUS_SUCCESS) {
-		DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__);
+		DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
 		status = sxg_if_init(adapter);
-		DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__,
+		DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
 			  status);
 	}
 
@@ -1760,12 +1761,12 @@
 				       sxg_global.flags);
 		return (status);
 	}
-	DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
 
-	// Enable interrupts
+	/* Enable interrupts */
 	SXG_ENABLE_ALL_INTERRUPTS(adapter);
 
-	DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s EXIT\n", __func__);
 
 	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
 	return STATUS_SUCCESS;
@@ -1779,27 +1780,27 @@
 	p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
 
 	ASSERT(adapter);
-	DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev,
+	DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
 		  adapter);
 	sxg_deregister_interrupt(adapter);
 	sxg_unmap_mmio_space(adapter);
-	DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s unregister_netdev\n", __func__);
 	unregister_netdev(dev);
 
 	mmio_start = pci_resource_start(pcidev, 0);
 	mmio_len = pci_resource_len(pcidev, 0);
 
-	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
 		  mmio_start, mmio_len);
 	release_mem_region(mmio_start, mmio_len);
 
-	DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __func__,
 		  (unsigned int)dev->base_addr);
 	iounmap((char *)dev->base_addr);
 
-	DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s deallocate device\n", __func__);
 	kfree(dev);
-	DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s EXIT\n", __func__);
 }
 
 static int sxg_entry_halt(p_net_device dev)
@@ -1807,17 +1808,17 @@
 	p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
 
 	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
-	DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name);
+	DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
 
 	netif_stop_queue(adapter->netdev);
 	adapter->state = ADAPT_DOWN;
 	adapter->linkstate = LINK_DOWN;
 	adapter->devflags_prev = 0;
 	DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
-		  __FUNCTION__, dev->name, adapter, adapter->state);
+		  __func__, dev->name, adapter, adapter->state);
 
-	DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
-	DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+	DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
+	DBG_ERROR("sxg: %s EXIT\n", __func__);
 	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
 	return (STATUS_SUCCESS);
 }
@@ -1825,11 +1826,11 @@
 static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
 {
 	ASSERT(rq);
-//      DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev);
+/*      DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
 	switch (cmd) {
 	case SIOCSLICSETINTAGG:
 		{
-//                      p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
+/*                      p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); */
 			u32 data[7];
 			u32 intagg;
 
@@ -1841,12 +1842,12 @@
 			intagg = data[0];
 			printk(KERN_EMERG
 			       "%s: set interrupt aggregation to %d\n",
-			       __FUNCTION__, intagg);
+			       __func__, intagg);
 			return 0;
 		}
 
 	default:
-//              DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd);
+/*              DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
 		return -EOPNOTSUPP;
 	}
 	return 0;
@@ -1870,15 +1871,15 @@
 	p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
 	u32 status = STATUS_SUCCESS;
 
-	DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
 		  skb);
-	// Check the adapter state
+	/* Check the adapter state */
 	switch (adapter->State) {
 	case SXG_STATE_INITIALIZING:
 	case SXG_STATE_HALTED:
 	case SXG_STATE_SHUTDOWN:
-		ASSERT(0);	// unexpected
-		// fall through
+		ASSERT(0);	/* unexpected */
+		/* fall through */
 	case SXG_STATE_RESETTING:
 	case SXG_STATE_SLEEP:
 	case SXG_STATE_BOOTDIAG:
@@ -1898,23 +1899,23 @@
 	if (status != STATUS_SUCCESS) {
 		goto xmit_fail;
 	}
-	// send a packet
+	/* send a packet */
 	status = sxg_transmit_packet(adapter, skb);
 	if (status == STATUS_SUCCESS) {
 		goto xmit_done;
 	}
 
       xmit_fail:
-	// reject & complete all the packets if they cant be sent
+	/* reject & complete all the packets if they can't be sent */
 	if (status != STATUS_SUCCESS) {
 #if XXXTODO
-//      sxg_send_packets_fail(adapter, skb, status);
+/*      sxg_send_packets_fail(adapter, skb, status); */
 #else
 		SXG_DROP_DUMB_SEND(adapter, skb);
 		adapter->stats.tx_dropped++;
 #endif
 	}
-	DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
 		  status);
 
       xmit_done:
@@ -1940,12 +1941,12 @@
 	void *SglBuffer;
 	u32 SglBufferLength;
 
-	// The vast majority of work is done in the shared
-	// sxg_dumb_sgl routine.
+	/* The vast majority of work is done in the shared */
+	/* sxg_dumb_sgl routine. */
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
 		  adapter, skb, 0, 0);
 
-	// Allocate a SGL buffer
+	/* Allocate a SGL buffer */
 	SXG_GET_SGL_BUFFER(adapter, SxgSgl);
 	if (!SxgSgl) {
 		adapter->Stats.NoSglBuf++;
@@ -1963,9 +1964,9 @@
 	SxgSgl->DumbPacket = skb;
 	pSgl = NULL;
 
-	// Call the common sxg_dumb_sgl routine to complete the send.
+	/* Call the common sxg_dumb_sgl routine to complete the send. */
 	sxg_dumb_sgl(pSgl, SxgSgl);
-	// Return success   sxg_dumb_sgl (or something later) will complete it.
+	/* Return success   sxg_dumb_sgl (or something later) will complete it. */
 	return (STATUS_SUCCESS);
 }
 
@@ -1983,39 +1984,39 @@
 {
 	p_adapter_t adapter = SxgSgl->adapter;
 	struct sk_buff *skb = SxgSgl->DumbPacket;
-	// For now, all dumb-nic sends go on RSS queue zero
+	/* For now, all dumb-nic sends go on RSS queue zero */
 	PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
 	PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
 	PSXG_CMD XmtCmd = NULL;
-//      u32                         Index = 0;
+/*      u32                         Index = 0; */
 	u32 DataLength = skb->len;
-//  unsigned int                                BufLen;
-//      u32                         SglOffset;
+/*  unsigned int                                BufLen; */
+/*      u32                         SglOffset; */
 	u64 phys_addr;
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
 		  pSgl, SxgSgl, 0, 0);
 
-	// Set aside a pointer to the sgl
+	/* Set aside a pointer to the sgl */
 	SxgSgl->pSgl = pSgl;
 
-	// Sanity check that our SGL format is as we expect.
+	/* Sanity check that our SGL format is as we expect. */
 	ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
-	// Shouldn't be a vlan tag on this frame
+	/* Shouldn't be a vlan tag on this frame */
 	ASSERT(SxgSgl->VlanTag.VlanTci == 0);
 	ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
 
-	// From here below we work with the SGL placed in our
-	// buffer.
+	/* From here below we work with the SGL placed in our */
+	/* buffer. */
 
 	SxgSgl->Sgl.NumberOfElements = 1;
 
-	// Grab the spinlock and acquire a command
+	/* Grab the spinlock and acquire a command */
 	spin_lock(&adapter->XmtZeroLock);
 	SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
 	if (XmtCmd == NULL) {
-		// Call sxg_complete_slow_send to see if we can
-		// free up any XmtRingZero entries and then try again
+		/* Call sxg_complete_slow_send to see if we can */
+		/* free up any XmtRingZero entries and then try again */
 		spin_unlock(&adapter->XmtZeroLock);
 		sxg_complete_slow_send(adapter);
 		spin_lock(&adapter->XmtZeroLock);
@@ -2027,10 +2028,10 @@
 	}
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
 		  XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
-	// Update stats
+	/* Update stats */
 	adapter->Stats.DumbXmtPkts++;
 	adapter->Stats.DumbXmtBytes += DataLength;
-#if XXXTODO			// Stats stuff
+#if XXXTODO			/* Stats stuff */
 	if (SXG_MULTICAST_PACKET(EtherHdr)) {
 		if (SXG_BROADCAST_PACKET(EtherHdr)) {
 			adapter->Stats.DumbXmtBcastPkts++;
@@ -2044,8 +2045,8 @@
 		adapter->Stats.DumbXmtUcastBytes += DataLength;
 	}
 #endif
-	// Fill in the command
-	// Copy out the first SGE to the command and adjust for offset
+	/* Fill in the command */
+	/* Copy out the first SGE to the command and adjust for offset */
 	phys_addr =
 	    pci_map_single(adapter->pcidev, skb->data, skb->len,
 			   PCI_DMA_TODEVICE);
@@ -2053,54 +2054,54 @@
 	XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
 	XmtCmd->Buffer.FirstSgeAddress =
 	    XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
-//      XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address;
-//      XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset;
+/*      XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; */
+/*      XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; */
 	XmtCmd->Buffer.FirstSgeLength = DataLength;
-	// Set a pointer to the remaining SGL entries
-//      XmtCmd->Sgl = SxgSgl->PhysicalAddress;
-	// Advance the physical address of the SxgSgl structure to
-	// the second SGE
-//      SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) -
-//                                              (u32 *)SxgSgl);
-//      XmtCmd->Sgl.LowPart += SglOffset;
+	/* Set a pointer to the remaining SGL entries */
+/*      XmtCmd->Sgl = SxgSgl->PhysicalAddress; */
+	/* Advance the physical address of the SxgSgl structure to */
+	/* the second SGE */
+/*      SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - */
+/*                                              (u32 *)SxgSgl); */
+/*      XmtCmd->Sgl.LowPart += SglOffset; */
 	XmtCmd->Buffer.SgeOffset = 0;
-	// Note - TotalLength might be overwritten with MSS below..
+	/* Note - TotalLength might be overwritten with MSS below.. */
 	XmtCmd->Buffer.TotalLength = DataLength;
-	XmtCmd->SgEntries = 1;	//(ushort)(SxgSgl->Sgl.NumberOfElements - Index);
+	XmtCmd->SgEntries = 1;	/*(ushort)(SxgSgl->Sgl.NumberOfElements - Index); */
 	XmtCmd->Flags = 0;
-	//
-	// Advance transmit cmd descripter by 1.
-	// NOTE - See comments in SxgTcpOutput where we write
-	// to the XmtCmd register regarding CPU ID values and/or
-	// multiple commands.
-	//
-	//
+	/* */
+	/* Advance transmit cmd descriptor by 1. */
+	/* NOTE - See comments in SxgTcpOutput where we write */
+	/* to the XmtCmd register regarding CPU ID values and/or */
+	/* multiple commands. */
+	/* */
+	/* */
 	WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
-	//
-	//
-	adapter->Stats.XmtQLen++;	// Stats within lock
+	/* */
+	/* */
+	adapter->Stats.XmtQLen++;	/* Stats within lock */
 	spin_unlock(&adapter->XmtZeroLock);
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
 		  XmtCmd, pSgl, SxgSgl, 0);
 	return;
 
       abortcmd:
-	// NOTE - Only jump to this label AFTER grabbing the
-	// XmtZeroLock, and DO NOT DROP IT between the
-	// command allocation and the following abort.
+	/* NOTE - Only jump to this label AFTER grabbing the */
+	/* XmtZeroLock, and DO NOT DROP IT between the */
+	/* command allocation and the following abort. */
 	if (XmtCmd) {
 		SXG_ABORT_CMD(XmtRingInfo);
 	}
 	spin_unlock(&adapter->XmtZeroLock);
 
-// failsgl:
-	// Jump to this label if failure occurs before the
-	// XmtZeroLock is grabbed
+/* failsgl: */
+	/* Jump to this label if failure occurs before the */
+	/* XmtZeroLock is grabbed */
 	adapter->Stats.XmtErrors++;
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
 		  pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
 
-	SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);	// SxgSgl->DumbPacket is the skb
+	SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);	/* SxgSgl->DumbPacket is the skb */
 }
 
 /***************************************************************
@@ -2127,122 +2128,122 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
 		  adapter, 0, 0, 0);
 
-	// Reset PHY and XGXS module
+	/* Reset PHY and XGXS module */
 	WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
 
-	// Reset transmit configuration register
+	/* Reset transmit configuration register */
 	WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
 
-	// Reset receive configuration register
+	/* Reset receive configuration register */
 	WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
 
-	// Reset all MAC modules
+	/* Reset all MAC modules */
 	WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
 
-	// Link address 0
-	// XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
-	// is stored with the first nibble (0a) in the byte 0
-	// of the Mac address.  Possibly reverse?
+	/* Link address 0 */
+	/* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
+	/* is stored with the first nibble (0a) in the byte 0 */
+	/* of the Mac address.  Possibly reverse? */
 	Value = *(u32 *) adapter->MacAddr;
 	WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
-	// also write the MAC address to the MAC.  Endian is reversed.
+	/* also write the MAC address to the MAC.  Endian is reversed. */
 	WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
 	Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
 	WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
-	// endian swap for the MAC (put high bytes in bits [31:16], swapped)
+	/* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
 	Value = ntohl(Value);
 	WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
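/*
 * A minimal, standalone sketch (not part of this patch) of the register
 * values the sequence above produces for the example MAC 0a:0b:0c:0d:0e:0f,
 * assuming a little-endian host.  It only illustrates the byte ordering;
 * the names in the printouts refer to the register writes above.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	const uint8_t mac[6] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
	uint32_t low = 0;
	uint16_t high16 = 0;

	memcpy(&low, mac, 4);		/* first four MAC bytes, host order */
	memcpy(&high16, mac + 4, 2);	/* last two MAC bytes, host order */

	printf("LinkAddress0Low  = 0x%08x\n", low);		/* 0x0d0c0b0a */
	printf("MacAddressLow    = 0x%08x\n", ntohl(low));	/* 0x0a0b0c0d */
	printf("LinkAddress0High = 0x%08x (plus enable bit)\n",
	       (uint32_t)high16);				/* 0x00000f0e */
	printf("MacAddressHigh   = 0x%08x\n",
	       ntohl((uint32_t)high16));			/* 0x0e0f0000 */
	return 0;
}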
-	// Link address 1
+	/* Link address 1 */
 	WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
 	WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
-	// Link address 2
+	/* Link address 2 */
 	WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
 	WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
-	// Link address 3
+	/* Link address 3 */
 	WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
 	WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
 
-	// Enable MAC modules
+	/* Enable MAC modules */
 	WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
 
-	// Configure MAC
-	WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE |	// Allow sending of pause
-				       AXGMAC_CFG1_XMT_EN |	// Enable XMT
-				       AXGMAC_CFG1_RCV_PAUSE |	// Enable detection of pause
-				       AXGMAC_CFG1_RCV_EN |	// Enable receive
-				       AXGMAC_CFG1_SHORT_ASSERT |	// short frame detection
-				       AXGMAC_CFG1_CHECK_LEN |	// Verify frame length
-				       AXGMAC_CFG1_GEN_FCS |	// Generate FCS
-				       AXGMAC_CFG1_PAD_64),	// Pad frames to 64 bytes
+	/* Configure MAC */
+	WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE |	/* Allow sending of pause */
+				       AXGMAC_CFG1_XMT_EN |	/* Enable XMT */
+				       AXGMAC_CFG1_RCV_PAUSE |	/* Enable detection of pause */
+				       AXGMAC_CFG1_RCV_EN |	/* Enable receive */
+				       AXGMAC_CFG1_SHORT_ASSERT |	/* short frame detection */
+				       AXGMAC_CFG1_CHECK_LEN |	/* Verify frame length */
+				       AXGMAC_CFG1_GEN_FCS |	/* Generate FCS */
+				       AXGMAC_CFG1_PAD_64),	/* Pad frames to 64 bytes */
 		  TRUE);
 
-	// Set AXGMAC max frame length if jumbo.  Not needed for standard MTU
+	/* Set AXGMAC max frame length if jumbo.  Not needed for standard MTU */
 	if (adapter->JumboEnabled) {
 		WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
 	}
-	// AMIIM Configuration Register -
-	// The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
-	// (bottom bits) of this register is used to determine the
-	// MDC frequency as specified in the A-XGMAC Design Document.
-	// This value must not be zero.  The following value (62 or 0x3E)
-	// is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz.
-	// Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec),
-	// we get:  312.5/(2*(X+1)) < 2.5  ==> X = 62.
-	// This value happens to be the default value for this register,
-	// so we really don't have to do this.
+	/* AMIIM Configuration Register - */
+	/* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
+	/* (bottom bits) of this register is used to determine the */
+	/* MDC frequency as specified in the A-XGMAC Design Document. */
+	/* This value must not be zero.  The following value (62 or 0x3E) */
+	/* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
+	/* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
+	/* we get:  312.5/(2*(X+1)) < 2.5  ==> X = 62. */
+	/* This value happens to be the default value for this register, */
+	/* so we really don't have to do this. */
 	WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
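/*
 * A minimal, standalone sketch (not part of this patch) that re-derives the
 * 0x3E half-clock value from the formula in the comment above.  The 312.5 MHz
 * MTCLK and 2.5 MHz MDC limits are taken from that comment, not from a header.
 */
#include <stdio.h>

int main(void)
{
	const double mtclk_mhz = 312.5;		/* MAC transmit clock */
	const double mdc_max_mhz = 2.5;		/* max MDIO clock per PHY spec */
	unsigned int x = 0;

	/* Smallest X such that MTCLK / (2 * (X + 1)) <= MDC max */
	while (mtclk_mhz / (2.0 * (x + 1)) > mdc_max_mhz)
		x++;

	printf("half-clock divisor X = %u (0x%02X)\n", x, x);	/* 62 (0x3E) */
	return 0;
}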
 
-	// Power up and enable PHY and XAUI/XGXS/Serdes logic
+	/* Power up and enable PHY and XAUI/XGXS/Serdes logic */
 	WRITE_REG(HwRegs->LinkStatus,
 		  (LS_PHY_CLR_RESET |
 		   LS_XGXS_ENABLE |
 		   LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
 	DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
 
-	// Per information given by Aeluros, wait 100 ms after removing reset.
-	// It's not enough to wait for the self-clearing reset bit in reg 0 to clear.
+	/* Per information given by Aeluros, wait 100 ms after removing reset. */
+	/* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
 	mdelay(100);
 
-	// Verify the PHY has come up by checking that the Reset bit has cleared.
-	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-				   PHY_PMA_CONTROL1,	// PMA/PMD control register
+	/* Verify the PHY has come up by checking that the Reset bit has cleared. */
+	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+				   PHY_PMA_CONTROL1,	/* PMA/PMD control register */
 				   &Value);
 	if (status != STATUS_SUCCESS)
 		return (STATUS_FAILURE);
-	if (Value & PMA_CONTROL1_RESET)	// reset complete if bit is 0
+	if (Value & PMA_CONTROL1_RESET)	/* reset complete if bit is 0 */
 		return (STATUS_FAILURE);
 
-	// The SERDES should be initialized by now - confirm
+	/* The SERDES should be initialized by now - confirm */
 	READ_REG(HwRegs->LinkStatus, Value);
-	if (Value & LS_SERDES_DOWN)	// verify SERDES is initialized
+	if (Value & LS_SERDES_DOWN)	/* verify SERDES is initialized */
 		return (STATUS_FAILURE);
 
-	// The XAUI link should also be up - confirm
-	if (!(Value & LS_XAUI_LINK_UP))	// verify XAUI link is up
+	/* The XAUI link should also be up - confirm */
+	if (!(Value & LS_XAUI_LINK_UP))	/* verify XAUI link is up */
 		return (STATUS_FAILURE);
 
-	// Initialize the PHY
+	/* Initialize the PHY */
 	status = sxg_phy_init(adapter);
 	if (status != STATUS_SUCCESS)
 		return (STATUS_FAILURE);
 
-	// Enable the Link Alarm
-	status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-				    LASI_CONTROL,	// LASI control register
-				    LASI_CTL_LS_ALARM_ENABLE);	// enable link alarm bit
+	/* Enable the Link Alarm */
+	status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+				    LASI_CONTROL,	/* LASI control register */
+				    LASI_CTL_LS_ALARM_ENABLE);	/* enable link alarm bit */
 	if (status != STATUS_SUCCESS)
 		return (STATUS_FAILURE);
 
-	// XXXTODO - temporary - verify bit is set
-	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-				   LASI_CONTROL,	// LASI control register
+	/* XXXTODO - temporary - verify bit is set */
+	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+				   LASI_CONTROL,	/* LASI control register */
 				   &Value);
 	if (status != STATUS_SUCCESS)
 		return (STATUS_FAILURE);
 	if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
 		DBG_ERROR("Error!  LASI Control Alarm Enable bit not set!\n");
 	}
-	// Enable receive
+	/* Enable receive */
 	MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
 	ConfigData = (RCV_CONFIG_ENABLE |
 		      RCV_CONFIG_ENPARSE |
@@ -2256,7 +2257,7 @@
 
 	WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
 
-	// Mark the link as down.  We'll get a link event when it comes up.
+	/* Mark the link as down.  We'll get a link event when it comes up. */
 	sxg_link_state(adapter, SXG_LINK_DOWN);
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
@@ -2279,35 +2280,35 @@
 	PPHY_UCODE p;
 	int status;
 
-	DBG_ERROR("ENTER %s\n", __FUNCTION__);
+	DBG_ERROR("ENTER %s\n", __func__);
 
-	// Read a register to identify the PHY type
-	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-				   0xC205,	// PHY ID register (?)
-				   &Value);	//    XXXTODO - add def
+	/* Read a register to identify the PHY type */
+	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+				   0xC205,	/* PHY ID register (?) */
+				   &Value);	/*    XXXTODO - add def */
 	if (status != STATUS_SUCCESS)
 		return (STATUS_FAILURE);
 
-	if (Value == 0x0012) {	// 0x0012 == AEL2005C PHY(?) - XXXTODO - add def
+	if (Value == 0x0012) {	/* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
 		DBG_ERROR
 		    ("AEL2005C PHY detected.  Downloading PHY microcode.\n");
 
-		// Initialize AEL2005C PHY and download PHY microcode
+		/* Initialize AEL2005C PHY and download PHY microcode */
 		for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
 			if (p->Addr == 0) {
-				// if address == 0, data == sleep time in ms
+				/* if address == 0, data == sleep time in ms */
 				mdelay(p->Data);
 			} else {
-				// write the given data to the specified address
-				status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-							    p->Addr,	// PHY address
-							    p->Data);	// PHY data
+				/* write the given data to the specified address */
+				status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+							    p->Addr,	/* PHY address */
+							    p->Data);	/* PHY data */
 				if (status != STATUS_SUCCESS)
 					return (STATUS_FAILURE);
 			}
 		}
 	}
-	DBG_ERROR("EXIT %s\n", __FUNCTION__);
+	DBG_ERROR("EXIT %s\n", __func__);
 
 	return (STATUS_SUCCESS);
 }
@@ -2330,42 +2331,42 @@
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
 		  adapter, 0, 0, 0);
-	DBG_ERROR("ENTER %s\n", __FUNCTION__);
+	DBG_ERROR("ENTER %s\n", __func__);
 
-	// Check the Link Status register.  We should have a Link Alarm.
+	/* Check the Link Status register.  We should have a Link Alarm. */
 	READ_REG(HwRegs->LinkStatus, Value);
 	if (Value & LS_LINK_ALARM) {
-		// We got a Link Status alarm.  First, pause to let the
-		// link state settle (it can bounce a number of times)
+		/* We got a Link Status alarm.  First, pause to let the */
+		/* link state settle (it can bounce a number of times) */
 		mdelay(10);
 
-		// Now clear the alarm by reading the LASI status register.
-		status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-					   LASI_STATUS,	// LASI status register
+		/* Now clear the alarm by reading the LASI status register. */
+		status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+					   LASI_STATUS,	/* LASI status register */
 					   &Value);
 		if (status != STATUS_SUCCESS) {
 			DBG_ERROR("Error reading LASI Status MDIO register!\n");
 			sxg_link_state(adapter, SXG_LINK_DOWN);
-//                      ASSERT(0);
+/*                      ASSERT(0); */
 		}
 		ASSERT(Value & LASI_STATUS_LS_ALARM);
 
-		// Now get and set the link state
+		/* Now get and set the link state */
 		LinkState = sxg_get_link_state(adapter);
 		sxg_link_state(adapter, LinkState);
 		DBG_ERROR("SXG: Link Alarm occurred.  Link is %s\n",
 			  ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
 	} else {
-		// XXXTODO - Assuming Link Attention is only being generated for the
-		// Link Alarm pin (and not for a XAUI Link Status change), then it's
-		// impossible to get here.  Yet we've gotten here twice (under extreme
-		// conditions - bouncing the link up and down many times a second).
-		// Needs further investigation.
+		/* XXXTODO - Assuming Link Attention is only being generated for the */
+		/* Link Alarm pin (and not for a XAUI Link Status change), then it's */
+		/* impossible to get here.  Yet we've gotten here twice (under extreme */
+		/* conditions - bouncing the link up and down many times a second). */
+		/* Needs further investigation. */
 		DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
 		DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
-//              ASSERT(0);
+/*              ASSERT(0); */
 	}
-	DBG_ERROR("EXIT %s\n", __FUNCTION__);
+	DBG_ERROR("EXIT %s\n", __func__);
 
 }
 
@@ -2383,50 +2384,50 @@
 	int status;
 	u32 Value;
 
-	DBG_ERROR("ENTER %s\n", __FUNCTION__);
+	DBG_ERROR("ENTER %s\n", __func__);
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
 		  adapter, 0, 0, 0);
 
-	// Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
-	// the following 3 bits (from 3 different MDIO registers) are all true.
-	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	// PHY PMA/PMD module
-				   PHY_PMA_RCV_DET,	// PMA/PMD Receive Signal Detect register
+	/* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
+	/* the following 3 bits (from 3 different MDIO registers) are all true. */
+	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PHY PMA/PMD module */
+				   PHY_PMA_RCV_DET,	/* PMA/PMD Receive Signal Detect register */
 				   &Value);
 	if (status != STATUS_SUCCESS)
 		goto bad;
 
-	// If PMA/PMD receive signal detect is 0, then the link is down
+	/* If PMA/PMD receive signal detect is 0, then the link is down */
 	if (!(Value & PMA_RCV_DETECT))
 		return (SXG_LINK_DOWN);
 
-	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,	// PHY PCS module
-				   PHY_PCS_10G_STATUS1,	// PCS 10GBASE-R Status 1 register
+	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,	/* PHY PCS module */
+				   PHY_PCS_10G_STATUS1,	/* PCS 10GBASE-R Status 1 register */
 				   &Value);
 	if (status != STATUS_SUCCESS)
 		goto bad;
 
-	// If PCS is not locked to receive blocks, then the link is down
+	/* If PCS is not locked to receive blocks, then the link is down */
 	if (!(Value & PCS_10B_BLOCK_LOCK))
 		return (SXG_LINK_DOWN);
 
-	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,	// PHY XS module
-				   PHY_XS_LANE_STATUS,	// XS Lane Status register
+	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,	/* PHY XS module */
+				   PHY_XS_LANE_STATUS,	/* XS Lane Status register */
 				   &Value);
 	if (status != STATUS_SUCCESS)
 		goto bad;
 
-	// If XS transmit lanes are not aligned, then the link is down
+	/* If XS transmit lanes are not aligned, then the link is down */
 	if (!(Value & XS_LANE_ALIGN))
 		return (SXG_LINK_DOWN);
 
-	// All 3 bits are true, so the link is up
-	DBG_ERROR("EXIT %s\n", __FUNCTION__);
+	/* All 3 bits are true, so the link is up */
+	DBG_ERROR("EXIT %s\n", __func__);
 
 	return (SXG_LINK_UP);
 
       bad:
-	// An error occurred reading an MDIO register.  This shouldn't happen.
+	/* An error occurred reading an MDIO register.  This shouldn't happen. */
 	DBG_ERROR("Error reading an MDIO register!\n");
 	ASSERT(0);
 	return (SXG_LINK_DOWN);
@@ -2437,11 +2438,11 @@
 {
 	if (adapter->LinkState == SXG_LINK_UP) {
 		DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
-			  __FUNCTION__);
+			  __func__);
 		netif_start_queue(adapter->netdev);
 	} else {
 		DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
-			  __FUNCTION__);
+			  __func__);
 		netif_stop_queue(adapter->netdev);
 	}
 }
@@ -2464,23 +2465,23 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
 		  adapter, LinkState, adapter->LinkState, adapter->State);
 
-	DBG_ERROR("ENTER %s\n", __FUNCTION__);
+	DBG_ERROR("ENTER %s\n", __func__);
 
-	// Hold the adapter lock during this routine.  Maybe move
-	// the lock to the caller.
+	/* Hold the adapter lock during this routine.  Maybe move */
+	/* the lock to the caller. */
 	spin_lock(&adapter->AdapterLock);
 	if (LinkState == adapter->LinkState) {
-		// Nothing changed..
+		/* Nothing changed.. */
 		spin_unlock(&adapter->AdapterLock);
-		DBG_ERROR("EXIT #0 %s\n", __FUNCTION__);
+		DBG_ERROR("EXIT #0 %s\n", __func__);
 		return;
 	}
-	// Save the adapter state
+	/* Save the adapter state */
 	adapter->LinkState = LinkState;
 
-	// Drop the lock and indicate link state
+	/* Drop the lock and indicate link state */
 	spin_unlock(&adapter->AdapterLock);
-	DBG_ERROR("EXIT #1 %s\n", __FUNCTION__);
+	DBG_ERROR("EXIT #1 %s\n", __func__);
 
 	sxg_indicate_link_state(adapter, LinkState);
 }
@@ -2501,76 +2502,76 @@
 			      u32 DevAddr, u32 RegAddr, u32 Value)
 {
 	PSXG_HW_REGS HwRegs = adapter->HwRegs;
-	u32 AddrOp;		// Address operation (written to MIIM field reg)
-	u32 WriteOp;		// Write operation (written to MIIM field reg)
-	u32 Cmd;		// Command (written to MIIM command reg)
+	u32 AddrOp;		/* Address operation (written to MIIM field reg) */
+	u32 WriteOp;		/* Write operation (written to MIIM field reg) */
+	u32 Cmd;		/* Command (written to MIIM command reg) */
 	u32 ValueRead;
 	u32 Timeout;
 
-//  DBG_ERROR("ENTER %s\n", __FUNCTION__);
+/*  DBG_ERROR("ENTER %s\n", __func__); */
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
 		  adapter, 0, 0, 0);
 
-	// Ensure values don't exceed field width
-	DevAddr &= 0x001F;	// 5-bit field
-	RegAddr &= 0xFFFF;	// 16-bit field
-	Value &= 0xFFFF;	// 16-bit field
+	/* Ensure values don't exceed field width */
+	DevAddr &= 0x001F;	/* 5-bit field */
+	RegAddr &= 0xFFFF;	/* 16-bit field */
+	Value &= 0xFFFF;	/* 16-bit field */
 
-	// Set MIIM field register bits for an MIIM address operation
+	/* Set MIIM field register bits for an MIIM address operation */
 	AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
 	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
 	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
 	    (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
 
-	// Set MIIM field register bits for an MIIM write operation
+	/* Set MIIM field register bits for an MIIM write operation */
 	WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
 	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
 	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
 	    (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
 
-	// Set MIIM command register bits to execute an MIIM command
+	/* Set MIIM command register bits to execute an MIIM command */
 	Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
 
-	// Reset the command register command bit (in case it's not 0)
+	/* Reset the command register command bit (in case it's not 0) */
 	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
 
-	// MIIM write to set the address of the specified MDIO register
+	/* MIIM write to set the address of the specified MDIO register */
 	WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
 
-	// Write to MIIM Command Register to execute to address operation
+	/* Write to MIIM Command Register to execute the address operation */
 	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
 
-	// Poll AMIIM Indicator register to wait for completion
+	/* Poll AMIIM Indicator register to wait for completion */
 	Timeout = SXG_LINK_TIMEOUT;
 	do {
-		udelay(100);	// Timeout in 100us units
+		udelay(100);	/* Timeout in 100us units */
 		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
 		if (--Timeout == 0) {
 			return (STATUS_FAILURE);
 		}
 	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
 
-	// Reset the command register command bit
+	/* Reset the command register command bit */
 	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
 
-	// MIIM write to set up an MDIO write operation
+	/* MIIM write to set up an MDIO write operation */
 	WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
 
-	// Write to MIIM Command Register to execute the write operation
+	/* Write to MIIM Command Register to execute the write operation */
 	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
 
-	// Poll AMIIM Indicator register to wait for completion
+	/* Poll AMIIM Indicator register to wait for completion */
 	Timeout = SXG_LINK_TIMEOUT;
 	do {
-		udelay(100);	// Timeout in 100us units
+		udelay(100);	/* Timeout in 100us units */
 		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
 		if (--Timeout == 0) {
 			return (STATUS_FAILURE);
 		}
 	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
 
-//  DBG_ERROR("EXIT %s\n", __FUNCTION__);
+/*  DBG_ERROR("EXIT %s\n", __func__); */
 
 	return (STATUS_SUCCESS);
 }
@@ -2591,83 +2592,186 @@
 			     u32 DevAddr, u32 RegAddr, u32 *pValue)
 {
 	PSXG_HW_REGS HwRegs = adapter->HwRegs;
-	u32 AddrOp;		// Address operation (written to MIIM field reg)
-	u32 ReadOp;		// Read operation (written to MIIM field reg)
-	u32 Cmd;		// Command (written to MIIM command reg)
+	u32 AddrOp;		/* Address operation (written to MIIM field reg) */
+	u32 ReadOp;		/* Read operation (written to MIIM field reg) */
+	u32 Cmd;		/* Command (written to MIIM command reg) */
 	u32 ValueRead;
 	u32 Timeout;
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
 		  adapter, 0, 0, 0);
-//  DBG_ERROR("ENTER %s\n", __FUNCTION__);
+/*  DBG_ERROR("ENTER %s\n", __func__); */
 
-	// Ensure values don't exceed field width
-	DevAddr &= 0x001F;	// 5-bit field
-	RegAddr &= 0xFFFF;	// 16-bit field
+	/* Ensure values don't exceed field width */
+	DevAddr &= 0x001F;	/* 5-bit field */
+	RegAddr &= 0xFFFF;	/* 16-bit field */
 
-	// Set MIIM field register bits for an MIIM address operation
+	/* Set MIIM field register bits for an MIIM address operation */
 	AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
 	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
 	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
 	    (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
 
-	// Set MIIM field register bits for an MIIM read operation
+	/* Set MIIM field register bits for an MIIM read operation */
 	ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
 	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
 	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
 	    (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
 
-	// Set MIIM command register bits to execute an MIIM command
+	/* Set MIIM command register bits to execute an MIIM command */
 	Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
 
-	// Reset the command register command bit (in case it's not 0)
+	/* Reset the command register command bit (in case it's not 0) */
 	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
 
-	// MIIM write to set the address of the specified MDIO register
+	/* MIIM write to set the address of the specified MDIO register */
 	WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
 
-	// Write to MIIM Command Register to execute to address operation
+	/* Write to MIIM Command Register to execute the address operation */
 	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
 
-	// Poll AMIIM Indicator register to wait for completion
+	/* Poll AMIIM Indicator register to wait for completion */
 	Timeout = SXG_LINK_TIMEOUT;
 	do {
-		udelay(100);	// Timeout in 100us units
+		udelay(100);	/* Timeout in 100us units */
 		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
 		if (--Timeout == 0) {
 			return (STATUS_FAILURE);
 		}
 	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
 
-	// Reset the command register command bit
+	/* Reset the command register command bit */
 	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
 
-	// MIIM write to set up an MDIO register read operation
+	/* MIIM write to set up an MDIO register read operation */
 	WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
 
-	// Write to MIIM Command Register to execute the read operation
+	/* Write to MIIM Command Register to execute the read operation */
 	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
 
-	// Poll AMIIM Indicator register to wait for completion
+	/* Poll AMIIM Indicator register to wait for completion */
 	Timeout = SXG_LINK_TIMEOUT;
 	do {
-		udelay(100);	// Timeout in 100us units
+		udelay(100);	/* Timeout in 100us units */
 		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
 		if (--Timeout == 0) {
 			return (STATUS_FAILURE);
 		}
 	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
 
-	// Read the MDIO register data back from the field register
+	/* Read the MDIO register data back from the field register */
 	READ_REG(HwRegs->MacAmiimField, *pValue);
-	*pValue &= 0xFFFF;	// data is in the lower 16 bits
+	*pValue &= 0xFFFF;	/* data is in the lower 16 bits */
 
-//  DBG_ERROR("EXIT %s\n", __FUNCTION__);
+/*  DBG_ERROR("EXIT %s\n", __func__); */
 
 	return (STATUS_SUCCESS);
 }
 
 /*
+ * Functions to obtain the CRC corresponding to the destination mac address.
+ * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
+ * the polynomial:
+ *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1.
+ *
+ * After the CRC for the 6 bytes is generated (but before the value is complemented),
+ * we must then transpose the value and return bits 30-23.
+ *
+ */
+static u32 sxg_crc_table[256];	/* Table of CRC's for all possible byte values */
+
+/*
+ *  Construct the CRC32 table
+ */
+static void sxg_mcast_init_crc32(void)
+{
+	u32 c;			/*  CRC shift reg                */
+	u32 e = 0;		/*  Poly X-or pattern            */
+	int i;			/*  counter                      */
+	int k;			/*  byte being shifted into crc  */
+
+	static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
+
+	for (i = 0; i < sizeof(p) / sizeof(int); i++) {
+		e |= 1L << (31 - p[i]);
+	}
+
+	for (i = 1; i < 256; i++) {
+		c = i;
+		for (k = 8; k; k--) {
+			c = c & 1 ? (c >> 1) ^ e : c >> 1;
+		}
+		sxg_crc_table[i] = c;
+	}
+}
+
+#if XXXTODO
+static u32 sxg_crc_init;	/* Is table initialized */
+/*
+ *  Return the MAC hash as described above.
+ */
+static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
+{
+	u32 crc;
+	char *p;
+	int i;
+	unsigned char machash = 0;
+
+	if (!sxg_crc_init) {
+		sxg_mcast_init_crc32();
+		sxg_crc_init = 1;
+	}
+
+	crc = 0xFFFFFFFF;	/* Preload shift register, per crc-32 spec */
+	for (i = 0, p = macaddr; i < 6; ++p, ++i) {
+		crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
+	}
+
+	/* Return bits 1-8, transposed */
+	for (i = 1; i < 9; i++) {
+		machash |= (((crc >> i) & 1) << (8 - i));
+	}
+
+	return (machash);
+}
+
+static void sxg_mcast_set_mask(p_adapter_t adapter)
+{
+	PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
+
+	DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
+		  adapter->netdev->name, (unsigned int)adapter->MacFilter,
+		  adapter->MulticastMask);
+
+	if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
+		/* Turn on all multicast addresses. We have to do this for promiscuous
+		 * mode as well as ALLMCAST mode.  It saves the Microcode from having
+		 * to keep state about the MAC configuration.
+		 */
+/*              DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n      SLUT MODE!!!\n",__func__); */
+		WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
+		WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
+/*        DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
+
+	} else {
+		/* Commit our multicast mask to the SLIC by writing to the multicast
+		 * address mask registers
+		 */
+		DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
+			  __func__, adapter->netdev->name,
+			  ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
+			  ((ulong)
+			   ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
+
+		WRITE_REG(sxg_regs->McastLow,
+			  (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
+		WRITE_REG(sxg_regs->McastHigh,
+			  (u32) ((adapter->
+				  MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
+	}
+}
+
+/*
  *  Allocate a mcast_address structure to hold the multicast address.
  *  Link it in.
  */
@@ -2699,72 +2803,6 @@
 	return (STATUS_SUCCESS);
 }
 
-/*
- * Functions to obtain the CRC corresponding to the destination mac address.
- * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
- * the polynomial:
- *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1.
- *
- * After the CRC for the 6 bytes is generated (but before the value is complemented),
- * we must then transpose the value and return bits 30-23.
- *
- */
-static u32 sxg_crc_table[256];	/* Table of CRC's for all possible byte values */
-static u32 sxg_crc_init;	/* Is table initialized                        */
-
-/*
- *  Contruct the CRC32 table
- */
-static void sxg_mcast_init_crc32(void)
-{
-	u32 c;			/*  CRC shit reg                 */
-	u32 e = 0;		/*  Poly X-or pattern            */
-	int i;			/*  counter                      */
-	int k;			/*  byte being shifted into crc  */
-
-	static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
-
-	for (i = 0; i < sizeof(p) / sizeof(int); i++) {
-		e |= 1L << (31 - p[i]);
-	}
-
-	for (i = 1; i < 256; i++) {
-		c = i;
-		for (k = 8; k; k--) {
-			c = c & 1 ? (c >> 1) ^ e : c >> 1;
-		}
-		sxg_crc_table[i] = c;
-	}
-}
-
-/*
- *  Return the MAC hast as described above.
- */
-static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
-{
-	u32 crc;
-	char *p;
-	int i;
-	unsigned char machash = 0;
-
-	if (!sxg_crc_init) {
-		sxg_mcast_init_crc32();
-		sxg_crc_init = 1;
-	}
-
-	crc = 0xFFFFFFFF;	/* Preload shift register, per crc-32 spec */
-	for (i = 0, p = macaddr; i < 6; ++p, ++i) {
-		crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
-	}
-
-	/* Return bits 1-8, transposed */
-	for (i = 1; i < 9; i++) {
-		machash |= (((crc >> i) & 1) << (8 - i));
-	}
-
-	return (machash);
-}
-
 static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
 {
 	unsigned char crcpoly;
@@ -2783,7 +2821,6 @@
 
 static void sxg_mcast_set_list(p_net_device dev)
 {
-#if XXXTODO
 	p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
 	int status = STATUS_SUCCESS;
 	int i;
@@ -2809,7 +2846,7 @@
 	}
 
 	DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
-		  __FUNCTION__, adapter->devflags_prev, dev->flags, status);
+		  __func__, adapter->devflags_prev, dev->flags, status);
 	if (adapter->devflags_prev != dev->flags) {
 		adapter->macopts = MAC_DIRECTED;
 		if (dev->flags) {
@@ -2828,60 +2865,24 @@
 		}
 		adapter->devflags_prev = dev->flags;
 		DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
-			  __FUNCTION__, adapter->macopts);
+			  __func__, adapter->macopts);
 		sxg_config_set(adapter, TRUE);
 	} else {
 		if (status == STATUS_SUCCESS) {
 			sxg_mcast_set_mask(adapter);
 		}
 	}
-#endif
 	return;
 }
-
-static void sxg_mcast_set_mask(p_adapter_t adapter)
-{
-	PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
-
-	DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
-		  adapter->netdev->name, (unsigned int)adapter->MacFilter,
-		  adapter->MulticastMask);
-
-	if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
-		/* Turn on all multicast addresses. We have to do this for promiscuous
-		 * mode as well as ALLMCAST mode.  It saves the Microcode from having
-		 * to keep state about the MAC configuration.
-		 */
-//              DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n      SLUT MODE!!!\n",__FUNCTION__);
-		WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
-		WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
-//        DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
-
-	} else {
-		/* Commit our multicast mast to the SLIC by writing to the multicast
-		 * address mask registers
-		 */
-		DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
-			  __FUNCTION__, adapter->netdev->name,
-			  ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
-			  ((ulong)
-			   ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
-
-		WRITE_REG(sxg_regs->McastLow,
-			  (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
-		WRITE_REG(sxg_regs->McastHigh,
-			  (u32) ((adapter->
-				  MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
-	}
-}
+#endif
 
 static void sxg_unmap_mmio_space(p_adapter_t adapter)
 {
 #if LINUX_FREES_ADAPTER_RESOURCES
-//      if (adapter->Regs) {
-//              iounmap(adapter->Regs);
-//      }
-//      adapter->slic_regs = NULL;
+/*      if (adapter->Regs) { */
+/*              iounmap(adapter->Regs); */
+/*      } */
+/*      adapter->slic_regs = NULL; */
 #endif
 }
 
@@ -2909,8 +2910,8 @@
 	IsrCount = adapter->MsiEnabled ? RssIds : 1;
 
 	if (adapter->BasicAllocations == FALSE) {
-		// No allocations have been made, including spinlocks,
-		// or listhead initializations.  Return.
+		/* No allocations have been made, including spinlocks, */
+		/* or listhead initializations.  Return. */
 		return;
 	}
 
@@ -2920,7 +2921,7 @@
 	if (!(IsListEmpty(&adapter->AllSglBuffers))) {
 		SxgFreeSglBuffers(adapter);
 	}
-	// Free event queues.
+	/* Free event queues. */
 	if (adapter->EventRings) {
 		pci_free_consistent(adapter->pcidev,
 				    sizeof(SXG_EVENT_RING) * RssIds,
@@ -2947,17 +2948,17 @@
 	SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
 	SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
 
-	// Unmap register spaces
+	/* Unmap register spaces */
 	SxgUnmapResources(adapter);
 
-	// Deregister DMA
+	/* Deregister DMA */
 	if (adapter->DmaHandle) {
 		SXG_DEREGISTER_DMA(adapter->DmaHandle);
 	}
-	// Deregister interrupt
+	/* Deregister interrupt */
 	SxgDeregisterInterrupt(adapter);
 
-	// Possibly free system info (5.2 only)
+	/* Possibly free system info (5.2 only) */
 	SXG_RELEASE_SYSTEM_INFO(adapter);
 
 	SxgDiagFreeResources(adapter);
@@ -3047,23 +3048,23 @@
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
 		  adapter, Size, BufferType, 0);
-	// Grab the adapter lock and check the state.
-	// If we're in anything other than INITIALIZING or
-	// RUNNING state, fail.  This is to prevent
-	// allocations in an improper driver state
+	/* Grab the adapter lock and check the state. */
+	/* If we're in anything other than INITIALIZING or */
+	/* RUNNING state, fail.  This is to prevent */
+	/* allocations in an improper driver state */
 	spin_lock(&adapter->AdapterLock);
 
-	// Increment the AllocationsPending count while holding
-	// the lock.  Pause processing relies on this
+	/* Increment the AllocationsPending count while holding */
+	/* the lock.  Pause processing relies on this */
 	++adapter->AllocationsPending;
 	spin_unlock(&adapter->AdapterLock);
 
-	// At initialization time allocate resources synchronously.
+	/* At initialization time allocate resources synchronously. */
 	Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
 	if (Buffer == NULL) {
 		spin_lock(&adapter->AdapterLock);
-		// Decrement the AllocationsPending count while holding
-		// the lock.  Pause processing relies on this
+		/* Decrement the AllocationsPending count while holding */
+		/* the lock.  Pause processing relies on this */
 		--adapter->AllocationsPending;
 		spin_unlock(&adapter->AdapterLock);
 		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3113,10 +3114,10 @@
 	ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
 	       (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
 	ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
-	// First, initialize the contained pool of receive data
-	// buffers.  This initialization requires NBL/NB/MDL allocations,
-	// If any of them fail, free the block and return without
-	// queueing the shared memory
+	/* First, initialize the contained pool of receive data */
+	/* buffers.  This initialization requires NBL/NB/MDL allocations. */
+	/* If any of them fail, free the block and return without */
+	/* queueing the shared memory */
 	RcvDataBuffer = RcvBlock;
 #if 0
 	for (i = 0, Paddr = *PhysicalAddress;
@@ -3126,14 +3127,14 @@
 		for (i = 0, Paddr = PhysicalAddress;
 		     i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
 		     i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
-			//
+			/* */
 			RcvDataBufferHdr =
 			    (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
 							SXG_RCV_DATA_BUFFER_HDR_OFFSET
 							(BufferSize));
 			RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
 			RcvDataBufferHdr->PhysicalAddress = Paddr;
-			RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;	// For FREE macro assertion
+			RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;	/* For FREE macro assertion */
 			RcvDataBufferHdr->Size =
 			    SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
 
@@ -3143,8 +3144,8 @@
 
 		}
 
-	// Place this entire block of memory on the AllRcvBlocks queue so it can be
-	// free later
+	/* Place this entire block of memory on the AllRcvBlocks queue so it can be */
+	/* freed later */
 	RcvBlockHdr =
 	    (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
 				  SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3155,7 +3156,7 @@
 	InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
 	spin_unlock(&adapter->RcvQLock);
 
-	// Now free the contained receive data buffers that we initialized above
+	/* Now free the contained receive data buffers that we initialized above */
 	RcvDataBuffer = RcvBlock;
 	for (i = 0, Paddr = PhysicalAddress;
 	     i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3168,7 +3169,7 @@
 		spin_unlock(&adapter->RcvQLock);
 	}
 
-	// Locate the descriptor block and put it on a separate free queue
+	/* Locate the descriptor block and put it on a separate free queue */
 	RcvDescriptorBlock =
 	    (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
 					 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
@@ -3186,7 +3187,7 @@
 		  adapter, RcvBlock, Length, 0);
 	return;
       fail:
-	// Free any allocated resources
+	/* Free any allocated resources */
 	if (RcvBlock) {
 		RcvDataBuffer = RcvBlock;
 		for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3200,7 +3201,7 @@
 		pci_free_consistent(adapter->pcidev,
 				    Length, RcvBlock, PhysicalAddress);
 	}
-	DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__);
+	DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
 		  adapter, adapter->FreeRcvBufferCount,
 		  adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
@@ -3230,7 +3231,7 @@
 	adapter->AllSglBufferCount++;
 	memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
 	SxgSgl->PhysicalAddress = PhysicalAddress;	/* *PhysicalAddress; */
-	SxgSgl->adapter = adapter;	// Initialize backpointer once
+	SxgSgl->adapter = adapter;	/* Initialize backpointer once */
 	InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
 	spin_unlock(&adapter->SglQLock);
 	SxgSgl->State = SXG_BUFFER_BUSY;
@@ -3244,14 +3245,14 @@
 
 static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
 {
-//  DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__,
-//             card->config_set, adapter->port, adapter->physport, adapter->functionnumber);
-//
-//  sxg_dbg_macaddrs(adapter);
+/*  DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
+/*             card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
+/* */
+/*  sxg_dbg_macaddrs(adapter); */
 
 	memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
-//      DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__);
-//      sxg_dbg_macaddrs(adapter);
+/*      DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
+/*      sxg_dbg_macaddrs(adapter); */
 	if (!(adapter->currmacaddr[0] ||
 	      adapter->currmacaddr[1] ||
 	      adapter->currmacaddr[2] ||
@@ -3262,18 +3263,18 @@
 	if (adapter->netdev) {
 		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
 	}
-//  DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port);
+/*  DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
 	sxg_dbg_macaddrs(adapter);
 
 }
 
+#if XXXTODO
 static int sxg_mac_set_address(p_net_device dev, void *ptr)
 {
-#if XXXTODO
 	p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
 	struct sockaddr *addr = ptr;
 
-	DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
+	DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
 
 	if (netif_running(dev)) {
 		return -EBUSY;
@@ -3282,22 +3283,22 @@
 		return -EBUSY;
 	}
 	DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
-		  __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
+		  __func__, adapter->netdev->name, adapter->currmacaddr[0],
 		  adapter->currmacaddr[1], adapter->currmacaddr[2],
 		  adapter->currmacaddr[3], adapter->currmacaddr[4],
 		  adapter->currmacaddr[5]);
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
 	DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
-		  __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
+		  __func__, adapter->netdev->name, adapter->currmacaddr[0],
 		  adapter->currmacaddr[1], adapter->currmacaddr[2],
 		  adapter->currmacaddr[3], adapter->currmacaddr[4],
 		  adapter->currmacaddr[5]);
 
 	sxg_config_set(adapter, TRUE);
-#endif
 	return 0;
 }
+#endif
 
 /*****************************************************************************/
 /*************  SXG DRIVER FUNCTIONS  (below) ********************************/
@@ -3321,77 +3322,77 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
 		  adapter, 0, 0, 0);
 
-	RssIds = 1;		//  XXXTODO  SXG_RSS_CPU_COUNT(adapter);
+	RssIds = 1;		/*  XXXTODO  SXG_RSS_CPU_COUNT(adapter); */
 	IsrCount = adapter->MsiEnabled ? RssIds : 1;
 
-	// Sanity check SXG_UCODE_REGS structure definition to
-	// make sure the length is correct
+	/* Sanity check SXG_UCODE_REGS structure definition to */
+	/* make sure the length is correct */
 	ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
 
-	// Disable interrupts
+	/* Disable interrupts */
 	SXG_DISABLE_ALL_INTERRUPTS(adapter);
 
-	// Set MTU
+	/* Set MTU */
 	ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
 	       (adapter->FrameSize == JUMBOMAXFRAME));
 	WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
 
-	// Set event ring base address and size
+	/* Set event ring base address and size */
 	WRITE_REG64(adapter,
 		    adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
 	WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
 
-	// Per-ISR initialization
+	/* Per-ISR initialization */
 	for (i = 0; i < IsrCount; i++) {
 		u64 Addr;
-		// Set interrupt status pointer
+		/* Set interrupt status pointer */
 		Addr = adapter->PIsr + (i * sizeof(u32));
 		WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
 	}
 
-	// XMT ring zero index
+	/* XMT ring zero index */
 	WRITE_REG64(adapter,
 		    adapter->UcodeRegs[0].SPSendIndex,
 		    adapter->PXmtRingZeroIndex, 0);
 
-	// Per-RSS initialization
+	/* Per-RSS initialization */
 	for (i = 0; i < RssIds; i++) {
-		// Release all event ring entries to the Microcode
+		/* Release all event ring entries to the Microcode */
 		WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
 			  TRUE);
 	}
 
-	// Transmit ring base and size
+	/* Transmit ring base and size */
 	WRITE_REG64(adapter,
 		    adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
 	WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
 
-	// Receive ring base and size
+	/* Receive ring base and size */
 	WRITE_REG64(adapter,
 		    adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
 	WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
 
-	// Populate the card with receive buffers
+	/* Populate the card with receive buffers */
 	sxg_stock_rcv_buffers(adapter);
 
-	// Initialize checksum offload capabilities.  At the moment
-	// we always enable IP and TCP receive checksums on the card.
-	// Depending on the checksum configuration specified by the
-	// user, we can choose to report or ignore the checksum
-	// information provided by the card.
+	/* Initialize checksum offload capabilities.  At the moment */
+	/* we always enable IP and TCP receive checksums on the card. */
+	/* Depending on the checksum configuration specified by the */
+	/* user, we can choose to report or ignore the checksum */
+	/* information provided by the card. */
 	WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
 		  SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
 
-	// Initialize the MAC, XAUI
-	DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__);
+	/* Initialize the MAC, XAUI */
+	DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
 	status = sxg_initialize_link(adapter);
-	DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__,
+	DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
 		  status);
 	if (status != STATUS_SUCCESS) {
 		return (status);
 	}
-	// Initialize Dead to FALSE.
-	// SlicCheckForHang or SlicDumpThread will take it from here.
+	/* Initialize Dead to FALSE. */
+	/* SlicCheckForHang or SlicDumpThread will take it from here. */
 	adapter->Dead = FALSE;
 	adapter->PingOutstanding = FALSE;
 
@@ -3428,14 +3429,14 @@
 
 	ASSERT(RcvDescriptorBlockHdr);
 
-	// If we don't have the resources to fill the descriptor block,
-	// return failure
+	/* If we don't have the resources to fill the descriptor block, */
+	/* return failure */
 	if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
 	    SXG_RING_FULL(RcvRingInfo)) {
 		adapter->Stats.NoMem++;
 		return (STATUS_FAILURE);
 	}
-	// Get a ring descriptor command
+	/* Get a ring descriptor command */
 	SXG_GET_CMD(RingZero,
 		    RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
 	ASSERT(RingDescriptorCmd);
@@ -3443,7 +3444,7 @@
 	RcvDescriptorBlock =
 	    (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
 
-	// Fill in the descriptor block
+	/* Fill in the descriptor block */
 	for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
 		SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
 		ASSERT(RcvDataBufferHdr);
@@ -3454,13 +3455,13 @@
 		RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
 		    RcvDataBufferHdr->PhysicalAddress;
 	}
-	// Add the descriptor block to receive descriptor ring 0
+	/* Add the descriptor block to receive descriptor ring 0 */
 	RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
 
-	// RcvBuffersOnCard is not protected via the receive lock (see
-	// sxg_process_event_queue) We don't want to grap a lock every time a
-	// buffer is returned to us, so we use atomic interlocked functions
-	// instead.
+	/* RcvBuffersOnCard is not protected via the receive lock (see */
+	/* sxg_process_event_queue). We don't want to grab a lock every time a */
+	/* buffer is returned to us, so we use atomic interlocked functions */
+	/* instead. */
 	adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
 
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3490,10 +3491,10 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
 		  adapter, adapter->RcvBuffersOnCard,
 		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
-	// First, see if we've got less than our minimum threshold of
-	// receive buffers, there isn't an allocation in progress, and
-	// we haven't exceeded our maximum.. get another block of buffers
-	// None of this needs to be SMP safe.  It's round numbers.
+	/* First, see if we've got less than our minimum threshold of */
+	/* receive buffers, there isn't an allocation in progress, and */
+	/* we haven't exceeded our maximum.. get another block of buffers */
+	/* None of this needs to be SMP safe.  It's round numbers. */
 	if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
 	    (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
 	    (adapter->AllocationsPending == 0)) {
@@ -3502,12 +3503,12 @@
 							      ReceiveBufferSize),
 					   SXG_BUFFER_TYPE_RCV);
 	}
-	// Now grab the RcvQLock lock and proceed
+	/* Now grab the RcvQLock lock and proceed */
 	spin_lock(&adapter->RcvQLock);
 	while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
 		PLIST_ENTRY _ple;
 
-		// Get a descriptor block
+		/* Get a descriptor block */
 		RcvDescriptorBlockHdr = NULL;
 		if (adapter->FreeRcvBlockCount) {
 			_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
@@ -3519,14 +3520,14 @@
 		}
 
 		if (RcvDescriptorBlockHdr == NULL) {
-			// Bail out..
+			/* Bail out.. */
 			adapter->Stats.NoMem++;
 			break;
 		}
-		// Fill in the descriptor block and give it to the card
+		/* Fill in the descriptor block and give it to the card */
 		if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
 		    STATUS_FAILURE) {
-			// Free the descriptor block
+			/* Free the descriptor block */
 			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
 						      RcvDescriptorBlockHdr);
 			break;
@@ -3560,15 +3561,15 @@
 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
 		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
 
-	// Now grab the RcvQLock lock and proceed
+	/* Now grab the RcvQLock lock and proceed */
 	spin_lock(&adapter->RcvQLock);
 	ASSERT(Index != RcvRingInfo->Tail);
 	while (RcvRingInfo->Tail != Index) {
-		//
-		// Locate the current Cmd (ring descriptor entry), and
-		// associated receive descriptor block, and advance
-		// the tail
-		//
+		/* */
+		/* Locate the current Cmd (ring descriptor entry), and */
+		/* associated receive descriptor block, and advance */
+		/* the tail */
+		/* */
 		SXG_RETURN_CMD(RingZero,
 			       RcvRingInfo,
 			       RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3576,12 +3577,12 @@
 			  RcvRingInfo->Head, RcvRingInfo->Tail,
 			  RingDescriptorCmd, RcvDescriptorBlockHdr);
 
-		// Clear the SGL field
+		/* Clear the SGL field */
 		RingDescriptorCmd->Sgl = 0;
-		// Attempt to refill it and hand it right back to the
-		// card.  If we fail to refill it, free the descriptor block
-		// header.  The card will be restocked later via the
-		// RcvBuffersOnCard test
+		/* Attempt to refill it and hand it right back to the */
+		/* card.  If we fail to refill it, free the descriptor block */
+		/* header.  The card will be restocked later via the */
+		/* RcvBuffersOnCard test */
 		if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
 		    STATUS_FAILURE) {
 			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
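
For reference while reading the relocated multicast-hash functions above, here is a minimal, self-contained sketch of the computation they perform: a reflected CRC-32 over the 6-byte MAC address (reversed polynomial 0xEDB88320, i.e. the Ethernet polynomial), with eight bits of the uncomplemented CRC transposed into the hash byte. The names crc32_build_table() and mac_hash() are illustrative only; the driver's sxg_mcast_init_crc32()/sxg_mcast_get_mac_hash() remain the authoritative versions.

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_table[256];	/* reflected CRC-32 lookup table */

static void crc32_build_table(void)
{
	uint32_t c;
	int i, k;

	for (i = 0; i < 256; i++) {
		c = (uint32_t)i;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : c >> 1;
		crc32_table[i] = c;
	}
}

/* Hash a 6-byte MAC: run the reflected CRC, then transpose bits 1-8. */
static unsigned char mac_hash(const unsigned char *mac)
{
	uint32_t crc = 0xFFFFFFFFu;	/* preload shift register, per CRC-32 convention */
	unsigned char hash = 0;
	int i;

	for (i = 0; i < 6; i++)
		crc = (crc >> 8) ^ crc32_table[(crc ^ mac[i]) & 0xFF];

	for (i = 1; i < 9; i++)		/* bits 1-8 of the uncomplemented CRC, reversed */
		hash |= ((crc >> i) & 1) << (8 - i);

	return hash;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	crc32_build_table();
	printf("hash = 0x%02x\n", (unsigned)mac_hash(mac));
	return 0;
}

The driver appears to use this hash to choose a bit in the 64-bit multicast mask that sxg_mcast_set_mask() writes to McastLow/McastHigh.
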
diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h
index 26fb0ff..0118268 100644
--- a/drivers/staging/sxg/sxg_os.h
+++ b/drivers/staging/sxg/sxg_os.h
@@ -44,7 +44,6 @@
 #define FALSE	(0)
 #define TRUE	(1)
 
-
 typedef struct _LIST_ENTRY {
 	struct _LIST_ENTRY *nle_flink;
 	struct _LIST_ENTRY *nle_blink;
@@ -69,35 +68,32 @@
 
 /* These two have to be inlined since they return things. */
 
-static __inline PLIST_ENTRY
-RemoveHeadList(list_entry *l)
+static __inline PLIST_ENTRY RemoveHeadList(list_entry * l)
 {
-        list_entry              *f;
-        list_entry              *e;
+	list_entry *f;
+	list_entry *e;
 
-        e = l->nle_flink;
-        f = e->nle_flink;
-        l->nle_flink = f;
-        f->nle_blink = l;
+	e = l->nle_flink;
+	f = e->nle_flink;
+	l->nle_flink = f;
+	f->nle_blink = l;
 
-        return (e);
+	return (e);
 }
 
-static __inline PLIST_ENTRY
-RemoveTailList(list_entry *l)
+static __inline PLIST_ENTRY RemoveTailList(list_entry * l)
 {
-        list_entry              *b;
-        list_entry              *e;
+	list_entry *b;
+	list_entry *e;
 
-        e = l->nle_blink;
-        b = e->nle_blink;
-        l->nle_blink = b;
-        b->nle_flink = l;
+	e = l->nle_blink;
+	b = e->nle_blink;
+	l->nle_blink = b;
+	b->nle_flink = l;
 
-        return (e);
+	return (e);
 }
 
-
 #define InsertTailList(l, e)                    \
         do {                                    \
                 list_entry              *b;     \
@@ -120,7 +116,6 @@
                 (l)->nle_flink = (e);           \
         } while (0)
 
-
 #define ATK_DEBUG  1
 
 #if ATK_DEBUG
@@ -133,7 +128,6 @@
 #define SLIC_TIMESTAMP(value)
 #endif
 
-
 /******************  SXG DEFINES  *****************************************/
 
 #ifdef  ATKDBG
@@ -150,5 +144,4 @@
 #define WRITE_REG64(a,reg,value,cpu)                sxg_reg64_write((a),(&reg),(value),(cpu))
 #define READ_REG(reg,value)   (value) = readl((void __iomem *)(&reg))
 
-#endif  /* _SLIC_OS_SPECIFIC_H_  */
-
+#endif /* _SLIC_OS_SPECIFIC_H_  */
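
sxg_os.h re-implements the Windows-style circular doubly-linked list (LIST_ENTRY with nle_flink/nle_blink forward/backward links and a sentinel head) that the driver uses for its free-buffer and all-buffer queues. A minimal standalone model of the same semantics, using illustrative helper names rather than the header's macros, is:

#include <stdio.h>

struct list_entry {
	struct list_entry *nle_flink;	/* forward link */
	struct list_entry *nle_blink;	/* backward link */
};

static void init_list_head(struct list_entry *l)
{
	l->nle_flink = l;		/* an empty list points at itself */
	l->nle_blink = l;
}

static int is_list_empty(const struct list_entry *l)
{
	return l->nle_flink == l;
}

static void insert_tail_list(struct list_entry *l, struct list_entry *e)
{
	struct list_entry *b = l->nle_blink;	/* current tail */

	e->nle_flink = l;
	e->nle_blink = b;
	b->nle_flink = e;
	l->nle_blink = e;
}

static struct list_entry *remove_head_list(struct list_entry *l)
{
	struct list_entry *e = l->nle_flink;	/* first real entry */
	struct list_entry *f = e->nle_flink;

	l->nle_flink = f;
	f->nle_blink = l;
	return e;	/* caller should test is_list_empty() first */
}

int main(void)
{
	struct list_entry head, a, b;

	init_list_head(&head);
	insert_tail_list(&head, &a);
	insert_tail_list(&head, &b);

	printf("removed a? %d\n", remove_head_list(&head) == &a);
	printf("removed b? %d\n", remove_head_list(&head) == &b);
	printf("empty now? %d\n", is_list_empty(&head));
	return 0;
}

In the driver these entries are embedded in larger structures (for example the FreeList/AllList fields of the receive and SGL buffer headers), and the returned LIST_ENTRY is then converted back to its enclosing structure.
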
diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h
index cfb6c7c..4522b8d 100644
--- a/drivers/staging/sxg/sxgdbg.h
+++ b/drivers/staging/sxg/sxgdbg.h
@@ -58,7 +58,7 @@
     {                                                                             \
         if (!(a)) {                                                               \
             DBG_ERROR("ASSERT() Failure: file %s, function %s  line %d\n",\
-                __FILE__, __FUNCTION__, __LINE__);                                \
+                __FILE__, __func__, __LINE__);                                \
         }                                                                         \
     }
 #endif
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h
index ed26cea..88bffba 100644
--- a/drivers/staging/sxg/sxghif.h
+++ b/drivers/staging/sxg/sxghif.h
@@ -14,119 +14,119 @@
  *******************************************************************************/
 typedef struct _SXG_UCODE_REGS {
 	// Address 0 - 0x3F = Command codes 0-15 for TCB 0.  Excode 0
-	u32		Icr;			// Code = 0 (extended), ExCode = 0 - Int control
-	u32		RsvdReg1;		// Code = 1 - TOE -NA
-	u32		RsvdReg2;		// Code = 2 - TOE -NA
-	u32		RsvdReg3;		// Code = 3 - TOE -NA
-	u32		RsvdReg4;		// Code = 4 - TOE -NA
-	u32		RsvdReg5;		// Code = 5 - TOE -NA
-	u32		CardUp;			// Code = 6 - Microcode initialized when 1
-	u32		RsvdReg7;		// Code = 7 - TOE -NA
-	u32		CodeNotUsed[8];		// Codes 8-15 not used.  ExCode = 0
+	u32 Icr;		// Code = 0 (extended), ExCode = 0 - Int control
+	u32 RsvdReg1;		// Code = 1 - TOE -NA
+	u32 RsvdReg2;		// Code = 2 - TOE -NA
+	u32 RsvdReg3;		// Code = 3 - TOE -NA
+	u32 RsvdReg4;		// Code = 4 - TOE -NA
+	u32 RsvdReg5;		// Code = 5 - TOE -NA
+	u32 CardUp;		// Code = 6 - Microcode initialized when 1
+	u32 RsvdReg7;		// Code = 7 - TOE -NA
+	u32 CodeNotUsed[8];	// Codes 8-15 not used.  ExCode = 0
 	// This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
-	u32		Isp;			// Code = 0 (extended), ExCode = 1
-	u32		PadEx1[15];		// Codes 1-15 not used with extended codes
+	u32 Isp;		// Code = 0 (extended), ExCode = 1
+	u32 PadEx1[15];		// Codes 1-15 not used with extended codes
 	// ExCode 2 = Interrupt Status Register
-	u32		Isr;			// Code = 0 (extended), ExCode = 2
-	u32		PadEx2[15];
+	u32 Isr;		// Code = 0 (extended), ExCode = 2
+	u32 PadEx2[15];
 	// ExCode 3 = Event base register.  Location of event rings
-	u32		EventBase;		// Code = 0 (extended), ExCode = 3
-	u32		PadEx3[15];
+	u32 EventBase;		// Code = 0 (extended), ExCode = 3
+	u32 PadEx3[15];
 	// ExCode 4 = Event ring size
-	u32		EventSize;		// Code = 0 (extended), ExCode = 4
-	u32		PadEx4[15];
+	u32 EventSize;		// Code = 0 (extended), ExCode = 4
+	u32 PadEx4[15];
 	// ExCode 5 = TCB Buffers base address
-	u32		TcbBase;		// Code = 0 (extended), ExCode = 5
-	u32		PadEx5[15];
+	u32 TcbBase;		// Code = 0 (extended), ExCode = 5
+	u32 PadEx5[15];
 	// ExCode 6 = TCB Composite Buffers base address
-	u32		TcbCompBase;		// Code = 0 (extended), ExCode = 6
-	u32		PadEx6[15];
+	u32 TcbCompBase;	// Code = 0 (extended), ExCode = 6
+	u32 PadEx6[15];
 	// ExCode 7 = Transmit ring base address
-	u32		XmtBase;		// Code = 0 (extended), ExCode = 7
-	u32		PadEx7[15];
+	u32 XmtBase;		// Code = 0 (extended), ExCode = 7
+	u32 PadEx7[15];
 	// ExCode 8 = Transmit ring size
-	u32		XmtSize;		// Code = 0 (extended), ExCode = 8
-	u32		PadEx8[15];
+	u32 XmtSize;		// Code = 0 (extended), ExCode = 8
+	u32 PadEx8[15];
 	// ExCode 9 = Receive ring base address
-	u32		RcvBase;		// Code = 0 (extended), ExCode = 9
-	u32		PadEx9[15];
+	u32 RcvBase;		// Code = 0 (extended), ExCode = 9
+	u32 PadEx9[15];
 	// ExCode 10 = Receive ring size
-	u32		RcvSize;		// Code = 0 (extended), ExCode = 10
-	u32		PadEx10[15];
+	u32 RcvSize;		// Code = 0 (extended), ExCode = 10
+	u32 PadEx10[15];
 	// ExCode 11 = Read EEPROM Config
-	u32		Config;			// Code = 0 (extended), ExCode = 11
-	u32		PadEx11[15];
+	u32 Config;		// Code = 0 (extended), ExCode = 11
+	u32 PadEx11[15];
 	// ExCode 12 = Multicast bits 31:0
-	u32		McastLow;		// Code = 0 (extended), ExCode = 12
-	u32		PadEx12[15];
+	u32 McastLow;		// Code = 0 (extended), ExCode = 12
+	u32 PadEx12[15];
 	// ExCode 13 = Multicast bits 63:32
-	u32		McastHigh;		// Code = 0 (extended), ExCode = 13
-	u32		PadEx13[15];
+	u32 McastHigh;		// Code = 0 (extended), ExCode = 13
+	u32 PadEx13[15];
 	// ExCode 14 = Ping
-	u32		Ping;			// Code = 0 (extended), ExCode = 14
-	u32		PadEx14[15];
+	u32 Ping;		// Code = 0 (extended), ExCode = 14
+	u32 PadEx14[15];
 	// ExCode 15 = Link MTU
-	u32		LinkMtu;		// Code = 0 (extended), ExCode = 15
-	u32		PadEx15[15];
+	u32 LinkMtu;		// Code = 0 (extended), ExCode = 15
+	u32 PadEx15[15];
 	// ExCode 16 = Download synchronization
-	u32		LoadSync;		// Code = 0 (extended), ExCode = 16
-	u32		PadEx16[15];
+	u32 LoadSync;		// Code = 0 (extended), ExCode = 16
+	u32 PadEx16[15];
 	// ExCode 17 = Upper DRAM address bits on 32-bit systems
-	u32		Upper;			// Code = 0 (extended), ExCode = 17
-	u32		PadEx17[15];
+	u32 Upper;		// Code = 0 (extended), ExCode = 17
+	u32 PadEx17[15];
 	// ExCode 18 = Slowpath Send Index Address
-	u32		SPSendIndex;		// Code = 0 (extended), ExCode = 18
-	u32		PadEx18[15];
-	u32		RsvdXF;			// Code = 0 (extended), ExCode = 19
-	u32		PadEx19[15];
+	u32 SPSendIndex;	// Code = 0 (extended), ExCode = 18
+	u32 PadEx18[15];
+	u32 RsvdXF;		// Code = 0 (extended), ExCode = 19
+	u32 PadEx19[15];
 	// ExCode 20 = Aggregation
-	u32		Aggregation;		// Code = 0 (extended), ExCode = 20
-	u32		PadEx20[15];
+	u32 Aggregation;	// Code = 0 (extended), ExCode = 20
+	u32 PadEx20[15];
 	// ExCode 21 = Receive MDL push timer
-	u32		PushTicks;		// Code = 0 (extended), ExCode = 21
-	u32		PadEx21[15];
+	u32 PushTicks;		// Code = 0 (extended), ExCode = 21
+	u32 PadEx21[15];
 	// ExCode 22 = TOE NA
-	u32		AckFrequency;		// Code = 0 (extended), ExCode = 22
-	u32		PadEx22[15];
+	u32 AckFrequency;	// Code = 0 (extended), ExCode = 22
+	u32 PadEx22[15];
 	// ExCode 23 = TOE NA
-	u32		RsvdReg23;
-	u32		PadEx23[15];
+	u32 RsvdReg23;
+	u32 PadEx23[15];
 	// ExCode 24 = TOE NA
-	u32		RsvdReg24;
-	u32		PadEx24[15];
+	u32 RsvdReg24;
+	u32 PadEx24[15];
 	// ExCode 25 = TOE NA
-	u32		RsvdReg25;		// Code = 0 (extended), ExCode = 25
-	u32		PadEx25[15];
+	u32 RsvdReg25;		// Code = 0 (extended), ExCode = 25
+	u32 PadEx25[15];
 	// ExCode 26 = Receive checksum requirements
-	u32		ReceiveChecksum;	// Code = 0 (extended), ExCode = 26
-	u32		PadEx26[15];
+	u32 ReceiveChecksum;	// Code = 0 (extended), ExCode = 26
+	u32 PadEx26[15];
 	// ExCode 27 = RSS Requirements
-	u32		Rss;			// Code = 0 (extended), ExCode = 27
-	u32		PadEx27[15];
+	u32 Rss;		// Code = 0 (extended), ExCode = 27
+	u32 PadEx27[15];
 	// ExCode 28 = RSS Table
-	u32		RssTable;		// Code = 0 (extended), ExCode = 28
-	u32		PadEx28[15];
+	u32 RssTable;		// Code = 0 (extended), ExCode = 28
+	u32 PadEx28[15];
 	// ExCode 29 = Event ring release entries
-	u32		EventRelease;		// Code = 0 (extended), ExCode = 29
-	u32		PadEx29[15];
+	u32 EventRelease;	// Code = 0 (extended), ExCode = 29
+	u32 PadEx29[15];
 	// ExCode 30 = Number of receive bufferlist commands on ring 0
-	u32		RcvCmd;			// Code = 0 (extended), ExCode = 30
-	u32		PadEx30[15];
+	u32 RcvCmd;		// Code = 0 (extended), ExCode = 30
+	u32 PadEx30[15];
 	// ExCode 31 = slowpath transmit command - Data[31:0] = 1
-	u32		XmtCmd;			// Code = 0 (extended), ExCode = 31
-	u32		PadEx31[15];
+	u32 XmtCmd;		// Code = 0 (extended), ExCode = 31
+	u32 PadEx31[15];
 	// ExCode 32 = Dump command
-	u32		DumpCmd;		// Code = 0 (extended), ExCode = 32
-	u32		PadEx32[15];
+	u32 DumpCmd;		// Code = 0 (extended), ExCode = 32
+	u32 PadEx32[15];
 	// ExCode 33 = Debug command
-	u32		DebugCmd;		// Code = 0 (extended), ExCode = 33
-	u32		PadEx33[15];
+	u32 DebugCmd;		// Code = 0 (extended), ExCode = 33
+	u32 PadEx33[15];
 	// There are 128 possible extended commands - each of account for 16
 	// words (including the non-relevent base command codes 1-15).
 	// Pad for the remainder of these here to bring us to the next CPU
 	// base.  As extended codes are added, reduce the first array value in
 	// the following field
-	u32		PadToNextCpu[94][16];	// 94 = 128 - 34 (34 = Excodes 0 - 33)
+	u32 PadToNextCpu[94][16];	// 94 = 128 - 34 (34 = Excodes 0 - 33)
 } SXG_UCODE_REGS, *PSXG_UCODE_REGS;
 
 // Interrupt control register (0) values
@@ -141,7 +141,7 @@
 
 // The Microcode supports up to 16 RSS queues
 #define SXG_MAX_RSS				16
-#define SXG_MAX_RSS_TABLE_SIZE	256		// 256-byte max
+#define SXG_MAX_RSS_TABLE_SIZE	256	// 256-byte max
 
 #define SXG_RSS_TCP6				0x00000001	// RSS TCP over IPv6
 #define SXG_RSS_TCP4				0x00000002	// RSS TCP over IPv4
@@ -170,16 +170,16 @@
  * SXG_UCODE_REGS definition above
  */
 typedef struct _SXG_TCB_REGS {
-	u32		ExCode;		/* Extended codes - see SXG_UCODE_REGS */
-	u32		Xmt;		/* Code = 1 - # of Xmt descriptors added to ring */
-	u32		Rcv;		/* Code = 2 - # of Rcv descriptors added to ring */
-	u32		Rsvd1;		/* Code = 3 - TOE NA */
-	u32		Rsvd2;		/* Code = 4 - TOE NA */
-	u32		Rsvd3;		/* Code = 5 - TOE NA */
-	u32		Invalid;	/* Code = 6 - Reserved for "CardUp" see above */
-	u32		Rsvd4;		/* Code = 7 - TOE NA */
-	u32		Rsvd5;		/* Code = 8 - TOE NA */
-	u32		Pad[7];		/* Codes 8-15 - Not used. */
+	u32 ExCode;		/* Extended codes - see SXG_UCODE_REGS */
+	u32 Xmt;		/* Code = 1 - # of Xmt descriptors added to ring */
+	u32 Rcv;		/* Code = 2 - # of Rcv descriptors added to ring */
+	u32 Rsvd1;		/* Code = 3 - TOE NA */
+	u32 Rsvd2;		/* Code = 4 - TOE NA */
+	u32 Rsvd3;		/* Code = 5 - TOE NA */
+	u32 Invalid;		/* Code = 6 - Reserved for "CardUp" see above */
+	u32 Rsvd4;		/* Code = 7 - TOE NA */
+	u32 Rsvd5;		/* Code = 8 - TOE NA */
+	u32 Pad[7];		/* Codes 8-15 - Not used. */
 } SXG_TCB_REGS, *PSXG_TCB_REGS;
 
 /***************************************************************************
@@ -273,27 +273,27 @@
  */
 #pragma pack(push, 1)
 typedef struct _SXG_EVENT {
-	u32			Pad[1];		// not used
-	u32			SndUna;		// SndUna value
-	u32			Resid;		// receive MDL resid
+	u32 Pad[1];		// not used
+	u32 SndUna;		// SndUna value
+	u32 Resid;		// receive MDL resid
 	union {
-		void *		HostHandle;	// Receive host handle
-		u32		Rsvd1;		// TOE NA
+		void *HostHandle;	// Receive host handle
+		u32 Rsvd1;	// TOE NA
 		struct {
-			u32	NotUsed;
-			u32	Rsvd2;		// TOE NA
+			u32 NotUsed;
+			u32 Rsvd2;	// TOE NA
 		} Flush;
 	};
-	u32			Toeplitz;	// RSS Toeplitz hash
+	u32 Toeplitz;		// RSS Toeplitz hash
 	union {
-		ushort		Rsvd3;		// TOE NA
-		ushort		HdrOffset;	// Slowpath
+		ushort Rsvd3;	// TOE NA
+		ushort HdrOffset;	// Slowpath
 	};
-	ushort			Length;		//
-	unsigned char 		Rsvd4;		// TOE NA
-	unsigned char 		Code;		// Event code
-	unsigned char		CommandIndex;	// New ring index
-	unsigned char		Status;		// Event status
+	ushort Length;		//
+	unsigned char Rsvd4;	// TOE NA
+	unsigned char Code;	// Event code
+	unsigned char CommandIndex;	// New ring index
+	unsigned char Status;	// Event status
 } SXG_EVENT, *PSXG_EVENT;
 #pragma pack(pop)
 
@@ -318,12 +318,12 @@
 // Event ring
 // Size must be power of 2, between 128 and 16k
 #define EVENT_RING_SIZE		4096	// ??
-#define EVENT_RING_BATCH	16		// Hand entries back 16 at a time.
-#define EVENT_BATCH_LIMIT	256	    // Stop processing events after 256 (16 * 16)
+#define EVENT_RING_BATCH	16	// Hand entries back 16 at a time.
+#define EVENT_BATCH_LIMIT	256	// Stop processing events after 256 (16 * 16)
 
 typedef struct _SXG_EVENT_RING {
-	SXG_EVENT	Ring[EVENT_RING_SIZE];
-}SXG_EVENT_RING, *PSXG_EVENT_RING;
+	SXG_EVENT Ring[EVENT_RING_SIZE];
+} SXG_EVENT_RING, *PSXG_EVENT_RING;
 
 /***************************************************************************
  *
@@ -341,7 +341,7 @@
 #define SXG_TCB_PER_BUCKET		16
 #define SXG_TCB_BUCKET_MASK		0xFF0	// Bucket portion of TCB ID
 #define SXG_TCB_ELEMENT_MASK	0x00F	// Element within bucket
-#define SXG_TCB_BUCKETS			256		// 256 * 16 = 4k
+#define SXG_TCB_BUCKETS			256	// 256 * 16 = 4k
 
 #define SXG_TCB_BUFFER_SIZE	512	// ASSERT format is correct
 
@@ -368,7 +368,6 @@
 	&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip				:		\
 	&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
 
-
 #if DBG
 // Horrible kludge to distinguish dumb-nic, slowpath, and
 // fastpath traffic.  Decrement the HopLimit by one
@@ -396,16 +395,16 @@
  * Receive and transmit rings
  ***************************************************************************/
 #define SXG_MAX_RING_SIZE	256
-#define SXG_XMT_RING_SIZE	128		// Start with 128
-#define SXG_RCV_RING_SIZE	128		// Start with 128
+#define SXG_XMT_RING_SIZE	128	// Start with 128
+#define SXG_RCV_RING_SIZE	128	// Start with 128
 #define SXG_MAX_ENTRIES     4096
 
 // Structure and macros to manage a ring
 typedef struct _SXG_RING_INFO {
-	unsigned char			Head;		// Where we add entries - Note unsigned char:RING_SIZE
-	unsigned char			Tail;		// Where we pull off completed entries
-	ushort			Size;		// Ring size - Must be multiple of 2
-	void *			Context[SXG_MAX_RING_SIZE];	// Shadow ring
+	unsigned char Head;	// Where we add entries - Note unsigned char:RING_SIZE
+	unsigned char Tail;	// Where we pull off completed entries
+	ushort Size;		// Ring size - Must be multiple of 2
+	void *Context[SXG_MAX_RING_SIZE];	// Shadow ring
 } SXG_RING_INFO, *PSXG_RING_INFO;
 
 #define SXG_INITIALIZE_RING(_ring, _size) {							\
@@ -483,40 +482,40 @@
  */
 #pragma pack(push, 1)
 typedef struct _SXG_CMD {
-	dma_addr_t             			Sgl;			// Physical address of SGL
+	dma_addr_t Sgl;		// Physical address of SGL
 	union {
 		struct {
-			dma64_addr_t          	FirstSgeAddress;// Address of first SGE
-			u32					FirstSgeLength;	// Length of first SGE
+			dma64_addr_t FirstSgeAddress;	// Address of first SGE
+			u32 FirstSgeLength;	// Length of first SGE
 			union {
-				u32				Rsvd1;	        // TOE NA
-				u32				SgeOffset;		// Slowpath - 2nd SGE offset
-				u32				Resid;			// MDL completion - clobbers update
+				u32 Rsvd1;	// TOE NA
+				u32 SgeOffset;	// Slowpath - 2nd SGE offset
+				u32 Resid;	// MDL completion - clobbers update
 			};
 			union {
-				u32				TotalLength;	// Total transfer length
-				u32				Mss;			// LSO MSS
+				u32 TotalLength;	// Total transfer length
+				u32 Mss;	// LSO MSS
 			};
 		} Buffer;
 	};
 	union {
 		struct {
-			unsigned char					Flags:4;		// slowpath flags
-			unsigned char					IpHl:4;			// Ip header length (>>2)
-			unsigned char					MacLen;			// Mac header len
+			unsigned char Flags:4;	// slowpath flags
+			unsigned char IpHl:4;	// Ip header length (>>2)
+			unsigned char MacLen;	// Mac header len
 		} CsumFlags;
 		struct {
-			ushort					Flags:4;		// slowpath flags
-			ushort					TcpHdrOff:7;	// TCP
-			ushort					MacLen:5;		// Mac header len
+			ushort Flags:4;	// slowpath flags
+			ushort TcpHdrOff:7;	// TCP
+			ushort MacLen:5;	// Mac header len
 		} LsoFlags;
-		ushort						Flags;			// flags
+		ushort Flags;	// flags
 	};
 	union {
-		ushort						SgEntries;		// SG entry count including first sge
+		ushort SgEntries;	// SG entry count including first sge
 		struct {
-			unsigned char					Status;		    // Copied from event status
-			unsigned char					NotUsed;
+			unsigned char Status;	// Copied from event status
+			unsigned char NotUsed;
 		} Status;
 	};
 } SXG_CMD, *PSXG_CMD;
@@ -524,8 +523,8 @@
 
 #pragma pack(push, 1)
 typedef struct _VLAN_HDR {
-	ushort	VlanTci;
-	ushort	VlanTpid;
+	ushort VlanTci;
+	ushort VlanTpid;
 } VLAN_HDR, *PVLAN_HDR;
 #pragma pack(pop)
 
@@ -561,16 +560,16 @@
  *
  */
 // Slowpath CMD flags
-#define SXG_SLOWCMD_CSUM_IP			0x01		// Checksum IP
-#define SXG_SLOWCMD_CSUM_TCP		0x02		// Checksum TCP
-#define SXG_SLOWCMD_LSO				0x04		// Large segment send
+#define SXG_SLOWCMD_CSUM_IP			0x01	// Checksum IP
+#define SXG_SLOWCMD_CSUM_TCP		0x02	// Checksum TCP
+#define SXG_SLOWCMD_LSO				0x04	// Large segment send
 
 typedef struct _SXG_XMT_RING {
-	SXG_CMD		Descriptors[SXG_XMT_RING_SIZE];
+	SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
 } SXG_XMT_RING, *PSXG_XMT_RING;
 
 typedef struct _SXG_RCV_RING {
-	SXG_CMD		Descriptors[SXG_RCV_RING_SIZE];
+	SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
 } SXG_RCV_RING, *PSXG_RCV_RING;
 
 /***************************************************************************
@@ -578,8 +577,8 @@
  * shared memory allocation
  ***************************************************************************/
 typedef enum {
-	SXG_BUFFER_TYPE_RCV,		// Receive buffer
-	SXG_BUFFER_TYPE_SGL			// SGL buffer
+	SXG_BUFFER_TYPE_RCV,	// Receive buffer
+	SXG_BUFFER_TYPE_SGL	// SGL buffer
 } SXG_BUFFER_TYPE;
 
 // State for SXG buffers
@@ -668,60 +667,60 @@
 #define SXG_RCV_DATA_BUFFERS			4096	// Amount to give to the card
 #define SXG_INITIAL_RCV_DATA_BUFFERS	8192	// Initial pool of buffers
 #define SXG_MIN_RCV_DATA_BUFFERS		2048	// Minimum amount and when to get more
-#define SXG_MAX_RCV_BLOCKS				128		// = 16384 receive buffers
+#define SXG_MAX_RCV_BLOCKS				128	// = 16384 receive buffers
 
 // Receive buffer header
 typedef struct _SXG_RCV_DATA_BUFFER_HDR {
-	dma_addr_t          			PhysicalAddress;	// Buffer physical address
+	dma_addr_t PhysicalAddress;	// Buffer physical address
 	// Note - DO NOT USE the VirtualAddress field to locate data.
 	// Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
-	void *VirtualAddress;		// Start of buffer
-	LIST_ENTRY						FreeList;			// Free queue of buffers
-	struct _SXG_RCV_DATA_BUFFER_HDR	*Next;				// Fastpath data buffer queue
-	u32							Size;				// Buffer size
-	u32							ByteOffset;			// See SXG_RESTORE_MDL_OFFSET
-	unsigned char							State;				// See SXG_BUFFER state above
-	unsigned char							Status;				// Event status (to log PUSH)
-	struct sk_buff                * skb;				// Double mapped (nbl and pkt)
+	void *VirtualAddress;	// Start of buffer
+	LIST_ENTRY FreeList;	// Free queue of buffers
+	struct _SXG_RCV_DATA_BUFFER_HDR *Next;	// Fastpath data buffer queue
+	u32 Size;		// Buffer size
+	u32 ByteOffset;		// See SXG_RESTORE_MDL_OFFSET
+	unsigned char State;	// See SXG_BUFFER state above
+	unsigned char Status;	// Event status (to log PUSH)
+	struct sk_buff *skb;	// Double mapped (nbl and pkt)
 } SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR;
 
 // SxgSlowReceive uses the PACKET (skb) contained
 // in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data
 #define SxgDumbRcvPacket	        skb
 
-#define SXG_RCV_DATA_HDR_SIZE			256		// Space for SXG_RCV_DATA_BUFFER_HDR
+#define SXG_RCV_DATA_HDR_SIZE			256	// Space for SXG_RCV_DATA_BUFFER_HDR
 #define SXG_RCV_DATA_BUFFER_SIZE		2048	// Non jumbo = 2k including HDR
 #define SXG_RCV_JUMBO_BUFFER_SIZE		10240	// jumbo = 10k including HDR
 
 // Receive data descriptor
 typedef struct _SXG_RCV_DATA_DESCRIPTOR {
 	union {
-		struct sk_buff    *	VirtualAddress;			// Host handle
-		u64 			ForceTo8Bytes;			// Force x86 to 8-byte boundary
+		struct sk_buff *VirtualAddress;	// Host handle
+		u64 ForceTo8Bytes;	// Force x86 to 8-byte boundary
 	};
-	dma_addr_t             	PhysicalAddress;
+	dma_addr_t PhysicalAddress;
 } SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR;
 
 // Receive descriptor block
 #define SXG_RCV_DESCRIPTORS_PER_BLOCK		128
 #define SXG_RCV_DESCRIPTOR_BLOCK_SIZE		2048	// For sanity check
 typedef struct _SXG_RCV_DESCRIPTOR_BLOCK {
-	SXG_RCV_DATA_DESCRIPTOR		Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
+	SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
 } SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK;
 
 // Receive descriptor block header
 typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR {
-	void *					VirtualAddress;			// Start of 2k buffer
-	dma_addr_t	            PhysicalAddress;		// ..and it's physical address
-	LIST_ENTRY				FreeList;				// Free queue of descriptor blocks
-	unsigned char					State;					// See SXG_BUFFER state above
+	void *VirtualAddress;	// Start of 2k buffer
+	dma_addr_t PhysicalAddress;	// ..and its physical address
+	LIST_ENTRY FreeList;	// Free queue of descriptor blocks
+	unsigned char State;	// See SXG_BUFFER state above
 } SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR;
 
 // Receive block header
 typedef struct _SXG_RCV_BLOCK_HDR {
-	void *					VirtualAddress;			// Start of virtual memory
-	dma_addr_t	            PhysicalAddress;		// ..and it's physical address
-	LIST_ENTRY				AllList;				// Queue of all SXG_RCV_BLOCKS
+	void *VirtualAddress;	// Start of virtual memory
+	dma_addr_t PhysicalAddress;	// ..and its physical address
+	LIST_ENTRY AllList;	// Queue of all SXG_RCV_BLOCKS
 } SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR;
 
 // Macros to determine data structure offsets into receive block
@@ -747,8 +746,8 @@
 // Use the miniport reserved portion of the NBL to locate
 // our SXG_RCV_DATA_BUFFER_HDR structure.
 typedef struct _SXG_RCV_NBL_RESERVED {
-	PSXG_RCV_DATA_BUFFER_HDR	RcvDataBufferHdr;
-	void *						Available;
+	PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
+	void *Available;
 } SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED;
 
 #define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr)
@@ -760,12 +759,11 @@
 #define SXG_MIN_SGL_BUFFERS			2048	// Minimum amount and when to get more
 #define SXG_MAX_SGL_BUFFERS			16384	// Maximum to allocate (note ADAPT:ushort)
 
-
 // Self identifying structure type
 typedef enum _SXG_SGL_TYPE {
-	SXG_SGL_DUMB,				// Dumb NIC SGL
-	SXG_SGL_SLOW,				// Slowpath protocol header - see below
-	SXG_SGL_CHIMNEY				// Chimney offload SGL
+	SXG_SGL_DUMB,		// Dumb NIC SGL
+	SXG_SGL_SLOW,		// Slowpath protocol header - see below
+	SXG_SGL_CHIMNEY		// Chimney offload SGL
 } SXG_SGL_TYPE, PSXG_SGL_TYPE;
 
 // Note - the description below is Microsoft specific
@@ -774,14 +772,14 @@
 // for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure.
 // The following considerations apply when setting this value:
 // - First, the Sahara card is designed to read the Microsoft SGL structure
-// 	 straight out of host memory.  This means that the SGL must reside in
-//	 shared memory.  If the length here is smaller than the SGL for the
-//	 NET_BUFFER, then NDIS will allocate its own buffer.  The buffer
-//	 that NDIS allocates is not in shared memory, so when this happens,
-//	 the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
-//	 In other words.. we don't want this value to be too small.
+//       straight out of host memory.  This means that the SGL must reside in
+//       shared memory.  If the length here is smaller than the SGL for the
+//       NET_BUFFER, then NDIS will allocate its own buffer.  The buffer
+//       that NDIS allocates is not in shared memory, so when this happens,
+//       the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
+//       In other words.. we don't want this value to be too small.
 // - On the other hand.. we're allocating up to 16k of these things.  If
-//	 we make this too big, we start to consume a ton of memory..
+//       we make this too big, we start to consume a ton of memory..
 // At the moment, I'm going to limit the number of SG entries to 150.
 // If each entry maps roughly 4k, then this should cover roughly 600kB
 // NET_BUFFERs.  Furthermore, since each entry is 24 bytes, the total
@@ -801,24 +799,23 @@
 // the SGL.  The following structure defines an x64
 // formatted SGL entry
 typedef struct _SXG_X64_SGE {
-    dma64_addr_t      	Address;	// same as wdm.h
-    u32				Length;		// same as wdm.h
-	u32				CompilerPad;// The compiler pads to 8-bytes
-    u64 			Reserved;	// u32 * in wdm.h.  Force to 8 bytes
+	dma64_addr_t Address;	// same as wdm.h
+	u32 Length;		// same as wdm.h
+	u32 CompilerPad;	// The compiler pads to 8-bytes
+	u64 Reserved;		// u32 * in wdm.h.  Force to 8 bytes
 } SXG_X64_SGE, *PSXG_X64_SGE;
 
 typedef struct _SCATTER_GATHER_ELEMENT {
-    dma64_addr_t      	Address;	// same as wdm.h
-    u32				Length;		// same as wdm.h
-	u32				CompilerPad;// The compiler pads to 8-bytes
-    u64 			Reserved;	// u32 * in wdm.h.  Force to 8 bytes
+	dma64_addr_t Address;	// same as wdm.h
+	u32 Length;		// same as wdm.h
+	u32 CompilerPad;	// The compiler pads to 8-bytes
+	u64 Reserved;		// u32 * in wdm.h.  Force to 8 bytes
 } SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT;
 
-
 typedef struct _SCATTER_GATHER_LIST {
-    u32					NumberOfElements;
-    u32 *				Reserved;
-    SCATTER_GATHER_ELEMENT	Elements[];
+	u32 NumberOfElements;
+	u32 *Reserved;
+	SCATTER_GATHER_ELEMENT Elements[];
 } SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST;
 
 // The card doesn't care about anything except elements, so
@@ -826,26 +823,26 @@
 // SGL structure.  But redefine from wdm.h:SCATTER_GATHER_LIST so
 // we can specify SXG_X64_SGE and define a fixed number of elements
 typedef struct _SXG_X64_SGL {
-    u32					NumberOfElements;
-    u32 *				Reserved;
-    SXG_X64_SGE				Elements[SXG_SGL_ENTRIES];
+	u32 NumberOfElements;
+	u32 *Reserved;
+	SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
 } SXG_X64_SGL, *PSXG_X64_SGL;
 
 typedef struct _SXG_SCATTER_GATHER {
-	SXG_SGL_TYPE						Type;			// FIRST! Dumb-nic or offload
-	void *   							adapter;		// Back pointer to adapter
-	LIST_ENTRY							FreeList;		// Free SXG_SCATTER_GATHER blocks
-	LIST_ENTRY							AllList;		// All SXG_SCATTER_GATHER blocks
-	dma_addr_t				            PhysicalAddress;// physical address
-	unsigned char								State;			// See SXG_BUFFER state above
-	unsigned char								CmdIndex;		// Command ring index
-	struct sk_buff                    *	DumbPacket;		// Associated Packet
-	u32								Direction;		// For asynchronous completions
-	u32								CurOffset;		// Current SGL offset
-	u32								SglRef;			// SGL reference count
-	VLAN_HDR							VlanTag;		// VLAN tag to be inserted into SGL
-	PSCATTER_GATHER_LIST   				pSgl;			// SGL Addr. Possibly &Sgl
-	SXG_X64_SGL							Sgl;			// SGL handed to card
+	SXG_SGL_TYPE Type;	// FIRST! Dumb-nic or offload
+	void *adapter;		// Back pointer to adapter
+	LIST_ENTRY FreeList;	// Free SXG_SCATTER_GATHER blocks
+	LIST_ENTRY AllList;	// All SXG_SCATTER_GATHER blocks
+	dma_addr_t PhysicalAddress;	// physical address
+	unsigned char State;	// See SXG_BUFFER state above
+	unsigned char CmdIndex;	// Command ring index
+	struct sk_buff *DumbPacket;	// Associated Packet
+	u32 Direction;		// For asynchronous completions
+	u32 CurOffset;		// Current SGL offset
+	u32 SglRef;		// SGL reference count
+	VLAN_HDR VlanTag;	// VLAN tag to be inserted into SGL
+	PSCATTER_GATHER_LIST pSgl;	// SGL Addr. Possibly &Sgl
+	SXG_X64_SGL Sgl;	// SGL handed to card
 } SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER;
 
 #if defined(CONFIG_X86_64)
@@ -856,6 +853,5 @@
 #define SXG_SGL_BUFFER(_SxgSgl)		NULL
 #define SXG_SGL_BUF_SIZE			0
 #else
-    Stop Compilation;
+Stop Compilation;
 #endif
-
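
The SXG_RING_INFO structure defined in sxghif.h above tracks each command ring with a Head index (where new entries are added), a Tail index (where completed entries are reclaimed), a Size that must be a power of two, and a Context[] shadow array. The sketch below models that bookkeeping under those assumptions; the helper names and the ring-full convention are illustrative and are not the driver's actual SXG_GET_CMD/SXG_RETURN_CMD/SXG_RING_FULL macros.

#include <stdio.h>

#define RING_SIZE 128			/* must be a power of two, like SXG_XMT/RCV_RING_SIZE */

struct ring_info {
	unsigned char head;		/* next slot to fill */
	unsigned char tail;		/* next slot to reclaim */
	unsigned short size;		/* power of two */
	void *context[RING_SIZE];	/* shadow ring, one entry per slot */
};

static int ring_full(const struct ring_info *r)
{
	/* one common convention: full when advancing head would hit tail */
	return ((r->head + 1) & (r->size - 1)) == r->tail;
}

static int ring_get(struct ring_info *r, void *ctx)
{
	if (ring_full(r))
		return -1;
	r->context[r->head] = ctx;			/* remember the owner of this slot */
	r->head = (r->head + 1) & (r->size - 1);	/* wrap with a mask, not a modulo */
	return 0;
}

static void *ring_return(struct ring_info *r)
{
	void *ctx = r->context[r->tail];

	r->context[r->tail] = NULL;
	r->tail = (r->tail + 1) & (r->size - 1);
	return ctx;
}

int main(void)
{
	struct ring_info r = { .head = 0, .tail = 0, .size = RING_SIZE };
	int token = 42;

	if (ring_get(&r, &token) == 0)
		printf("reclaimed the same token? %d\n", ring_return(&r) == &token);
	return 0;
}

This mirrors how the receive path takes a slot at Head for each descriptor block handed to the card (SXG_GET_CMD in sxg_fill_descriptor_block()) and advances Tail as the card reports completions (SXG_RETURN_CMD in the completion loop above).
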
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h
index 8f4f6ef..2222ae9 100644
--- a/drivers/staging/sxg/sxghw.h
+++ b/drivers/staging/sxg/sxghw.h
@@ -13,11 +13,11 @@
 /*******************************************************************************
  * Configuration space
  *******************************************************************************/
-//  PCI Vendor ID
-#define SXG_VENDOR_ID			0x139A	// Alacritech's Vendor ID
+/*  PCI Vendor ID */
+#define SXG_VENDOR_ID			0x139A	/* Alacritech's Vendor ID */
 
 //  PCI Device ID
-#define SXG_DEVICE_ID			0x0009	// Sahara Device ID
+#define SXG_DEVICE_ID			0x0009	/* Sahara Device ID */
 
 //
 // Subsystem IDs.
@@ -141,7 +141,7 @@
 #define SXG_REGISTER_SIZE_PER_CPU	0x00002000	// Used to sanity check UCODE_REGS structure
 
 // Sahara receive sequencer status values
-#define SXG_RCV_STATUS_ATTN					0x80000000	// Attention
+#define SXG_RCV_STATUS_ATTN			0x80000000	// Attention
 #define SXG_RCV_STATUS_TRANSPORT_MASK		0x3F000000	// Transport mask
 #define SXG_RCV_STATUS_TRANSPORT_ERROR		0x20000000	// Transport error
 #define SXG_RCV_STATUS_TRANSPORT_CSUM		0x23000000	// Transport cksum error
@@ -156,9 +156,9 @@
 #define SXG_RCV_STATUS_TRANSPORT_FTP		0x03000000	// Transport FTP
 #define SXG_RCV_STATUS_TRANSPORT_HTTP		0x02000000	// Transport HTTP
 #define SXG_RCV_STATUS_TRANSPORT_SMB		0x01000000	// Transport SMB
-#define SXG_RCV_STATUS_NETWORK_MASK			0x00FF0000	// Network mask
+#define SXG_RCV_STATUS_NETWORK_MASK		0x00FF0000	// Network mask
 #define SXG_RCV_STATUS_NETWORK_ERROR		0x00800000	// Network error
-#define SXG_RCV_STATUS_NETWORK_CSUM			0x00830000	// Network cksum error
+#define SXG_RCV_STATUS_NETWORK_CSUM		0x00830000	// Network cksum error
 #define SXG_RCV_STATUS_NETWORK_UFLOW		0x00820000	// Network underflow error
 #define SXG_RCV_STATUS_NETWORK_HDRLEN		0x00800000	// Network header length
 #define SXG_RCV_STATUS_NETWORK_OFLOW		0x00400000	// Network overflow detected
@@ -167,67 +167,67 @@
 #define SXG_RCV_STATUS_NETWORK_OFFSET		0x00080000	// Network offset detected
 #define SXG_RCV_STATUS_NETWORK_FRAGMENT		0x00040000	// Network fragment detected
 #define SXG_RCV_STATUS_NETWORK_TRANS_MASK	0x00030000	// Network transport type mask
-#define SXG_RCV_STATUS_NETWORK_UDP			0x00020000	// UDP
-#define SXG_RCV_STATUS_NETWORK_TCP			0x00010000	// TCP
-#define SXG_RCV_STATUS_IPONLY				0x00008000	// IP-only not TCP
-#define SXG_RCV_STATUS_PKT_PRI				0x00006000	// Receive priority
-#define SXG_RCV_STATUS_PKT_PRI_SHFT					13	// Receive priority shift
-#define SXG_RCV_STATUS_PARITY				0x00001000	// MAC Receive RAM parity error
-#define SXG_RCV_STATUS_ADDRESS_MASK			0x00000F00	// Link address detection mask
-#define SXG_RCV_STATUS_ADDRESS_D			0x00000B00	// Link address D
-#define SXG_RCV_STATUS_ADDRESS_C			0x00000A00	// Link address C
-#define SXG_RCV_STATUS_ADDRESS_B			0x00000900	// Link address B
-#define SXG_RCV_STATUS_ADDRESS_A			0x00000800	// Link address A
+#define SXG_RCV_STATUS_NETWORK_UDP		0x00020000	// UDP
+#define SXG_RCV_STATUS_NETWORK_TCP		0x00010000	// TCP
+#define SXG_RCV_STATUS_IPONLY			0x00008000	// IP-only not TCP
+#define SXG_RCV_STATUS_PKT_PRI			0x00006000	// Receive priority
+#define SXG_RCV_STATUS_PKT_PRI_SHFT			13	// Receive priority shift
+#define SXG_RCV_STATUS_PARITY			0x00001000	// MAC Receive RAM parity error
+#define SXG_RCV_STATUS_ADDRESS_MASK		0x00000F00	// Link address detection mask
+#define SXG_RCV_STATUS_ADDRESS_D		0x00000B00	// Link address D
+#define SXG_RCV_STATUS_ADDRESS_C		0x00000A00	// Link address C
+#define SXG_RCV_STATUS_ADDRESS_B		0x00000900	// Link address B
+#define SXG_RCV_STATUS_ADDRESS_A		0x00000800	// Link address A
 #define SXG_RCV_STATUS_ADDRESS_BCAST		0x00000300	// Link address broadcast
 #define SXG_RCV_STATUS_ADDRESS_MCAST		0x00000200	// Link address multicast
 #define SXG_RCV_STATUS_ADDRESS_CMCAST		0x00000100	// Link control multicast
-#define SXG_RCV_STATUS_LINK_MASK			0x000000FF	// Link status mask
-#define SXG_RCV_STATUS_LINK_ERROR			0x00000080	// Link error
-#define SXG_RCV_STATUS_LINK_MASK			0x000000FF	// Link status mask
-#define SXG_RCV_STATUS_LINK_PARITY			0x00000087	// RcvMacQ parity error
-#define SXG_RCV_STATUS_LINK_EARLY			0x00000086	// Data early
+#define SXG_RCV_STATUS_LINK_MASK		0x000000FF	// Link status mask
+#define SXG_RCV_STATUS_LINK_ERROR		0x00000080	// Link error
+#define SXG_RCV_STATUS_LINK_MASK		0x000000FF	// Link status mask
+#define SXG_RCV_STATUS_LINK_PARITY		0x00000087	// RcvMacQ parity error
+#define SXG_RCV_STATUS_LINK_EARLY		0x00000086	// Data early
 #define SXG_RCV_STATUS_LINK_BUFOFLOW		0x00000085	// Buffer overflow
-#define SXG_RCV_STATUS_LINK_CODE			0x00000084	// Link code error
-#define SXG_RCV_STATUS_LINK_DRIBBLE			0x00000083	// Dribble nibble
-#define SXG_RCV_STATUS_LINK_CRC				0x00000082	// CRC error
-#define SXG_RCV_STATUS_LINK_OFLOW			0x00000081	// Link overflow
-#define SXG_RCV_STATUS_LINK_UFLOW			0x00000080	// Link underflow
-#define SXG_RCV_STATUS_LINK_8023			0x00000020	// 802.3
-#define SXG_RCV_STATUS_LINK_SNAP			0x00000010	// Snap
-#define SXG_RCV_STATUS_LINK_VLAN			0x00000008	// VLAN
+#define SXG_RCV_STATUS_LINK_CODE		0x00000084	// Link code error
+#define SXG_RCV_STATUS_LINK_DRIBBLE		0x00000083	// Dribble nibble
+#define SXG_RCV_STATUS_LINK_CRC			0x00000082	// CRC error
+#define SXG_RCV_STATUS_LINK_OFLOW		0x00000081	// Link overflow
+#define SXG_RCV_STATUS_LINK_UFLOW		0x00000080	// Link underflow
+#define SXG_RCV_STATUS_LINK_8023		0x00000020	// 802.3
+#define SXG_RCV_STATUS_LINK_SNAP		0x00000010	// Snap
+#define SXG_RCV_STATUS_LINK_VLAN		0x00000008	// VLAN
 #define SXG_RCV_STATUS_LINK_TYPE_MASK		0x00000007	// Network type mask
-#define SXG_RCV_STATUS_LINK_CONTROL			0x00000003	// Control packet
-#define SXG_RCV_STATUS_LINK_IPV6			0x00000002	// IPv6 packet
-#define SXG_RCV_STATUS_LINK_IPV4			0x00000001	// IPv4 packet
+#define SXG_RCV_STATUS_LINK_CONTROL		0x00000003	// Control packet
+#define SXG_RCV_STATUS_LINK_IPV6		0x00000002	// IPv6 packet
+#define SXG_RCV_STATUS_LINK_IPV4		0x00000001	// IPv4 packet
 
 /***************************************************************************
  * Sahara receive and transmit configuration registers
  ***************************************************************************/
-#define	RCV_CONFIG_RESET			0x80000000	// RcvConfig register reset
-#define	RCV_CONFIG_ENABLE			0x40000000	// Enable the receive logic
-#define	RCV_CONFIG_ENPARSE			0x20000000	// Enable the receive parser
-#define	RCV_CONFIG_SOCKET			0x10000000	// Enable the socket detector
-#define	RCV_CONFIG_RCVBAD			0x08000000	// Receive all bad frames
-#define	RCV_CONFIG_CONTROL			0x04000000	// Receive all control frames
-#define	RCV_CONFIG_RCVPAUSE			0x02000000	// Enable pause transmit when attn
-#define	RCV_CONFIG_TZIPV6			0x01000000	// Include TCP port w/ IPv6 toeplitz
-#define	RCV_CONFIG_TZIPV4			0x00800000	// Include TCP port w/ IPv4 toeplitz
-#define	RCV_CONFIG_FLUSH			0x00400000	// Flush buffers
+#define	RCV_CONFIG_RESET		0x80000000	// RcvConfig register reset
+#define	RCV_CONFIG_ENABLE		0x40000000	// Enable the receive logic
+#define	RCV_CONFIG_ENPARSE		0x20000000	// Enable the receive parser
+#define	RCV_CONFIG_SOCKET		0x10000000	// Enable the socket detector
+#define	RCV_CONFIG_RCVBAD		0x08000000	// Receive all bad frames
+#define	RCV_CONFIG_CONTROL		0x04000000	// Receive all control frames
+#define	RCV_CONFIG_RCVPAUSE		0x02000000	// Enable pause transmit when attn
+#define	RCV_CONFIG_TZIPV6		0x01000000	// Include TCP port w/ IPv6 toeplitz
+#define	RCV_CONFIG_TZIPV4		0x00800000	// Include TCP port w/ IPv4 toeplitz
+#define	RCV_CONFIG_FLUSH		0x00400000	// Flush buffers
 #define	RCV_CONFIG_PRIORITY_MASK	0x00300000	// Priority level
 #define	RCV_CONFIG_HASH_MASK		0x00030000	// Hash depth
-#define	RCV_CONFIG_HASH_8			0x00000000	// Hash depth 8
-#define	RCV_CONFIG_HASH_16			0x00010000	// Hash depth 16
-#define	RCV_CONFIG_HASH_4			0x00020000	// Hash depth 4
-#define	RCV_CONFIG_HASH_2			0x00030000	// Hash depth 2
+#define	RCV_CONFIG_HASH_8		0x00000000	// Hash depth 8
+#define	RCV_CONFIG_HASH_16		0x00010000	// Hash depth 16
+#define	RCV_CONFIG_HASH_4		0x00020000	// Hash depth 4
+#define	RCV_CONFIG_HASH_2		0x00030000	// Hash depth 2
 #define	RCV_CONFIG_BUFLEN_MASK		0x0000FFF0	// Buffer length bits 15:4. ie multiple of 16.
-#define RCV_CONFIG_SKT_DIS			0x00000008	// Disable socket detection on attn
+#define RCV_CONFIG_SKT_DIS		0x00000008	// Disable socket detection on attn
 // Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
 // We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
 // and round up to nearest 16 byte boundary
 #define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
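For reference, a stand-alone sketch of the RCV_CONFIG_BUFSIZE() arithmetic above (user-space, illustrative only; the 1514-byte maximum frame is an assumed example, not a value taken from this patch):

	#include <stdio.h>

	/* Mirror of the driver macro, renamed to make clear it is an illustration */
	#define EX_BUFLEN_MASK	0x0000FFF0
	#define EX_BUFSIZE(f)	((((f) + 22) + 15) & EX_BUFLEN_MASK)

	int main(void)
	{
		/* 1514 + 18 (status/pad) + 4 (CRC) = 1536, rounded up to a multiple of 16 */
		printf("%d\n", EX_BUFSIZE(1514));	/* prints 1536 */
		return 0;
	}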
 
-#define	XMT_CONFIG_RESET			0x80000000	// XmtConfig register reset
-#define	XMT_CONFIG_ENABLE			0x40000000	// Enable transmit logic
+#define	XMT_CONFIG_RESET		0x80000000	// XmtConfig register reset
+#define	XMT_CONFIG_ENABLE		0x40000000	// Enable transmit logic
 #define	XMT_CONFIG_MAC_PARITY		0x20000000	// Inhibit MAC RAM parity error
 #define	XMT_CONFIG_BUF_PARITY		0x10000000	// Inhibit D2F buffer parity error
 #define	XMT_CONFIG_MEM_PARITY		0x08000000	// Inhibit 1T SRAM parity error
@@ -249,9 +249,9 @@
 
 // A-XGMAC Configuration Register 1
 #define AXGMAC_CFG1_XMT_PAUSE		0x80000000		// Allow the sending of Pause frames
-#define AXGMAC_CFG1_XMT_EN			0x40000000		// Enable transmit
+#define AXGMAC_CFG1_XMT_EN		0x40000000		// Enable transmit
 #define AXGMAC_CFG1_RCV_PAUSE		0x20000000		// Allow the detection of Pause frames
-#define AXGMAC_CFG1_RCV_EN			0x10000000		// Enable receive
+#define AXGMAC_CFG1_RCV_EN		0x10000000		// Enable receive
 #define AXGMAC_CFG1_XMT_STATE		0x04000000		// Current transmit state - READ ONLY
 #define AXGMAC_CFG1_RCV_STATE		0x01000000		// Current receive state - READ ONLY
 #define AXGMAC_CFG1_XOFF_SHORT		0x00001000		// Only pause for 64 slot on XOFF
@@ -262,24 +262,24 @@
 #define AXGMAC_CFG1_RCV_FCS2		0x00000200		// Delay receive FCS 2 4-byte words
 #define AXGMAC_CFG1_RCV_FCS3		0x00000300		// Delay receive FCS 3 4-byte words
 #define AXGMAC_CFG1_PKT_OVERRIDE	0x00000080		// Per-packet override enable
-#define AXGMAC_CFG1_SWAP			0x00000040		// Byte swap enable
+#define AXGMAC_CFG1_SWAP		0x00000040		// Byte swap enable
 #define AXGMAC_CFG1_SHORT_ASSERT	0x00000020		// ASSERT srdrpfrm on short frame (<64)
 #define AXGMAC_CFG1_RCV_STRICT		0x00000010		// RCV only 802.3AE when CLEAR
 #define AXGMAC_CFG1_CHECK_LEN		0x00000008		// Verify frame length
-#define AXGMAC_CFG1_GEN_FCS			0x00000004		// Generate FCS
+#define AXGMAC_CFG1_GEN_FCS		0x00000004		// Generate FCS
 #define AXGMAC_CFG1_PAD_MASK		0x00000003		// Mask for pad bits
-#define AXGMAC_CFG1_PAD_64			0x00000001		// Pad frames to 64 bytes
+#define AXGMAC_CFG1_PAD_64		0x00000001		// Pad frames to 64 bytes
 #define AXGMAC_CFG1_PAD_VLAN		0x00000002		// Detect VLAN and pad to 68 bytes
-#define AXGMAC_CFG1_PAD_68			0x00000003		// Pad to 68 bytes
+#define AXGMAC_CFG1_PAD_68		0x00000003		// Pad to 68 bytes
 
 // A-XGMAC Configuration Register 2
 #define AXGMAC_CFG2_GEN_PAUSE		0x80000000		// Generate single pause frame (test)
 #define AXGMAC_CFG2_LF_MANUAL		0x08000000		// Manual link fault sequence
-#define AXGMAC_CFG2_LF_AUTO			0x04000000		// Auto link fault sequence
+#define AXGMAC_CFG2_LF_AUTO		0x04000000		// Auto link fault sequence
 #define AXGMAC_CFG2_LF_REMOTE		0x02000000		// Remote link fault (READ ONLY)
 #define AXGMAC_CFG2_LF_LOCAL		0x01000000		// Local link fault (READ ONLY)
 #define AXGMAC_CFG2_IPG_MASK		0x001F0000		// Inter packet gap
-#define AXGMAC_CFG2_IPG_SHIFT		16
+#define AXGMAC_CFG2_IPG_SHIFT			16
 #define AXGMAC_CFG2_PAUSE_XMT		0x00008000		// Pause transmit module
 #define AXGMAC_CFG2_IPG_EXTEN		0x00000020		// Enable IPG extension algorithm
 #define AXGMAC_CFG2_IPGEX_MASK		0x0000001F		// IPG extension
@@ -299,9 +299,9 @@
 #define AXGMAC_SARHIGH_OCTET_SIX	0x00FF0000		// Sixth octet
 
 // A-XGMAC Maximum frame length register
-#define AXGMAC_MAXFRAME_XMT			0x3FFF0000		// Maximum transmit frame length
+#define AXGMAC_MAXFRAME_XMT		0x3FFF0000		// Maximum transmit frame length
 #define AXGMAC_MAXFRAME_XMT_SHIFT	16
-#define AXGMAC_MAXFRAME_RCV			0x0000FFFF		// Maximum receive frame length
+#define AXGMAC_MAXFRAME_RCV		0x0000FFFF		// Maximum receive frame length
 // This register doesn't need to be written for standard MTU.
 // For jumbo, I'll just statically define the value here.  This
 // value sets the receive byte count to 9036 (0x234C) and the
@@ -324,34 +324,34 @@
 
 // A-XGMAC AMIIM Field Register
 #define AXGMAC_AMIIM_FIELD_ST		0xC0000000		// 2-bit ST field
-#define AXGMAC_AMIIM_FIELD_ST_SHIFT			30
+#define AXGMAC_AMIIM_FIELD_ST_SHIFT		30
 #define AXGMAC_AMIIM_FIELD_OP		0x30000000		// 2-bit OP field
-#define AXGMAC_AMIIM_FIELD_OP_SHIFT			28
-#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000		// Port address field (hstphyadx in spec)
+#define AXGMAC_AMIIM_FIELD_OP_SHIFT		28
+#define AXGMAC_AMIIM_FIELD_PORT_ADDR	0x0F800000		// Port address field (hstphyadx in spec)
 #define AXGMAC_AMIIM_FIELD_PORT_SHIFT		23
 #define AXGMAC_AMIIM_FIELD_DEV_ADDR	0x007C0000		// Device address field (hstregadx in spec)
 #define AXGMAC_AMIIM_FIELD_DEV_SHIFT		18
 #define AXGMAC_AMIIM_FIELD_TA		0x00030000		// 2-bit TA field
-#define AXGMAC_AMIIM_FIELD_TA_SHIFT			16
+#define AXGMAC_AMIIM_FIELD_TA_SHIFT		16
 #define AXGMAC_AMIIM_FIELD_DATA		0x0000FFFF		// Data field
 
 // Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
-#define	MIIM_OP_ADDR						0		// MIIM Address set operation
-#define	MIIM_OP_WRITE						1		// MIIM Write register operation
-#define	MIIM_OP_READ						2		// MIIM Read register operation
+#define	MIIM_OP_ADDR				0		// MIIM Address set operation
+#define	MIIM_OP_WRITE				1		// MIIM Write register operation
+#define	MIIM_OP_READ				2		// MIIM Read register operation
 #define	MIIM_OP_ADDR_SHIFT	(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
 
 // Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
-#define	MIIM_PORT_NUM						1		// All Sahara MIIM modules use port 1
+#define	MIIM_PORT_NUM				1		// All Sahara MIIM modules use port 1
 
 // Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
-#define	MIIM_DEV_PHY_PMA					1		// PHY PMA/PMD module MIIM device number
-#define	MIIM_DEV_PHY_PCS					3		// PHY PCS module MIIM device number
-#define	MIIM_DEV_PHY_XS						4		// PHY XS module MIIM device number
-#define	MIIM_DEV_XGXS						5		// XGXS MIIM device number
+#define	MIIM_DEV_PHY_PMA			1		// PHY PMA/PMD module MIIM device number
+#define	MIIM_DEV_PHY_PCS			3		// PHY PCS module MIIM device number
+#define	MIIM_DEV_PHY_XS				4		// PHY XS module MIIM device number
+#define	MIIM_DEV_XGXS				5		// XGXS MIIM device number
 
 // Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
-#define	MIIM_TA_10GB						2		// set to 2 for 10 GB operation
+#define	MIIM_TA_10GB				2		// set to 2 for 10 GB operation
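Taken together, the field masks and the MIIM_* values above are enough to compose one 32-bit AMIIM field word; a hedged sketch follows (only the shift/mask constants come from this header -- the variable names and the choice of an address-set cycle are assumptions for illustration):

	/* Illustrative composition of an AMIIM field word addressing PMA/PMD
	 * register `phy_reg` on the single Sahara MIIM port (placeholders). */
	u32 amiim_field = MIIM_OP_ADDR_SHIFT				  |
			  (MIIM_PORT_NUM    << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
			  (MIIM_DEV_PHY_PMA << AXGMAC_AMIIM_FIELD_DEV_SHIFT)  |
			  (MIIM_TA_10GB     << AXGMAC_AMIIM_FIELD_TA_SHIFT)   |
			  (phy_reg & AXGMAC_AMIIM_FIELD_DATA);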
 
 // A-XGMAC AMIIM Configuration Register
 #define AXGMAC_AMIIM_CFG_NOPREAM	0x00000080		// Bypass preamble of mngmt frame
@@ -365,25 +365,25 @@
 #define AXGMAC_AMIIM_INDC_BUSY		0x00000001		// Set until cmd operation complete
 
 // Link Status and Control Register
-#define	LS_PHY_CLR_RESET			0x80000000		// Clear reset signal to PHY
+#define	LS_PHY_CLR_RESET		0x80000000		// Clear reset signal to PHY
 #define	LS_SERDES_POWER_DOWN		0x40000000		// Power down the Sahara Serdes
-#define	LS_XGXS_ENABLE				0x20000000		// Enable the XAUI XGXS logic
-#define	LS_XGXS_CTL					0x10000000		// Hold XAUI XGXS logic reset until Serdes is up
-#define	LS_SERDES_DOWN				0x08000000		// When 0, XAUI Serdes is up and initialization is complete
-#define	LS_TRACE_DOWN				0x04000000		// When 0, Trace Serdes is up and initialization is complete
-#define	LS_PHY_CLK_25MHZ			0x02000000		// Set PHY clock to 25 MHz (else 156.125 MHz)
-#define	LS_PHY_CLK_EN				0x01000000		// Enable clock to PHY
-#define	LS_XAUI_LINK_UP				0x00000010		// XAUI link is up
-#define	LS_XAUI_LINK_CHNG			0x00000008		// XAUI link status has changed
-#define	LS_LINK_ALARM				0x00000004		// Link alarm pin
-#define	LS_ATTN_CTRL_MASK			0x00000003		// Mask link attention control bits
-#define	LS_ATTN_ALARM				0x00000000		// 00 => Attn on link alarm
+#define	LS_XGXS_ENABLE			0x20000000		// Enable the XAUI XGXS logic
+#define	LS_XGXS_CTL			0x10000000		// Hold XAUI XGXS logic reset until Serdes is up
+#define	LS_SERDES_DOWN			0x08000000		// When 0, XAUI Serdes is up and initialization is complete
+#define	LS_TRACE_DOWN			0x04000000		// When 0, Trace Serdes is up and initialization is complete
+#define	LS_PHY_CLK_25MHZ		0x02000000		// Set PHY clock to 25 MHz (else 156.125 MHz)
+#define	LS_PHY_CLK_EN			0x01000000		// Enable clock to PHY
+#define	LS_XAUI_LINK_UP			0x00000010		// XAUI link is up
+#define	LS_XAUI_LINK_CHNG		0x00000008		// XAUI link status has changed
+#define	LS_LINK_ALARM			0x00000004		// Link alarm pin
+#define	LS_ATTN_CTRL_MASK		0x00000003		// Mask link attention control bits
+#define	LS_ATTN_ALARM			0x00000000		// 00 => Attn on link alarm
 #define	LS_ATTN_ALARM_OR_STAT_CHNG	0x00000001		// 01 => Attn on link alarm or status change
-#define	LS_ATTN_STAT_CHNG			0x00000002		// 10 => Attn on link status change
-#define	LS_ATTN_NONE				0x00000003		// 11 => no Attn
+#define	LS_ATTN_STAT_CHNG		0x00000002		// 10 => Attn on link status change
+#define	LS_ATTN_NONE			0x00000003		// 11 => no Attn
 
 // Link Address High Registers
-#define	LINK_ADDR_ENABLE			0x80000000		// Enable this link address
+#define	LINK_ADDR_ENABLE		0x80000000		// Enable this link address
 
 
 /***************************************************************************
@@ -396,7 +396,7 @@
 #define XGXS_ADDRESS_STATUS1		0x0001			// XS Status 1
 #define XGXS_ADDRESS_DEVID_LOW		0x0002			// XS Device ID (low)
 #define XGXS_ADDRESS_DEVID_HIGH		0x0003			// XS Device ID (high)
-#define XGXS_ADDRESS_SPEED			0x0004			// XS Speed ability
+#define XGXS_ADDRESS_SPEED		0x0004			// XS Speed ability
 #define XGXS_ADDRESS_DEV_LOW		0x0005			// XS Devices in package
 #define XGXS_ADDRESS_DEV_HIGH		0x0006			// XS Devices in package
 #define XGXS_ADDRESS_STATUS2		0x0008			// XS Status 2
@@ -410,27 +410,27 @@
 #define XGXS_ADDRESS_RESET_HI2		0x8003			// Vendor-Specific Reset Hi 2
 
 // XS Control 1 register bit definitions
-#define XGXS_CONTROL1_RESET			0x8000			// Reset - self clearing
+#define XGXS_CONTROL1_RESET		0x8000			// Reset - self clearing
 #define XGXS_CONTROL1_LOOPBACK		0x4000			// Enable loopback
 #define XGXS_CONTROL1_SPEED1		0x2000			// 0 = unspecified, 1 = 10Gb+
 #define XGXS_CONTROL1_LOWPOWER		0x0400			// 1 = Low power mode
 #define XGXS_CONTROL1_SPEED2		0x0040			// Same as SPEED1 (?)
-#define XGXS_CONTROL1_SPEED			0x003C			// Everything reserved except zero (?)
+#define XGXS_CONTROL1_SPEED		0x003C			// Everything reserved except zero (?)
 
 // XS Status 1 register bit definitions
-#define XGXS_STATUS1_FAULT			0x0080			// Fault detected
-#define XGXS_STATUS1_LINK			0x0004			// 1 = Link up
+#define XGXS_STATUS1_FAULT		0x0080			// Fault detected
+#define XGXS_STATUS1_LINK		0x0004			// 1 = Link up
 #define XGXS_STATUS1_LOWPOWER		0x0002			// 1 = Low power supported
 
 // XS Speed register bit definitions
-#define XGXS_SPEED_10G				0x0001			// 1 = 10G capable
+#define XGXS_SPEED_10G			0x0001			// 1 = 10G capable
 
 // XS Devices register bit definitions
-#define XGXS_DEVICES_DTE			0x0020			// DTE XS Present
-#define XGXS_DEVICES_PHY			0x0010			// PHY XS Present
-#define XGXS_DEVICES_PCS			0x0008			// PCS Present
-#define XGXS_DEVICES_WIS			0x0004			// WIS Present
-#define XGXS_DEVICES_PMD			0x0002			// PMD/PMA Present
+#define XGXS_DEVICES_DTE		0x0020			// DTE XS Present
+#define XGXS_DEVICES_PHY		0x0010			// PHY XS Present
+#define XGXS_DEVICES_PCS		0x0008			// PCS Present
+#define XGXS_DEVICES_WIS		0x0004			// WIS Present
+#define XGXS_DEVICES_PMD		0x0002			// PMD/PMA Present
 #define XGXS_DEVICES_CLAUSE22		0x0001			// Clause 22 registers present
 
 // XS Devices High register bit definitions
@@ -444,18 +444,18 @@
 #define XGXS_STATUS2_RCV_FAULT		0x0400			// Receive fault
 
 // XS Package ID High register bit definitions
-#define XGXS_PKGID_HIGH_ORG			0xFC00			// Organizationally Unique
-#define XGXS_PKGID_HIGH_MFG			0x03F0			// Manufacturer Model
-#define XGXS_PKGID_HIGH_REV			0x000F			// Revision Number
+#define XGXS_PKGID_HIGH_ORG		0xFC00			// Organizationally Unique
+#define XGXS_PKGID_HIGH_MFG		0x03F0			// Manufacturer Model
+#define XGXS_PKGID_HIGH_REV		0x000F			// Revision Number
 
 // XS Lane Status register bit definitions
-#define XGXS_LANE_PHY				0x1000			// PHY/DTE lane alignment status
-#define XGXS_LANE_PATTERN			0x0800			// Pattern testing ability
-#define XGXS_LANE_LOOPBACK			0x0400			// PHY loopback ability
-#define XGXS_LANE_SYNC3				0x0008			// Lane 3 sync
-#define XGXS_LANE_SYNC2				0x0004			// Lane 2 sync
-#define XGXS_LANE_SYNC1				0x0002			// Lane 1 sync
-#define XGXS_LANE_SYNC0				0x0001			// Lane 0 sync
+#define XGXS_LANE_PHY			0x1000			// PHY/DTE lane alignment status
+#define XGXS_LANE_PATTERN		0x0800			// Pattern testing ability
+#define XGXS_LANE_LOOPBACK		0x0400			// PHY loopback ability
+#define XGXS_LANE_SYNC3			0x0008			// Lane 3 sync
+#define XGXS_LANE_SYNC2			0x0004			// Lane 2 sync
+#define XGXS_LANE_SYNC1			0x0002			// Lane 1 sync
+#define XGXS_LANE_SYNC0			0x0001			// Lane 0 sync
 
 // XS Test Control register bit definitions
 #define XGXS_TEST_PATTERN_ENABLE	0x0004			// Test pattern enabled
@@ -473,10 +473,10 @@
 // LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
 #define LASI_RX_ALARM_CONTROL		0x9000			// LASI RX_ALARM Control
 #define LASI_TX_ALARM_CONTROL		0x9001			// LASI TX_ALARM Control
-#define LASI_CONTROL				0x9002			// LASI Control
+#define LASI_CONTROL			0x9002			// LASI Control
 #define LASI_RX_ALARM_STATUS		0x9003			// LASI RX_ALARM Status
 #define LASI_TX_ALARM_STATUS		0x9004			// LASI TX_ALARM Status
-#define LASI_STATUS					0x9005			// LASI Status
+#define LASI_STATUS			0x9005			// LASI Status
 
 // LASI_CONTROL bit definitions
 #define	LASI_CTL_RX_ALARM_ENABLE	0x0004			// Enable RX_ALARM interrupts
@@ -489,34 +489,34 @@
 #define	LASI_STATUS_LS_ALARM		0x0001			// Link Status
 
 // PHY registers - PMA/PMD (device 1)
-#define	PHY_PMA_CONTROL1			0x0000			// PMA/PMD Control 1
-#define	PHY_PMA_STATUS1				0x0001			// PMA/PMD Status 1
-#define	PHY_PMA_RCV_DET				0x000A			// PMA/PMD Receive Signal Detect
+#define	PHY_PMA_CONTROL1		0x0000			// PMA/PMD Control 1
+#define	PHY_PMA_STATUS1			0x0001			// PMA/PMD Status 1
+#define	PHY_PMA_RCV_DET			0x000A			// PMA/PMD Receive Signal Detect
 		// other PMA/PMD registers exist and can be defined as needed
 
 // PHY registers - PCS (device 3)
-#define	PHY_PCS_CONTROL1			0x0000			// PCS Control 1
-#define	PHY_PCS_STATUS1				0x0001			// PCS Status 1
-#define	PHY_PCS_10G_STATUS1			0x0020			// PCS 10GBASE-R Status 1
+#define	PHY_PCS_CONTROL1		0x0000			// PCS Control 1
+#define	PHY_PCS_STATUS1			0x0001			// PCS Status 1
+#define	PHY_PCS_10G_STATUS1		0x0020			// PCS 10GBASE-R Status 1
 		// other PCS registers exist and can be defined as needed
 
 // PHY registers - XS (device 4)
-#define	PHY_XS_CONTROL1				0x0000			// XS Control 1
-#define	PHY_XS_STATUS1				0x0001			// XS Status 1
-#define	PHY_XS_LANE_STATUS			0x0018			// XS Lane Status
+#define	PHY_XS_CONTROL1			0x0000			// XS Control 1
+#define	PHY_XS_STATUS1			0x0001			// XS Status 1
+#define	PHY_XS_LANE_STATUS		0x0018			// XS Lane Status
 		// other XS registers exist and can be defined as needed
 
 // PHY_PMA_CONTROL1 register bit definitions
-#define	PMA_CONTROL1_RESET			0x8000			// PMA/PMD reset
+#define	PMA_CONTROL1_RESET		0x8000			// PMA/PMD reset
 
 // PHY_PMA_RCV_DET register bit definitions
-#define	PMA_RCV_DETECT				0x0001			// PMA/PMD receive signal detect
+#define	PMA_RCV_DETECT			0x0001			// PMA/PMD receive signal detect
 
 // PHY_PCS_10G_STATUS1 register bit definitions
-#define	PCS_10B_BLOCK_LOCK			0x0001			// PCS 10GBASE-R locked to receive blocks
+#define	PCS_10B_BLOCK_LOCK		0x0001			// PCS 10GBASE-R locked to receive blocks
 
 // PHY_XS_LANE_STATUS register bit definitions
-#define	XS_LANE_ALIGN				0x1000			// XS transmit lanes aligned
+#define	XS_LANE_ALIGN			0x1000			// XS transmit lanes aligned
 
 // PHY Microcode download data structure
 typedef struct _PHY_UCODE {
@@ -558,8 +558,8 @@
 		// command codes
 #define XMT_DESC_CMD_RAW_SEND		0		// raw send descriptor
 #define XMT_DESC_CMD_CSUM_INSERT	1		// checksum insert descriptor
-#define XMT_DESC_CMD_FORMAT			2		// format descriptor
-#define XMT_DESC_CMD_PRIME			3		// prime descriptor
+#define XMT_DESC_CMD_FORMAT		2		// format descriptor
+#define XMT_DESC_CMD_PRIME		3		// prime descriptor
 #define XMT_DESC_CMD_CODE_SHFT		6		// comand code shift (shift to bits [31:30] in word 0)
 		// shifted command codes
 #define XMT_RAW_SEND		(XMT_DESC_CMD_RAW_SEND    << XMT_DESC_CMD_CODE_SHFT)
@@ -569,22 +569,22 @@
 
 // XMT_DESC Control Byte (XmtCtl) definitions
 // NOTE:  These bits do not work on Sahara (Rev A)!
-#define	XMT_CTL_PAUSE_FRAME			0x80	// current frame is a pause control frame (for statistics)
+#define	XMT_CTL_PAUSE_FRAME		0x80	// current frame is a pause control frame (for statistics)
 #define	XMT_CTL_CONTROL_FRAME		0x40	// current frame is a control frame (for statistics)
 #define	XMT_CTL_PER_PKT_QUAL		0x20	// per packet qualifier
 #define	XMT_CTL_PAD_MODE_NONE		0x00	// do not pad frame
-#define	XMT_CTL_PAD_MODE_64			0x08	// pad frame to 64 bytes
+#define	XMT_CTL_PAD_MODE_64		0x08	// pad frame to 64 bytes
 #define	XMT_CTL_PAD_MODE_VLAN_68	0x10	// pad frame to 64 bytes, and VLAN frames to 68 bytes
-#define	XMT_CTL_PAD_MODE_68			0x18	// pad frame to 68 bytes
-#define	XMT_CTL_GEN_FCS				0x04	// generate FCS (CRC) for this frame
-#define	XMT_CTL_DELAY_FCS_0			0x00	// do not delay FCS calcution
-#define	XMT_CTL_DELAY_FCS_1			0x01	// delay FCS calculation by 1 (4-byte) word
-#define	XMT_CTL_DELAY_FCS_2			0x02	// delay FCS calculation by 2 (4-byte) words
-#define	XMT_CTL_DELAY_FCS_3			0x03	// delay FCS calculation by 3 (4-byte) words
+#define	XMT_CTL_PAD_MODE_68		0x18	// pad frame to 68 bytes
+#define	XMT_CTL_GEN_FCS			0x04	// generate FCS (CRC) for this frame
+#define	XMT_CTL_DELAY_FCS_0		0x00	// do not delay FCS calculation
+#define	XMT_CTL_DELAY_FCS_1		0x01	// delay FCS calculation by 1 (4-byte) word
+#define	XMT_CTL_DELAY_FCS_2		0x02	// delay FCS calculation by 2 (4-byte) words
+#define	XMT_CTL_DELAY_FCS_3		0x03	// delay FCS calculation by 3 (4-byte) words
 
 // XMT_DESC XmtBufId definition
-#define XMT_BUF_ID_SHFT		8			// The Xmt buffer ID is formed by dividing
-										// the buffer (DRAM) address by 256 (or << 8)
+#define XMT_BUF_ID_SHFT		8	// The Xmt buffer ID is formed by dividing
+					// the buffer (DRAM) address by 256 (or << 8)
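In other words (illustrative only; `dram_addr` is a placeholder variable, not driver code):

	u32 xmt_buf_id   = dram_addr >> XMT_BUF_ID_SHFT;	/* address / 256 */
	u32 xmt_buf_addr = xmt_buf_id << XMT_BUF_ID_SHFT;	/* back to the 256-byte-aligned address */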
 
 /*****************************************************************************
  * Receiver Sequencer Definitions
@@ -594,8 +594,8 @@
 #define	RCV_EVTQ_RBFID_MASK		0x0000FFFF	// bit mask for the Receive Buffer ID
 
 // Receive Buffer ID definition
-#define RCV_BUF_ID_SHFT		5			// The Rcv buffer ID is formed by dividing
-										// the buffer (DRAM) address by 32 (or << 5)
+#define RCV_BUF_ID_SHFT		5	// The Rcv buffer ID is formed by dividing
+					// the buffer (DRAM) address by 32 (or << 5)
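And the receive-side equivalent (again illustrative; `dram_addr` is a placeholder):

	u32 rcv_buf_id = dram_addr >> RCV_BUF_ID_SHFT;	/* address / 32 */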
 
 // Format of the 18 byte Receive Buffer returned by the
 // Receive Sequencer for received packets
@@ -623,48 +623,48 @@
  * Queue definitions
  *****************************************************************************/
 
-// Ingress (read only) queue numbers
-#define PXY_BUF_Q		0		// Proxy Buffer Queue
-#define HST_EVT_Q		1		// Host Event Queue
-#define XMT_BUF_Q		2		// Transmit Buffer Queue
-#define SKT_EVL_Q		3		// RcvSqr Socket Event Low Priority Queue
-#define RCV_EVL_Q		4		// RcvSqr Rcv Event Low Priority Queue
-#define SKT_EVH_Q		5		// RcvSqr Socket Event High Priority Queue
-#define RCV_EVH_Q		6		// RcvSqr Rcv Event High Priority Queue
-#define DMA_RSP_Q		7		// Dma Response Queue - one per CPU context
-// Local (read/write) queue numbers
-#define LOCAL_A_Q		8		// Spare local Queue
-#define LOCAL_B_Q		9		// Spare local Queue
-#define LOCAL_C_Q		10		// Spare local Queue
-#define FSM_EVT_Q		11		// Finite-State-Machine Event Queue
-#define SBF_PAL_Q		12		// System Buffer Physical Address (low) Queue
-#define SBF_PAH_Q		13		// System Buffer Physical Address (high) Queue
-#define SBF_VAL_Q		14		// System Buffer Virtual Address (low) Queue
-#define SBF_VAH_Q		15		// System Buffer Virtual Address (high) Queue
-// Egress (write only) queue numbers
-#define H2G_CMD_Q		16		// Host to GlbRam DMA Command Queue
-#define H2D_CMD_Q		17		// Host to DRAM DMA Command Queue
-#define G2H_CMD_Q		18		// GlbRam to Host DMA Command Queue
-#define G2D_CMD_Q		19		// GlbRam to DRAM DMA Command Queue
-#define D2H_CMD_Q		20		// DRAM to Host DMA Command Queue
-#define D2G_CMD_Q		21		// DRAM to GlbRam DMA Command Queue
-#define D2D_CMD_Q		22		// DRAM to DRAM DMA Command Queue
-#define PXL_CMD_Q		23		// Low Priority Proxy Command Queue
-#define PXH_CMD_Q		24		// High Priority Proxy Command Queue
-#define RSQ_CMD_Q		25		// Receive Sequencer Command Queue
-#define RCV_BUF_Q		26		// Receive Buffer Queue
+/* Ingress (read only) queue numbers */
+#define PXY_BUF_Q		0		/* Proxy Buffer Queue */
+#define HST_EVT_Q		1		/* Host Event Queue */
+#define XMT_BUF_Q		2		/* Transmit Buffer Queue */
+#define SKT_EVL_Q		3		/* RcvSqr Socket Event Low Priority Queue */
+#define RCV_EVL_Q		4		/* RcvSqr Rcv Event Low Priority Queue */
+#define SKT_EVH_Q		5		/* RcvSqr Socket Event High Priority Queue */
+#define RCV_EVH_Q		6		/* RcvSqr Rcv Event High Priority Queue */
+#define DMA_RSP_Q		7		/* Dma Response Queue - one per CPU context */
+/* Local (read/write) queue numbers */
+#define LOCAL_A_Q		8		/* Spare local Queue */
+#define LOCAL_B_Q		9		/* Spare local Queue */
+#define LOCAL_C_Q		10		/* Spare local Queue */
+#define FSM_EVT_Q		11		/* Finite-State-Machine Event Queue */
+#define SBF_PAL_Q		12		/* System Buffer Physical Address (low) Queue */
+#define SBF_PAH_Q		13		/* System Buffer Physical Address (high) Queue */
+#define SBF_VAL_Q		14		/* System Buffer Virtual Address (low) Queue */
+#define SBF_VAH_Q		15		/* System Buffer Virtual Address (high) Queue */
+/* Egress (write only) queue numbers */
+#define H2G_CMD_Q		16		/* Host to GlbRam DMA Command Queue */
+#define H2D_CMD_Q		17		/* Host to DRAM DMA Command Queue */
+#define G2H_CMD_Q		18		/* GlbRam to Host DMA Command Queue */
+#define G2D_CMD_Q		19		/* GlbRam to DRAM DMA Command Queue */
+#define D2H_CMD_Q		20		/* DRAM to Host DMA Command Queue */
+#define D2G_CMD_Q		21		/* DRAM to GlbRam DMA Command Queue */
+#define D2D_CMD_Q		22		/* DRAM to DRAM DMA Command Queue */
+#define PXL_CMD_Q		23		/* Low Priority Proxy Command Queue */
+#define PXH_CMD_Q		24		/* High Priority Proxy Command Queue */
+#define RSQ_CMD_Q		25		/* Receive Sequencer Command Queue */
+#define RCV_BUF_Q		26		/* Receive Buffer Queue */
 
-// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q)
-#define PXY_COPY_EN		0x00200000		// enable copy of xmt descriptor to xmt command queue
-#define PXY_SIZE_16		0x00000000		// copy 16 bytes
-#define PXY_SIZE_32		0x00100000		// copy 32 bytes
+/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
+#define PXY_COPY_EN		0x00200000		/* enable copy of xmt descriptor to xmt command queue */
+#define PXY_SIZE_16		0x00000000		/* copy 16 bytes */
+#define PXY_SIZE_32		0x00100000		/* copy 32 bytes */
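A hedged sketch of how these bits could be OR-ed into a proxy command word (the remaining bits of the command word are not shown in this hunk, so treat this purely as illustration):

	u32 pxy_cmd = PXY_COPY_EN | PXY_SIZE_32;	/* copy 32 bytes of the xmt descriptor */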
 
 /*****************************************************************************
  * SXG EEPROM/Flash Configuration Definitions
  *****************************************************************************/
 #pragma pack(push, 1)
 
-//
+/* */
 typedef struct _HW_CFG_DATA {
 	ushort		Addr;
 	union {
@@ -673,22 +673,22 @@
 	};
 } HW_CFG_DATA, *PHW_CFG_DATA;
 
-//
+/* */
 #define	NUM_HW_CFG_ENTRIES	((128/sizeof(HW_CFG_DATA)) - 4)
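The union body of HW_CFG_DATA is elided by the hunk above, so the entry count cannot be confirmed here; purely as an illustration:

	/* Assumption only -- if sizeof(HW_CFG_DATA) were 4 bytes, then
	 * (128 / 4) - 4 == 28 entries, i.e. a 128-byte section with the
	 * last four slots held back. */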
 
-// MAC address
+/* MAC address */
 typedef struct _SXG_CONFIG_MAC {
-	unsigned char		MacAddr[6];			// MAC Address
+	unsigned char		MacAddr[6];			/* MAC Address */
 } SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
 
-//
+/* */
 typedef struct _ATK_FRU {
 	unsigned char		PartNum[6];
 	unsigned char		Revision[2];
 	unsigned char		Serial[14];
 } ATK_FRU, *PATK_FRU;
 
-// OEM FRU Format types
+/* OEM FRU Format types */
 #define	ATK_FRU_FORMAT		0x0000
 #define CPQ_FRU_FORMAT		0x0001
 #define DELL_FRU_FORMAT		0x0002
@@ -697,24 +697,24 @@
 #define EMC_FRU_FORMAT		0x0005
 #define NO_FRU_FORMAT		0xFFFF
 
-// EEPROM/Flash Format
+/* EEPROM/Flash Format */
 typedef struct _SXG_CONFIG {
-	//
-	// Section 1 (128 bytes)
-	//
-	ushort			MagicWord;			// EEPROM/FLASH Magic code 'A5A5'
-	ushort			SpiClks;			// SPI bus clock dividers
+	/* */
+	/* Section 1 (128 bytes) */
+	/* */
+	ushort			MagicWord;			/* EEPROM/FLASH Magic code 'A5A5' */
+	ushort			SpiClks;			/* SPI bus clock dividers */
 	HW_CFG_DATA		HwCfg[NUM_HW_CFG_ENTRIES];
-	//
-	//
-	//
-	ushort			Version;			// EEPROM format version
-	SXG_CONFIG_MAC	MacAddr[4];			// space for 4 MAC addresses
-	ATK_FRU			AtkFru;				// FRU information
-	ushort			OemFruFormat;		// OEM FRU format type
-	unsigned char			OemFru[76];			// OEM FRU information (optional)
-	ushort			Checksum;			// Checksum of section 2
-	// CS info XXXTODO
+	/* */
+	/* */
+	/* */
+	ushort			Version;			/* EEPROM format version */
+	SXG_CONFIG_MAC	MacAddr[4];			/* space for 4 MAC addresses */
+	ATK_FRU			AtkFru;				/* FRU information */
+	ushort			OemFruFormat;		/* OEM FRU format type */
+	unsigned char			OemFru[76];			/* OEM FRU information (optional) */
+	ushort			Checksum;			/* Checksum of section 2 */
+	/* CS info XXXTODO */
 } SXG_CONFIG, *PSXG_CONFIG;
 #pragma pack(pop)
 
@@ -723,12 +723,12 @@
  *****************************************************************************/
 
 // Sahara (ASIC level) defines
-#define SAHARA_GRAM_SIZE			0x020000		// GRAM size - 128 KB
-#define SAHARA_DRAM_SIZE			0x200000		// DRAM size - 2 MB
-#define SAHARA_QRAM_SIZE			0x004000		// QRAM size - 16K entries (64 KB)
-#define SAHARA_WCS_SIZE				0x002000		// WCS - 8K instructions (x 108 bits)
+#define SAHARA_GRAM_SIZE		0x020000		// GRAM size - 128 KB
+#define SAHARA_DRAM_SIZE		0x200000		// DRAM size - 2 MB
+#define SAHARA_QRAM_SIZE		0x004000		// QRAM size - 16K entries (64 KB)
+#define SAHARA_WCS_SIZE			0x002000		// WCS - 8K instructions (x 108 bits)
 
 // Arabia (board level) defines
-#define	FLASH_SIZE				0x080000		// 512 KB (4 Mb)
-#define	EEPROM_SIZE_XFMR		512				// true EEPROM size (bytes), including xfmr area
-#define	EEPROM_SIZE_NO_XFMR		256				// EEPROM size excluding xfmr area
+#define	FLASH_SIZE			0x080000		// 512 KB (4 Mb)
+#define	EEPROM_SIZE_XFMR		512			// true EEPROM size (bytes), including xfmr area
+#define	EEPROM_SIZE_NO_XFMR		256			// EEPROM size excluding xfmr area
diff --git a/drivers/staging/sxg/sxgphycode.h b/drivers/staging/sxg/sxgphycode.h
index 26b36c8..8dbaeda 100644
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -34,7 +34,7 @@
 	 */
 	/* Addr, Data */
 	{0xc017, 0xfeb0},	/* flip RX_LOS polarity (mandatory */
-				/*  patch for SFP+ applications) */
+	/*  patch for SFP+ applications) */
 	{0xC001, 0x0428},	/* flip RX serial polarity */
 
 	{0xc013, 0xf341},	/* invert lxmit clock (mandatory patch) */
@@ -43,7 +43,7 @@
 	{0xc210, 0x8000},	/* reset datapath (mandatory patch) */
 	{0xc210, 0x0000},	/* reset datapath (mandatory patch) */
 	{0x0000, 0x0032},	/* wait for 50ms for datapath reset to */
-				/* complete. (mandatory patch) */
+	/* complete. (mandatory patch) */
 
 	/* Configure the LED's */
 	{0xc214, 0x0099},	/* configure the LED drivers */
@@ -52,15 +52,15 @@
 
 	/* Transceiver-specific MDIO Patches: */
 	{0xc010, 0x448a},	/* (bit 14) mask out high BER input from the */
-				/* LOS signal in 1.000A */
-				/* (mandatory patch for SR code)*/
+	/* LOS signal in 1.000A */
+	/* (mandatory patch for SR code) */
 	{0xc003, 0x0181},	/* (bit 7) enable the CDR inc setting in */
-				/* 1.C005 (mandatory patch for SR code) */
+	/* 1.C005 (mandatory patch for SR code) */
 
 	/* Transceiver-specific Microcontroller Initialization: */
 	{0xc04a, 0x5200},	/* activate microcontroller and pause */
 	{0x0000, 0x0032},	/* wait 50ms for microcontroller before */
-				/* writing in code. */
+	/* writing in code. */
 
 	/* code block starts here: */
 	{0xcc00, 0x2009},
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e64918f..72e2092 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -221,7 +221,7 @@
 static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
 {
 	if (!cmd) {
-		printk("      %s : null pointer\n", __FUNCTION__);
+		printk("      %s : null pointer\n", __func__);
 		return;
 	}
 
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 933ccaf..58e3995 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -202,7 +202,7 @@
 	ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
 	if (ret != sizeof(pdu)) {
 		uerr("receiving pdu failed! size is %d, should be %d\n",
-				ret, sizeof(pdu));
+				ret, (unsigned int)sizeof(pdu));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
 	}
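The cast above matches the existing %d conversion; an alternative worth noting (not what this patch does, and assuming uerr() forwards its format string to printk, which supports the %zu length modifier for size_t) would be:

	uerr("receiving pdu failed! size is %d, should be %zu\n",
			ret, sizeof(pdu));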
diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig
index 10d72be..425219e 100644
--- a/drivers/staging/winbond/Kconfig
+++ b/drivers/staging/winbond/Kconfig
@@ -1,6 +1,6 @@
 config W35UND
 	tristate "Winbond driver"
-	depends on MAC80211 && WLAN_80211 && EXPERIMENTAL && !4KSTACKS
+	depends on MAC80211 && WLAN_80211 && USB && EXPERIMENTAL && !4KSTACKS
 	default n
 	---help---
 	  This is highly experimental driver for winbond wifi card on some Kohjinsha notebooks
diff --git a/drivers/staging/winbond/README b/drivers/staging/winbond/README
index 707b6b3..cb944e4 100644
--- a/drivers/staging/winbond/README
+++ b/drivers/staging/winbond/README
@@ -5,6 +5,7 @@
 	- remove typedefs
 	- remove unused ioctls
 	- use cfg80211 for regulatory stuff
+	- fix 4k stack problems
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
 Pavel Machek <pavel@suse.cz>
diff --git a/drivers/staging/winbond/bss_f.h b/drivers/staging/winbond/bss_f.h
index c957bc9..0131831 100644
--- a/drivers/staging/winbond/bss_f.h
+++ b/drivers/staging/winbond/bss_f.h
@@ -24,7 +24,7 @@
 							 u8 *pBasicRateSet, u8 BasicRateCount,
 							 u8 *pOperationRateSet, u8 OperationRateCount);
 void BSSAddIBSSdata(PWB32_ADAPTER Adapter, PWB_BSSDESCRIPTION psDesData);
-unsigned char boCmpMacAddr( PUCHAR, PUCHAR );
+unsigned char boCmpMacAddr( u8 *, u8 *);
 unsigned char boCmpSSID(struct SSID_Element *psSSID1, struct SSID_Element *psSSID2);
 u16 wBSSfindSSID(PWB32_ADAPTER Adapter, struct SSID_Element *psSsid);
 u16 wRoamingQuery(PWB32_ADAPTER Adapter);
@@ -42,11 +42,11 @@
 void Assemble_IE(PWB32_ADAPTER Adapter, u16 wBssIdx);
 void SetMaxTxRate(PWB32_ADAPTER Adapter);
 
-void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct  Management_Frame* msgHeader,
+void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct  Management_Frame* msgHeader,
 				 struct Association_Request_Frame_Body* msgBody, u16 iMSindex); //added by WS 05/14/05
 
 #ifdef _WPA2_
-void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct  Management_Frame* msgHeader,
+void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct  Management_Frame* msgHeader,
 				 struct Association_Request_Frame_Body* msgBody, u16 iMSindex);//added by WS 05/14/05
 
 u16 SearchPmkid(PWB32_ADAPTER Adapter, struct  Management_Frame* msgHeader,
diff --git a/drivers/staging/winbond/ds_tkip.h b/drivers/staging/winbond/ds_tkip.h
index 29e5055..6841d66 100644
--- a/drivers/staging/winbond/ds_tkip.h
+++ b/drivers/staging/winbond/ds_tkip.h
@@ -25,9 +25,9 @@
 	s32		bytes_in_M;	// # bytes in M
 } tkip_t;
 
-//void _append_data( PUCHAR pData, u16 size, tkip_t *p );
-void Mds_MicGet(  void* Adapter,  void* pRxLayer1,  PUCHAR pKey,  PUCHAR pMic );
-void Mds_MicFill(  void* Adapter,  void* pDes,  PUCHAR XmitBufAddress );
+//void _append_data( u8 *pData, u16 size, tkip_t *p );
+void Mds_MicGet(  void* Adapter,  void* pRxLayer1,  u8 *pKey,  u8 *pMic );
+void Mds_MicFill(  void* Adapter,  void* pDes,  u8 *XmitBufAddress );
 
 
 
diff --git a/drivers/staging/winbond/linux/common.h b/drivers/staging/winbond/linux/common.h
index 6b00bad..712a86c 100644
--- a/drivers/staging/winbond/linux/common.h
+++ b/drivers/staging/winbond/linux/common.h
@@ -39,14 +39,6 @@
 // Common type definition
 //===============================================================
 
-typedef u8*            PUCHAR;
-typedef s8*            PCHAR;
-typedef u8*            PBOOLEAN;
-typedef u16*           PUSHORT;
-typedef u32*           PULONG;
-typedef s16*   PSHORT;
-
-
 //===========================================
 #define IGNORE      2
 #define	SUCCESS     1
@@ -110,16 +102,9 @@
 #define OS_ATOMIC_READ( _A, _V )	_V
 #define OS_ATOMIC_INC( _A, _V )		EncapAtomicInc( _A, (void*)_V )
 #define OS_ATOMIC_DEC( _A, _V )		EncapAtomicDec( _A, (void*)_V )
-#define OS_MEMORY_CLEAR( _A, _S )	memset( (PUCHAR)_A,0,_S)
+#define OS_MEMORY_CLEAR( _A, _S )	memset( (u8 *)_A,0,_S)
 #define OS_MEMORY_COMPARE( _A, _B, _S )	(memcmp(_A,_B,_S)? 0 : 1) // Definition is reverse with Ndis 1: the same 0: different
 
-
-#define OS_SPIN_LOCK				spinlock_t
-#define OS_SPIN_LOCK_ALLOCATE( _S )		spin_lock_init( _S );
-#define OS_SPIN_LOCK_FREE( _S )
-#define OS_SPIN_LOCK_ACQUIRED( _S )		spin_lock_irq( _S )
-#define OS_SPIN_LOCK_RELEASED( _S )		spin_unlock_irq( _S );
-
 #define OS_TIMER	struct timer_list
 #define OS_TIMER_INITIAL( _T, _F, _P )			\
 {							\
diff --git a/drivers/staging/winbond/linux/wb35reg.c b/drivers/staging/winbond/linux/wb35reg.c
index 2c0b454..ebb6db5 100644
--- a/drivers/staging/winbond/linux/wb35reg.c
+++ b/drivers/staging/winbond/linux/wb35reg.c
@@ -10,7 +10,7 @@
 // Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
 //		  NO_INCREMENT - Function will write data into the same register
 unsigned char
-Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag)
+Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag)
 {
 	PWB35REG pWb35Reg = &pHwData->Wb35Reg;
 	PURB		pUrb = NULL;
@@ -30,13 +30,13 @@
 	if( pUrb && pRegQueue ) {
 		pRegQueue->DIRECT = 2;// burst write register
 		pRegQueue->INDEX = RegisterNo;
-		pRegQueue->pBuffer = (PULONG)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+		pRegQueue->pBuffer = (u32 *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
 		memcpy( pRegQueue->pBuffer, pRegisterData, DataSize );
 		//the function for reversing register data from little endian to big endian
 		for( i=0; i<NumberOfData ; i++ )
 			pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] );
 
-		dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE) + DataSize);
+		dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE) + DataSize);
 		dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
 		dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode
 		dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment
@@ -46,14 +46,14 @@
 		pRegQueue->pUsbReq = dr;
 		pRegQueue->pUrb = pUrb;
 
-		OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+		spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
 		if (pWb35Reg->pRegFirst == NULL)
 			pWb35Reg->pRegFirst = pRegQueue;
 		else
 			pWb35Reg->pRegLast->Next = pRegQueue;
 		pWb35Reg->pRegLast = pRegQueue;
 
-		OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+		spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 
 		// Start EP0VM
 		Wb35Reg_EP0VM_start(pHwData);
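The (u8 *) casts in this hunk make the single-allocation layout explicit: the REG_QUEUE header, the burst data and the usb_ctrlrequest sit back-to-back in one buffer. A minimal sketch of that layout (the allocation itself is not visible in this hunk, so the kmalloc call and sizes are assumptions):

	/* One allocation:  [ REG_QUEUE | DataSize bytes | usb_ctrlrequest ] */
	size_t total = sizeof(REG_QUEUE) + DataSize + sizeof(struct usb_ctrlrequest);
	pRegQueue = kmalloc(total, GFP_ATOMIC);			/* header first           */
	data = (u8 *)pRegQueue + sizeof(REG_QUEUE);		/* then the burst data     */
	dr   = (struct usb_ctrlrequest *)(data + DataSize);	/* then the setup packet   */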
@@ -181,7 +181,7 @@
 		pRegQueue->INDEX = RegisterNo;
 		pRegQueue->VALUE = cpu_to_le32(RegisterValue);
 		pRegQueue->RESERVED_VALID = FALSE;
-		dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+		dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
 		dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
 		dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
 		dr->wValue = cpu_to_le16(0x0);
@@ -193,14 +193,14 @@
 		pRegQueue->pUsbReq = dr;
 		pRegQueue->pUrb = pUrb;
 
-		OS_SPIN_LOCK_ACQUIRED(&pWb35Reg->EP0VM_spin_lock );
+		spin_lock_irq(&pWb35Reg->EP0VM_spin_lock );
 		if (pWb35Reg->pRegFirst == NULL)
 			pWb35Reg->pRegFirst = pRegQueue;
 		else
 			pWb35Reg->pRegLast->Next = pRegQueue;
 		pWb35Reg->pRegLast = pRegQueue;
 
-		OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+		spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 
 		// Start EP0VM
 		Wb35Reg_EP0VM_start(pHwData);
@@ -220,7 +220,7 @@
 // FALSE : register not support
 unsigned char
 Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue,
-				PCHAR pValue, s8 Len)
+				s8 *pValue, s8 Len)
 {
 	PWB35REG	pWb35Reg = &pHwData->Wb35Reg;
 	struct usb_ctrlrequest *dr;
@@ -243,7 +243,7 @@
 		//NOTE : Users must guarantee the size of value will not exceed the buffer size.
 		memcpy(pRegQueue->RESERVED, pValue, Len);
 		pRegQueue->RESERVED_VALID = TRUE;
-		dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+		dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
 		dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
 		dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
 		dr->wValue = cpu_to_le16(0x0);
@@ -254,14 +254,14 @@
 		pRegQueue->Next = NULL;
 		pRegQueue->pUsbReq = dr;
 		pRegQueue->pUrb = pUrb;
-		OS_SPIN_LOCK_ACQUIRED (&pWb35Reg->EP0VM_spin_lock );
+		spin_lock_irq (&pWb35Reg->EP0VM_spin_lock );
 		if( pWb35Reg->pRegFirst == NULL )
 			pWb35Reg->pRegFirst = pRegQueue;
 		else
 			pWb35Reg->pRegLast->Next = pRegQueue;
 		pWb35Reg->pRegLast = pRegQueue;
 
-		OS_SPIN_LOCK_RELEASED ( &pWb35Reg->EP0VM_spin_lock );
+		spin_unlock_irq ( &pWb35Reg->EP0VM_spin_lock );
 
 		// Start EP0VM
 		Wb35Reg_EP0VM_start(pHwData);
@@ -278,10 +278,10 @@
 // FALSE : register not support
 // pRegisterValue : It must be a resident buffer due to asynchronous read register.
 unsigned char
-Wb35Reg_ReadSync(  phw_data_t pHwData,  u16 RegisterNo,   PULONG pRegisterValue )
+Wb35Reg_ReadSync(  phw_data_t pHwData,  u16 RegisterNo,   u32 * pRegisterValue )
 {
 	PWB35REG pWb35Reg = &pHwData->Wb35Reg;
-	PULONG	pltmp = pRegisterValue;
+	u32 *	pltmp = pRegisterValue;
 	int ret = -1;
 
 	// Module shutdown
@@ -327,7 +327,7 @@
 // FALSE : register not support
 // pRegisterValue : It must be a resident buffer due to asynchronous read register.
 unsigned char
-Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo,  PULONG pRegisterValue )
+Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo,  u32 * pRegisterValue )
 {
 	PWB35REG	pWb35Reg = &pHwData->Wb35Reg;
 	struct usb_ctrlrequest * dr;
@@ -348,7 +348,7 @@
 		pRegQueue->DIRECT = 0;// read register
 		pRegQueue->INDEX = RegisterNo;
 		pRegQueue->pBuffer = pRegisterValue;
-		dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+		dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
 		dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN;
 		dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode
 		dr->wValue = cpu_to_le16(0x0);
@@ -359,14 +359,14 @@
 		pRegQueue->Next = NULL;
 		pRegQueue->pUsbReq = dr;
 		pRegQueue->pUrb = pUrb;
-		OS_SPIN_LOCK_ACQUIRED ( &pWb35Reg->EP0VM_spin_lock );
+		spin_lock_irq ( &pWb35Reg->EP0VM_spin_lock );
 		if( pWb35Reg->pRegFirst == NULL )
 			pWb35Reg->pRegFirst = pRegQueue;
 		else
 			pWb35Reg->pRegLast->Next = pRegQueue;
 		pWb35Reg->pRegLast = pRegQueue;
 
-		OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+		spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 
 		// Start EP0VM
 		Wb35Reg_EP0VM_start( pHwData );
@@ -399,7 +399,7 @@
 	PWB35REG	pWb35Reg = &pHwData->Wb35Reg;
 	PURB		pUrb;
 	struct usb_ctrlrequest *dr;
-	PULONG		pBuffer;
+	u32 *		pBuffer;
 	int			ret = -1;
 	PREG_QUEUE	pRegQueue;
 
@@ -411,9 +411,9 @@
 		goto cleanup;
 
 	// Get the register data and send to USB through Irp
-	OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+	spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
 	pRegQueue = pWb35Reg->pRegFirst;
-	OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+	spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 
 	if (!pRegQueue)
 		goto cleanup;
@@ -429,7 +429,7 @@
 
 	usb_fill_control_urb( pUrb, pHwData->WbUsb.udev,
 			      REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue),
-			      (PUCHAR)dr,pBuffer,cpu_to_le16(dr->wLength),
+			      (u8 *)dr,pBuffer,cpu_to_le16(dr->wLength),
 			      Wb35Reg_EP0VM_complete, (void*)pHwData);
 
 	pWb35Reg->EP0vm_state = VM_RUNNING;
@@ -468,12 +468,12 @@
 		OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount );
 	} else {
 		// Complete to send, remove the URB from the first
-		OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+		spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
 		pRegQueue = pWb35Reg->pRegFirst;
 		if (pRegQueue == pWb35Reg->pRegLast)
 			pWb35Reg->pRegLast = NULL;
 		pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
-		OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+		spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 
 		if (pWb35Reg->EP0VM_status) {
 #ifdef _PE_REG_DUMP_
@@ -513,7 +513,7 @@
 	OS_SLEEP(10000);  // Delay for waiting function enter 940623.1.b
 
 	// Release all the data in RegQueue
-	OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+	spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
 	pRegQueue = pWb35Reg->pRegFirst;
 	while (pRegQueue) {
 		if (pRegQueue == pWb35Reg->pRegLast)
@@ -521,7 +521,7 @@
 		pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
 
 		pUrb = pRegQueue->pUrb;
-		OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+		spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 		if (pUrb) {
 			usb_free_urb(pUrb);
 			kfree(pRegQueue);
@@ -530,14 +530,11 @@
 			WBDEBUG(("EP0 queue release error\n"));
 			#endif
 		}
-		OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+		spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
 
 		pRegQueue = pWb35Reg->pRegFirst;
 	}
-	OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
-
-	// Free resource
-	OS_SPIN_LOCK_FREE(  &pWb35Reg->EP0VM_spin_lock );
+	spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
 }
 
 //====================================================================================
@@ -550,7 +547,7 @@
 	u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;
 
 	// Spin lock is acquired for read and write IRP command
-	OS_SPIN_LOCK_ALLOCATE( &pWb35Reg->EP0VM_spin_lock );
+	spin_lock_init( &pWb35Reg->EP0VM_spin_lock );
 
 	// Getting RF module type from EEPROM ------------------------------------
 	Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d)
@@ -655,7 +652,7 @@
 //    version in _GENREQ.ASM of the DWB NE1000/2000 driver.
 //==================================================================================
 u32
-CardComputeCrc(PUCHAR Buffer, u32 Length)
+CardComputeCrc(u8 * Buffer, u32 Length)
 {
     u32 Crc, Carry;
     u32  i, j;
diff --git a/drivers/staging/winbond/linux/wb35reg_f.h b/drivers/staging/winbond/linux/wb35reg_f.h
index 38e2906..3006cfe 100644
--- a/drivers/staging/winbond/linux/wb35reg_f.h
+++ b/drivers/staging/winbond/linux/wb35reg_f.h
@@ -29,16 +29,16 @@
 
 void Wb35Reg_destroy(  phw_data_t pHwData );
 
-unsigned char Wb35Reg_Read(  phw_data_t pHwData,  u16 RegisterNo,   PULONG pRegisterValue );
-unsigned char Wb35Reg_ReadSync(  phw_data_t pHwData,  u16 RegisterNo,   PULONG pRegisterValue );
+unsigned char Wb35Reg_Read(  phw_data_t pHwData,  u16 RegisterNo,   u32 * pRegisterValue );
+unsigned char Wb35Reg_ReadSync(  phw_data_t pHwData,  u16 RegisterNo,   u32 * pRegisterValue );
 unsigned char Wb35Reg_Write(  phw_data_t pHwData,  u16 RegisterNo,  u32 RegisterValue );
 unsigned char Wb35Reg_WriteSync(  phw_data_t pHwData,  u16 RegisterNo,  u32 RegisterValue );
 unsigned char Wb35Reg_WriteWithCallbackValue(  phw_data_t pHwData,
 								 u16 RegisterNo,
 								 u32 RegisterValue,
-								 PCHAR pValue,
-								 s8	Len);
-unsigned char Wb35Reg_BurstWrite(  phw_data_t pHwData,  u16 RegisterNo,  PULONG pRegisterData,  u8 NumberOfData,  u8 Flag );
+								 s8 *pValue,
+								 s8 Len);
+unsigned char Wb35Reg_BurstWrite(  phw_data_t pHwData,  u16 RegisterNo,  u32 * pRegisterData,  u8 NumberOfData,  u8 Flag );
 
 void Wb35Reg_EP0VM(  phw_data_t pHwData );
 void Wb35Reg_EP0VM_start(  phw_data_t pHwData );
@@ -47,7 +47,7 @@
 u32 BitReverse( u32 dwData, u32 DataLength);
 
 void CardGetMulticastBit(   u8 Address[MAC_ADDR_LENGTH],  u8 *Byte,  u8 *Value );
-u32 CardComputeCrc(  PUCHAR Buffer,  u32 Length );
+u32 CardComputeCrc(  u8 * Buffer,  u32 Length );
 
 void Wb35Reg_phy_calibration(  phw_data_t pHwData );
 void Wb35Reg_Update(  phw_data_t pHwData,  u16 RegisterNo,  u32 RegisterValue );
diff --git a/drivers/staging/winbond/linux/wb35reg_s.h b/drivers/staging/winbond/linux/wb35reg_s.h
index a7595b1..8b35b93 100644
--- a/drivers/staging/winbond/linux/wb35reg_s.h
+++ b/drivers/staging/winbond/linux/wb35reg_s.h
@@ -75,7 +75,7 @@
 	union
 	{
 		u32	VALUE;
-		PULONG	pBuffer;
+		u32 *	pBuffer;
 	};
 	u8	RESERVED[4];// space reserved for communication
 
@@ -143,7 +143,7 @@
 	//-------------------
 	// VM
 	//-------------------
-	OS_SPIN_LOCK	EP0VM_spin_lock; // 4B
+	spinlock_t	EP0VM_spin_lock; // 4B
 	u32	        EP0VM_status;//$$
 	PREG_QUEUE	    pRegFirst;
 	PREG_QUEUE	    pRegLast;
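With the OS_SPIN_LOCK wrappers gone, the register queue is protected with the stock kernel spinlock API directly, as the wb35reg.c hunks above already show; the pattern in one place (fragment, for reference only):

	spinlock_t lock;		/* e.g. EP0VM_spin_lock */

	spin_lock_init(&lock);		/* once, at setup */
	spin_lock_irq(&lock);		/* enter critical section, local IRQs off */
	/* ... manipulate pRegFirst / pRegLast ... */
	spin_unlock_irq(&lock);		/* leave critical section */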
diff --git a/drivers/staging/winbond/linux/wb35rx.c b/drivers/staging/winbond/linux/wb35rx.c
index 26157eb..b4b9f5f 100644
--- a/drivers/staging/winbond/linux/wb35rx.c
+++ b/drivers/staging/winbond/linux/wb35rx.c
@@ -27,7 +27,7 @@
 void Wb35Rx(  phw_data_t pHwData )
 {
 	PWB35RX	pWb35Rx = &pHwData->Wb35Rx;
-	PUCHAR	pRxBufferAddress;
+	u8 *	pRxBufferAddress;
 	PURB	pUrb = (PURB)pWb35Rx->RxUrb;
 	int	retv;
 	u32	RxBufferId;
@@ -35,51 +35,50 @@
 	//
 	// Issuing URB
 	//
-	do {
-		if (pHwData->SurpriseRemove || pHwData->HwStop)
-			break;
+	if (pHwData->SurpriseRemove || pHwData->HwStop)
+		goto error;
 
-		if (pWb35Rx->rx_halt)
-			break;
+	if (pWb35Rx->rx_halt)
+		goto error;
 
-		// Get RxBuffer's ID
-		RxBufferId = pWb35Rx->RxBufferId;
-		if (!pWb35Rx->RxOwner[RxBufferId]) {
-			// It's impossible to run here.
-			#ifdef _PE_RX_DUMP_
-			WBDEBUG(("Rx driver fifo unavailable\n"));
-			#endif
-			break;
-		}
+	// Get RxBuffer's ID
+	RxBufferId = pWb35Rx->RxBufferId;
+	if (!pWb35Rx->RxOwner[RxBufferId]) {
+		// It's impossible to run here.
+		#ifdef _PE_RX_DUMP_
+		WBDEBUG(("Rx driver fifo unavailable\n"));
+		#endif
+		goto error;
+	}
 
-		// Update buffer point, then start to bulkin the data from USB
-		pWb35Rx->RxBufferId++;
-		pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
+	// Update buffer point, then start to bulkin the data from USB
+	pWb35Rx->RxBufferId++;
+	pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
 
-		pWb35Rx->CurrentRxBufferId = RxBufferId;
+	pWb35Rx->CurrentRxBufferId = RxBufferId;
 
-		if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) {
-			printk("w35und: Rx memory alloc failed\n");
-			break;
-		}
-		pRxBufferAddress = pWb35Rx->pDRx;
+	if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) {
+		printk("w35und: Rx memory alloc failed\n");
+		goto error;
+	}
+	pRxBufferAddress = pWb35Rx->pDRx;
 
-		usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
-				  usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
-				  pRxBufferAddress, MAX_USB_RX_BUFFER,
-				  Wb35Rx_Complete, pHwData);
+	usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
+			  usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
+			  pRxBufferAddress, MAX_USB_RX_BUFFER,
+			  Wb35Rx_Complete, pHwData);
 
-		pWb35Rx->EP3vm_state = VM_RUNNING;
+	pWb35Rx->EP3vm_state = VM_RUNNING;
 
-		retv = wb_usb_submit_urb(pUrb);
+	retv = wb_usb_submit_urb(pUrb);
 
-		if (retv != 0) {
-			printk("Rx URB sending error\n");
-			break;
-		}
-		return;
-	} while(FALSE);
+	if (retv != 0) {
+		printk("Rx URB sending error\n");
+		goto error;
+	}
+	return;
 
+error:
 	// VM stop
 	pWb35Rx->EP3vm_state = VM_STOP;
 	OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
@@ -89,7 +88,7 @@
 {
 	phw_data_t	pHwData = pUrb->context;
 	PWB35RX		pWb35Rx = &pHwData->Wb35Rx;
-	PUCHAR		pRxBufferAddress;
+	u8 *		pRxBufferAddress;
 	u32		SizeCheck;
 	u16		BulkLength;
 	u32		RxBufferId;
@@ -99,65 +98,63 @@
 	pWb35Rx->EP3vm_state = VM_COMPLETED;
 	pWb35Rx->EP3VM_status = pUrb->status;//Store the last result of Irp
 
-	do {
-		RxBufferId = pWb35Rx->CurrentRxBufferId;
+	RxBufferId = pWb35Rx->CurrentRxBufferId;
 
-		pRxBufferAddress = pWb35Rx->pDRx;
-		BulkLength = (u16)pUrb->actual_length;
+	pRxBufferAddress = pWb35Rx->pDRx;
+	BulkLength = (u16)pUrb->actual_length;
 
-		// The IRP is completed
-		pWb35Rx->EP3vm_state = VM_COMPLETED;
+	// The IRP is completed
+	pWb35Rx->EP3vm_state = VM_COMPLETED;
 
-		if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
-			break;
+	if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
+		goto error;
 
-		if (pWb35Rx->rx_halt)
-			break;
+	if (pWb35Rx->rx_halt)
+		goto error;
 
-		// Start to process the data only in successful condition
-		pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
-		R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress);
+	// Start to process the data only in successful condition
+	pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
+	R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
 
-		// The URB is completed, check the result
-		if (pWb35Rx->EP3VM_status != 0) {
-			#ifdef _PE_USB_STATE_DUMP_
-			WBDEBUG(("EP3 IoCompleteRoutine return error\n"));
-			DebugUsbdStatusInformation( pWb35Rx->EP3VM_status );
-			#endif
+	// The URB is completed, check the result
+	if (pWb35Rx->EP3VM_status != 0) {
+		#ifdef _PE_USB_STATE_DUMP_
+		WBDEBUG(("EP3 IoCompleteRoutine return error\n"));
+		DebugUsbdStatusInformation( pWb35Rx->EP3VM_status );
+		#endif
+		pWb35Rx->EP3vm_state = VM_STOP;
+		goto error;
+	}
+
+	// 20060220 For recovering. check if operating in single USB mode
+	if (!HAL_USB_MODE_BURST(pHwData)) {
+		SizeCheck = R00.R00_receive_byte_count;  //20060926 anson's endian
+		if ((SizeCheck & 0x03) > 0)
+			SizeCheck -= 4;
+		SizeCheck = (SizeCheck + 3) & ~0x03;
+		SizeCheck += 12; // 8 + 4 badbeef
+		if ((BulkLength > 1600) ||
+			(SizeCheck > 1600) ||
+			(BulkLength != SizeCheck) ||
+			(BulkLength == 0)) { // Add for fail Urb
 			pWb35Rx->EP3vm_state = VM_STOP;
-			break;
+			pWb35Rx->Ep3ErrorCount2++;
 		}
+	}
 
-		// 20060220 For recovering. check if operating in single USB mode
-		if (!HAL_USB_MODE_BURST(pHwData)) {
-			SizeCheck = R00.R00_receive_byte_count;  //20060926 anson's endian
-			if ((SizeCheck & 0x03) > 0)
-				SizeCheck -= 4;
-			SizeCheck = (SizeCheck + 3) & ~0x03;
-			SizeCheck += 12; // 8 + 4 badbeef
-			if ((BulkLength > 1600) ||
-				(SizeCheck > 1600) ||
-				(BulkLength != SizeCheck) ||
-				(BulkLength == 0)) { // Add for fail Urb
-				pWb35Rx->EP3vm_state = VM_STOP;
-				pWb35Rx->Ep3ErrorCount2++;
-			}
-		}
+	// Indicating the receiving data
+	pWb35Rx->ByteReceived += BulkLength;
+	pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
 
-		// Indicating the receiving data
-		pWb35Rx->ByteReceived += BulkLength;
-		pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
+	if (!pWb35Rx->RxOwner[ RxBufferId ])
+		Wb35Rx_indicate(pHwData);
 
-		if (!pWb35Rx->RxOwner[ RxBufferId ])
-			Wb35Rx_indicate(pHwData);
+	kfree(pWb35Rx->pDRx);
+	// Do the next receive
+	Wb35Rx(pHwData);
+	return;
 
-		kfree(pWb35Rx->pDRx);
-		// Do the next receive
-		Wb35Rx(pHwData);
-		return;
-
-	} while(FALSE);
-
+error:
 	pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware
 	OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
 	pWb35Rx->EP3vm_state = VM_STOP;
@@ -223,7 +220,7 @@
 
 void Wb35Rx_adjust(PDESCRIPTOR pRxDes)
 {
-	PULONG	pRxBufferAddress;
+	u32 *	pRxBufferAddress;
 	u32	DecryptionMethod;
 	u32	i;
 	u16	BufferSize;
@@ -264,7 +261,7 @@
 {
 	DESCRIPTOR	RxDes;
 	PWB35RX	pWb35Rx = &pHwData->Wb35Rx;
-	PUCHAR		pRxBufferAddress;
+	u8 *		pRxBufferAddress;
 	u16		PacketSize;
 	u16		stmp, BufferSize, stmp2 = 0;
 	u32		RxBufferId;
@@ -283,13 +280,13 @@
 
 		// Parse the bulkin buffer
 		while (BufferSize >= 4) {
-			if ((cpu_to_le32(*(PULONG)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
+			if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
 				break;
 
 			// Get the R00 R01 first
-			RxDes.R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress);
+			RxDes.R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
 			PacketSize = (u16)RxDes.R00.R00_receive_byte_count;
-			RxDes.R01.value = le32_to_cpu(*((PULONG)(pRxBufferAddress+4)));
+			RxDes.R01.value = le32_to_cpu(*((u32 *)(pRxBufferAddress+4)));
 			// For new DMA 4k
 			if ((PacketSize & 0x03) > 0)
 				PacketSize -= 4;
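
The wb35rx.c hunks above (and the Tx ones further down) replace the driver's do { ... } while (FALSE) blocks, which used break to reach shared cleanup code, with straight-line code that jumps to an error: label. A minimal sketch of that transformation follows; rx_ctx, submit_urb() and the state values are hypothetical stand-ins, not the driver's real names.

/* Illustrative only: rx_ctx and submit_urb() are placeholders. */
enum rx_state { RX_RUNNING, RX_STOPPED };

struct rx_ctx {
	int halted;
	enum rx_state state;
};

int submit_urb(struct rx_ctx *ctx);	/* stand-in for wb_usb_submit_urb() */

/* Old shape: a do { } while (0) "loop" exists only so that break
 * can fall through to the shared cleanup code after it. */
static void start_rx_old(struct rx_ctx *ctx)
{
	do {
		if (ctx->halted)
			break;
		ctx->state = RX_RUNNING;
		if (submit_urb(ctx) != 0)
			break;
		return;			/* success: skip the cleanup */
	} while (0);

	ctx->state = RX_STOPPED;	/* shared error cleanup */
}

/* New shape, as in the hunks above: same control flow, but the exit
 * path is an explicit goto label. */
static void start_rx_new(struct rx_ctx *ctx)
{
	if (ctx->halted)
		goto error;

	ctx->state = RX_RUNNING;
	if (submit_urb(ctx) != 0)
		goto error;

	return;

error:
	ctx->state = RX_STOPPED;
}

The control flow is unchanged; the early return on success still skips the cleanup, which is why the label sits after it.
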
diff --git a/drivers/staging/winbond/linux/wb35rx_s.h b/drivers/staging/winbond/linux/wb35rx_s.h
index 53b831f..b90c269 100644
--- a/drivers/staging/winbond/linux/wb35rx_s.h
+++ b/drivers/staging/winbond/linux/wb35rx_s.h
@@ -41,7 +41,7 @@
 	u32		Ep3ErrorCount2; // 20060625.1 Usbd for Rx DMA error count
 
 	int		EP3VM_status;
-	PUCHAR	pDRx;
+	u8 *	pDRx;
 
 } WB35RX, *PWB35RX;
 
diff --git a/drivers/staging/winbond/linux/wb35tx.c b/drivers/staging/winbond/linux/wb35tx.c
index cf19c3b..ba9d512 100644
--- a/drivers/staging/winbond/linux/wb35tx.c
+++ b/drivers/staging/winbond/linux/wb35tx.c
@@ -12,7 +12,7 @@
 
 
 unsigned char
-Wb35Tx_get_tx_buffer(phw_data_t pHwData, PUCHAR *pBuffer )
+Wb35Tx_get_tx_buffer(phw_data_t pHwData, u8 **pBuffer)
 {
 	PWB35TX pWb35Tx = &pHwData->Wb35Tx;
 
@@ -37,7 +37,7 @@
 {
 	PWB35TX		pWb35Tx = &pHwData->Wb35Tx;
 	PADAPTER	Adapter = pHwData->Adapter;
-	PUCHAR		pTxBufferAddress;
+	u8		*pTxBufferAddress;
 	PMDS		pMds = &Adapter->Mds;
 	struct urb *	pUrb = (struct urb *)pWb35Tx->Tx4Urb;
 	int         	retv;
@@ -100,25 +100,24 @@
 	pWb35Tx->TxSendIndex++;
 	pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER;
 
-	do {
-		if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
-			break;
+	if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+		goto error;
 
-		if (pWb35Tx->tx_halt)
-			break;
+	if (pWb35Tx->tx_halt)
+		goto error;
 
-		// The URB is completed, check the result
-		if (pWb35Tx->EP4VM_status != 0) {
-			printk("URB submission failed\n");
-			pWb35Tx->EP4vm_state = VM_STOP;
-			break; // Exit while(FALSE);
-		}
+	// The URB is completed, check the result
+	if (pWb35Tx->EP4VM_status != 0) {
+		printk("URB submission failed\n");
+		pWb35Tx->EP4vm_state = VM_STOP;
+		goto error;
+	}
 
-		Mds_Tx(Adapter);
-		Wb35Tx(pHwData);
-		return;
-	} while(FALSE);
+	Mds_Tx(Adapter);
+	Wb35Tx(pHwData);
+	return;
 
+error:
 	OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxFireCounter );
 	pWb35Tx->EP4vm_state = VM_STOP;
 }
@@ -225,36 +224,33 @@
 {
 	PWB35TX pWb35Tx = &pHwData->Wb35Tx;
 	struct urb *	pUrb = (struct urb *)pWb35Tx->Tx2Urb;
-	PULONG	pltmp = (PULONG)pWb35Tx->EP2_buf;
+	u32 *	pltmp = (u32 *)pWb35Tx->EP2_buf;
 	int		retv;
 
-	do {
-		if (pHwData->SurpriseRemove || pHwData->HwStop)
-			break;
+	if (pHwData->SurpriseRemove || pHwData->HwStop)
+		goto error;
 
-		if (pWb35Tx->tx_halt)
-			break;
+	if (pWb35Tx->tx_halt)
+		goto error;
 
-		//
-		// Issuing URB
-		//
-		usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
-				  pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
+	//
+	// Issuing URB
+	//
+	usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
+			  pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
 
-		pWb35Tx->EP2vm_state = VM_RUNNING;
-		retv = wb_usb_submit_urb( pUrb );
+	pWb35Tx->EP2vm_state = VM_RUNNING;
+	retv = wb_usb_submit_urb( pUrb );
 
-		if(retv < 0) {
-			#ifdef _PE_TX_DUMP_
-			WBDEBUG(("EP2 Tx Irp sending error\n"));
-			#endif
-			break;
-		}
+	if (retv < 0) {
+		#ifdef _PE_TX_DUMP_
+		WBDEBUG(("EP2 Tx Irp sending error\n"));
+		#endif
+		goto error;
+	}
 
-		return;
-
-	} while(FALSE);
-
+	return;
+error:
 	pWb35Tx->EP2vm_state = VM_STOP;
 	OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
 }
@@ -266,7 +262,7 @@
 	T02_DESCRIPTOR	T02, TSTATUS;
 	PADAPTER	Adapter = (PADAPTER)pHwData->Adapter;
 	PWB35TX		pWb35Tx = &pHwData->Wb35Tx;
-	PULONG		pltmp = (PULONG)pWb35Tx->EP2_buf;
+	u32 *		pltmp = (u32 *)pWb35Tx->EP2_buf;
 	u32		i;
 	u16		InterruptInLength;
 
@@ -275,38 +271,36 @@
 	pWb35Tx->EP2vm_state = VM_COMPLETED;
 	pWb35Tx->EP2VM_status = pUrb->status;
 
-	do {
-		// For Linux 2.4. Interrupt will always trigger
-		if( pHwData->SurpriseRemove || pHwData->HwStop ) // Let WbWlanHalt to handle surprise remove
-			break;
+	// For Linux 2.4. Interrupt will always trigger
+	if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+		goto error;
 
-		if( pWb35Tx->tx_halt )
-			break;
+	if (pWb35Tx->tx_halt)
+		goto error;
 
-		//The Urb is completed, check the result
-		if (pWb35Tx->EP2VM_status != 0) {
-			WBDEBUG(("EP2 IoCompleteRoutine return error\n"));
-			pWb35Tx->EP2vm_state= VM_STOP;
-			break; // Exit while(FALSE);
-		}
+	//The Urb is completed, check the result
+	if (pWb35Tx->EP2VM_status != 0) {
+		WBDEBUG(("EP2 IoCompleteRoutine return error\n"));
+		pWb35Tx->EP2vm_state= VM_STOP;
+		goto error;
+	}
 
-		// Update the Tx result
-		InterruptInLength = pUrb->actual_length;
-		// Modify for minimum memory access and DWORD alignment.
-		T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
-		InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
-		InterruptInLength >>= 2; // InterruptInLength/4
-		for (i=1; i<=InterruptInLength; i++) {
-			T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
+	// Update the Tx result
+	InterruptInLength = pUrb->actual_length;
+	// Modify for minimum memory access and DWORD alignment.
+	T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
+	InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
+	InterruptInLength >>= 2; // InterruptInLength/4
+	for (i = 1; i <= InterruptInLength; i++) {
+		T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
 
-			TSTATUS.value = T02.value;  //20061009 anson's endian
-			Mds_SendComplete( Adapter, &TSTATUS );
-			T02.value = cpu_to_le32(pltmp[i]) >> 8;
-		}
+		TSTATUS.value = T02.value;  //20061009 anson's endian
+		Mds_SendComplete( Adapter, &TSTATUS );
+		T02.value = cpu_to_le32(pltmp[i]) >> 8;
+	}
 
-		return;
-	} while(FALSE);
-
+	return;
+error:
 	OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
 	pWb35Tx->EP2vm_state = VM_STOP;
 }
diff --git a/drivers/staging/winbond/linux/wb35tx_f.h b/drivers/staging/winbond/linux/wb35tx_f.h
index 7705a84..107b129 100644
--- a/drivers/staging/winbond/linux/wb35tx_f.h
+++ b/drivers/staging/winbond/linux/wb35tx_f.h
@@ -3,7 +3,7 @@
 //====================================
 unsigned char Wb35Tx_initial(	 phw_data_t pHwData );
 void Wb35Tx_destroy(  phw_data_t pHwData );
-unsigned char Wb35Tx_get_tx_buffer(  phw_data_t pHwData,  PUCHAR *pBuffer );
+unsigned char Wb35Tx_get_tx_buffer(  phw_data_t pHwData,  u8 **pBuffer );
 
 void Wb35Tx_EP2VM(  phw_data_t pHwData );
 void Wb35Tx_EP2VM_start(  phw_data_t pHwData );
diff --git a/drivers/staging/winbond/linux/wbusb.c b/drivers/staging/winbond/linux/wbusb.c
index cbad5fb..f4a7875 100644
--- a/drivers/staging/winbond/linux/wbusb.c
+++ b/drivers/staging/winbond/linux/wbusb.c
@@ -6,42 +6,29 @@
 #include "sysdef.h"
 #include <net/mac80211.h>
 
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
-
-//============================================================
-// vendor ID and product ID can into here for others
-//============================================================
-static struct usb_device_id Id_Table[] =
-{
-  {USB_DEVICE( 0x0416, 0x0035 )},
-  {USB_DEVICE( 0x18E8, 0x6201 )},
-  {USB_DEVICE( 0x18E8, 0x6206 )},
-  {USB_DEVICE( 0x18E8, 0x6217 )},
-  {USB_DEVICE( 0x18E8, 0x6230 )},
-  {USB_DEVICE( 0x18E8, 0x6233 )},
-  {USB_DEVICE( 0x1131, 0x2035 )},
-  {  }
+static struct usb_device_id wb35_table[] __devinitdata = {
+	{USB_DEVICE(0x0416, 0x0035)},
+	{USB_DEVICE(0x18E8, 0x6201)},
+	{USB_DEVICE(0x18E8, 0x6206)},
+	{USB_DEVICE(0x18E8, 0x6217)},
+	{USB_DEVICE(0x18E8, 0x6230)},
+	{USB_DEVICE(0x18E8, 0x6233)},
+	{USB_DEVICE(0x1131, 0x2035)},
+	{ 0, }
 };
 
-MODULE_DEVICE_TABLE(usb, Id_Table);
+MODULE_DEVICE_TABLE(usb, wb35_table);
 
-static struct usb_driver wb35_driver = {
-	.name =		"w35und",
-	.probe =	wb35_probe,
-	.disconnect = wb35_disconnect,
-	.id_table = Id_Table,
-};
-
-static const struct ieee80211_rate wbsoft_rates[] = {
+static struct ieee80211_rate wbsoft_rates[] = {
 	{ .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 };
 
-static const struct ieee80211_channel wbsoft_channels[] = {
+static struct ieee80211_channel wbsoft_channels[] = {
 	{ .center_freq = 2412},
 };
 
@@ -62,9 +49,22 @@
 	printk("wbsoft_remove interface called\n");
 }
 
-static int wbsoft_nop(void)
+static void wbsoft_stop(struct ieee80211_hw *hw)
 {
-	printk("wbsoft_nop called\n");
+	printk(KERN_INFO "%s called\n", __func__);
+}
+
+static int wbsoft_get_stats(struct ieee80211_hw *hw,
+			    struct ieee80211_low_level_stats *stats)
+{
+	printk(KERN_INFO "%s called\n", __func__);
+	return 0;
+}
+
+static int wbsoft_get_tx_stats(struct ieee80211_hw *hw,
+			       struct ieee80211_tx_queue_stats *stats)
+{
+	printk(KERN_INFO "%s called\n", __func__);
 	return 0;
 }
 
@@ -105,8 +105,7 @@
 	*total_flags = new_flags;
 }
 
-static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
-		      struct ieee80211_tx_control *control)
+static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
 	char *buffer = kmalloc(skb->len, GFP_ATOMIC);
 	printk("Sending frame %d bytes\n", skb->len);
@@ -136,7 +135,7 @@
 	hal_set_current_channel(&my_adapter->sHwData, ch);
 	hal_set_beacon_period(&my_adapter->sHwData, conf->beacon_int);
 //	hal_set_cap_info(&my_adapter->sHwData, ?? );
-// hal_set_ssid(phw_data_t pHwData,  PUCHAR pssid,  u8 ssid_len); ??
+// hal_set_ssid(phw_data_t pHwData,  u8 * pssid,  u8 ssid_len); ??
 	hal_set_accept_broadcast(&my_adapter->sHwData, 1);
 	hal_set_accept_promiscuous(&my_adapter->sHwData,  1);
 	hal_set_accept_multicast(&my_adapter->sHwData,  1);
@@ -148,7 +147,7 @@
 
 //	hal_start_bss(&my_adapter->sHwData, WLAN_BSSTYPE_INFRASTRUCTURE);	??
 
-//void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
+//void hal_set_rates(phw_data_t pHwData, u8 * pbss_rates,
 //		   u8 length, unsigned char basic_rate_set)
 
 	return 0;
@@ -171,14 +170,14 @@
 static const struct ieee80211_ops wbsoft_ops = {
 	.tx			= wbsoft_tx,
 	.start			= wbsoft_start,		/* Start can be pretty much empty as we do WbWLanInitialize() during probe? */
-	.stop			= wbsoft_nop,
+	.stop			= wbsoft_stop,
 	.add_interface		= wbsoft_add_interface,
 	.remove_interface	= wbsoft_remove_interface,
 	.config			= wbsoft_config,
 	.config_interface	= wbsoft_config_interface,
 	.configure_filter	= wbsoft_configure_filter,
-	.get_stats		= wbsoft_nop,
-	.get_tx_stats		= wbsoft_nop,
+	.get_stats		= wbsoft_get_stats,
+	.get_tx_stats		= wbsoft_get_tx_stats,
 	.get_tsf		= wbsoft_get_tsf,
 // conf_tx: hal_set_cwmin()/hal_set_cwmax;
 };
@@ -187,21 +186,6 @@
 };
 
 
-int __init wb35_init(void)
-{
-	printk("[w35und]driver init\n");
-	return usb_register(&wb35_driver);
-}
-
-void __exit wb35_exit(void)
-{
-	printk("[w35und]driver exit\n");
-	usb_deregister( &wb35_driver );
-}
-
-module_init(wb35_init);
-module_exit(wb35_exit);
-
 // Usb kernel subsystem will call this function when a new device is plugged into.
 int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
 {
@@ -210,7 +194,7 @@
 	PWBUSB		pWbUsb;
         struct usb_host_interface *interface;
 	struct usb_endpoint_descriptor *endpoint;
-	int	i, ret = -1;
+	int	ret = -1;
 	u32	ltmp;
 	struct usb_device *udev = interface_to_usbdev(intf);
 
@@ -218,114 +202,95 @@
 
 	printk("[w35und]wb35_probe ->\n");
 
-	do {
-		for (i=0; i<(sizeof(Id_Table)/sizeof(struct usb_device_id)); i++ ) {
-			if ((udev->descriptor.idVendor == Id_Table[i].idVendor) &&
-				(udev->descriptor.idProduct == Id_Table[i].idProduct)) {
-				printk("[w35und]Found supported hardware\n");
-				break;
-			}
-		}
-		if ((i == (sizeof(Id_Table)/sizeof(struct usb_device_id)))) {
-			#ifdef _PE_USB_INI_DUMP_
-			WBDEBUG(("[w35und] This is not the one we are interested about\n"));
-			#endif
-			return -ENODEV;
+	// 20060630.2 Check the device if it already be opened
+	ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
+			      0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
+			      0x0, 0x400, &ltmp, 4, HZ*100 );
+	if (ret < 0)
+		goto error;
+
+	ltmp = cpu_to_le32(ltmp);
+	if (ltmp)  // Is already initialized?
+		goto error;
+
+	Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL);
+
+	my_adapter = Adapter;
+	pWbLinux = &Adapter->WbLinux;
+	pWbUsb = &Adapter->sHwData.WbUsb;
+	pWbUsb->udev = udev;
+
+        interface = intf->cur_altsetting;
+        endpoint = &interface->endpoint[0].desc;
+
+	if (endpoint[2].wMaxPacketSize == 512) {
+		printk("[w35und] Working on USB 2.0\n");
+		pWbUsb->IsUsb20 = 1;
+	}
+
+	if (!WbWLanInitialize(Adapter)) {
+		printk("[w35und]WbWLanInitialize fail\n");
+		goto error;
+	}
+
+	{
+		struct wbsoft_priv *priv;
+		struct ieee80211_hw *dev;
+		static struct ieee80211_supported_band band;
+		int res;
+
+		dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
+
+		if (!dev) {
+			printk("w35und: ieee80211 alloc failed\n" );
+			BUG();
 		}
 
-		// 20060630.2 Check the device if it already be opened
-		ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
-				      0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
-				      0x0, 0x400, &ltmp, 4, HZ*100 );
-		if( ret < 0 )
-			break;
+		my_dev = dev;
 
-		ltmp = cpu_to_le32(ltmp);
-		if (ltmp)  // Is already initialized?
-			break;
-
-
-		Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL);
-
-		my_adapter = Adapter;
-		pWbLinux = &Adapter->WbLinux;
-		pWbUsb = &Adapter->sHwData.WbUsb;
-		pWbUsb->udev = udev;
-
-	        interface = intf->cur_altsetting;
-	        endpoint = &interface->endpoint[0].desc;
-
-		if (endpoint[2].wMaxPacketSize == 512) {
-			printk("[w35und] Working on USB 2.0\n");
-			pWbUsb->IsUsb20 = 1;
-		}
-
-		if (!WbWLanInitialize(Adapter)) {
-			printk("[w35und]WbWLanInitialize fail\n");
-			break;
-		}
-
+		SET_IEEE80211_DEV(dev, &udev->dev);
 		{
-			struct wbsoft_priv *priv;
-			struct ieee80211_hw *dev;
-			int res;
-
-			dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
-
-			if (!dev) {
-				printk("w35und: ieee80211 alloc failed\n" );
-				BUG();
-			}
-
-			my_dev = dev;
-
-			SET_IEEE80211_DEV(dev, &udev->dev);
-			{
-				phw_data_t pHwData = &Adapter->sHwData;
-				unsigned char		dev_addr[MAX_ADDR_LEN];
-				hal_get_permanent_address(pHwData, dev_addr);
-				SET_IEEE80211_PERM_ADDR(dev, dev_addr);
-			}
+			phw_data_t pHwData = &Adapter->sHwData;
+			unsigned char		dev_addr[MAX_ADDR_LEN];
+			hal_get_permanent_address(pHwData, dev_addr);
+			SET_IEEE80211_PERM_ADDR(dev, dev_addr);
+		}
 
 
-			dev->extra_tx_headroom = 12;	/* FIXME */
-			dev->flags = 0;
+		dev->extra_tx_headroom = 12;	/* FIXME */
+		dev->flags = 0;
 
-			dev->channel_change_time = 1000;
-//			dev->max_rssi = 100;
+		dev->channel_change_time = 1000;
+//		dev->max_rssi = 100;
 
-			dev->queues = 1;
+		dev->queues = 1;
 
-			static struct ieee80211_supported_band band;
+		band.channels = wbsoft_channels;
+		band.n_channels = ARRAY_SIZE(wbsoft_channels);
+		band.bitrates = wbsoft_rates;
+		band.n_bitrates = ARRAY_SIZE(wbsoft_rates);
 
-			band.channels = wbsoft_channels;
-			band.n_channels = ARRAY_SIZE(wbsoft_channels);
-			band.bitrates = wbsoft_rates;
-			band.n_bitrates = ARRAY_SIZE(wbsoft_rates);
-
-			dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band;
+		dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band;
 #if 0
-			wbsoft_modes[0].num_channels = 1;
-			wbsoft_modes[0].channels = wbsoft_channels;
-			wbsoft_modes[0].mode = MODE_IEEE80211B;
-			wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates);
-			wbsoft_modes[0].rates = wbsoft_rates;
+		wbsoft_modes[0].num_channels = 1;
+		wbsoft_modes[0].channels = wbsoft_channels;
+		wbsoft_modes[0].mode = MODE_IEEE80211B;
+		wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates);
+		wbsoft_modes[0].rates = wbsoft_rates;
 
-			res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]);
-			BUG_ON(res);
+		res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]);
+		BUG_ON(res);
 #endif
 
-			res = ieee80211_register_hw(dev);
-			BUG_ON(res);
-		}
+		res = ieee80211_register_hw(dev);
+		BUG_ON(res);
+	}
 
-		usb_set_intfdata( intf, Adapter );
+	usb_set_intfdata( intf, Adapter );
 
-		printk("[w35und] _probe OK\n");
-		return 0;
-
-	} while(FALSE);
-
+	printk("[w35und] _probe OK\n");
+	return 0;
+error:
 	return -ENOMEM;
 }
 
@@ -401,4 +366,22 @@
 
 }
 
+static struct usb_driver wb35_driver = {
+	.name		= "w35und",
+	.id_table	= wb35_table,
+	.probe		= wb35_probe,
+	.disconnect	= wb35_disconnect,
+};
 
+static int __init wb35_init(void)
+{
+	return usb_register(&wb35_driver);
+}
+
+static void __exit wb35_exit(void)
+{
+	usb_deregister(&wb35_driver);
+}
+
+module_init(wb35_init);
+module_exit(wb35_exit);
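
With the open-coded ID-table scan removed from wb35_probe() and the usb_driver plus module_init/module_exit moved to the bottom of the file, wbusb.c now follows the usual USB driver registration shape. A self-contained sketch of that shape is below; the names and device IDs are placeholders, not the driver's.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb.h>

static struct usb_device_id example_table[] = {
	{ USB_DEVICE(0x0416, 0x0035) },	/* vendor/product pair, as in the table above */
	{ 0, }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_table);

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	/* The USB core only calls this for IDs listed in example_table,
	 * so no manual vendor/product loop is needed here. */
	return 0;
}

static void example_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver example_driver = {
	.name		= "example",
	.id_table	= example_table,
	.probe		= example_probe,
	.disconnect	= example_disconnect,
};

static int __init example_init(void)
{
	return usb_register(&example_driver);
}

static void __exit example_exit(void)
{
	usb_deregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because the id_table is handed to the core through the usb_driver, probe() is only invoked for matching devices, which is what makes the removed scan in wb35_probe() redundant.
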
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 8ce6389..f1de813 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -40,7 +40,7 @@
 	PMDS		pMds = &Adapter->Mds;
 	DESCRIPTOR	TxDes;
 	PDESCRIPTOR	pTxDes = &TxDes;
-	PUCHAR		XmitBufAddress;
+	u8		*XmitBufAddress;
 	u16		XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
 	u8		FillIndex, TxDesIndex, FragmentCount, FillCount;
 	unsigned char	BufferFilled = FALSE, MICAdd = 0;
@@ -90,7 +90,7 @@
 			BufferFilled = TRUE;
 
 			/* Leaves first u8 intact */
-			memset((PUCHAR)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1);
+			memset((u8 *)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1);
 
 			TxDesIndex = pMds->TxDesIndex;//Get the current ID
 			pTxDes->Descriptor_ID = TxDesIndex;
@@ -229,10 +229,10 @@
 }
 
 void
-Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
+Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
 {
 	PMDS	pMds = &Adapter->Mds;
-	PUCHAR	src_buffer = pDes->buffer_address[0];//931130.5.g
+	u8	*src_buffer = pDes->buffer_address[0];//931130.5.g
 	PT00_DESCRIPTOR	pT00;
 	PT01_DESCRIPTOR	pT01;
 	u16	stmp;
@@ -276,7 +276,7 @@
 	//
 	// Set tx rate
 	//
-	stmp = *(PUSHORT)(TargetBuffer+30); // 2n alignment address
+	stmp = *(u16 *)(TargetBuffer+30); // 2n alignment address
 
 	//Use basic rate
 	ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
@@ -326,11 +326,13 @@
 
 // The function return the 4n size of usb pk
 u16
-Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
+Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
 {
 	PT00_DESCRIPTOR	pT00;
 	PMDS	pMds = &Adapter->Mds;
-	PUCHAR	buffer, src_buffer, pctmp;
+	u8	*buffer;
+	u8	*src_buffer;
+	u8	*pctmp;
 	u16	Size = 0;
 	u16	SizeLeft, CopySize, CopyLeft, stmp;
 	u8	buf_index, FragmentCount = 0;
@@ -354,7 +356,7 @@
 		SizeLeft -= CopySize;
 
 		// 1 Byte operation
-		pctmp = (PUCHAR)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
+		pctmp = (u8 *)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
 		*pctmp &= 0xf0;
 		*pctmp |= FragmentCount;//931130.5.m
 		if( !FragmentCount )
@@ -379,7 +381,7 @@
 				buf_index++;
 				buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
 			} else {
-				PUCHAR	pctmp = pDes->buffer_address[buf_index];
+				u8	*pctmp = pDes->buffer_address[buf_index];
 				pctmp += CopySize;
 				pDes->buffer_address[buf_index] = pctmp;
 				pDes->buffer_size[buf_index] -= CopySize;
@@ -419,7 +421,7 @@
 
 	pT00->T00_last_mpdu = 1;
 	pT00->T00_IsLastMpdu = 1;
-	buffer = (PUCHAR)pT00 + 8; // +8 for USB hdr
+	buffer = (u8 *)pT00 + 8; // +8 for USB hdr
 	buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control
 	pDes->FragmentCount = FragmentCount; // Update the correct fragment number
 	return Size;
@@ -427,7 +429,7 @@
 
 
 void
-Mds_DurationSet(  PADAPTER Adapter,  PDESCRIPTOR pDes,  PUCHAR buffer )
+Mds_DurationSet(  PADAPTER Adapter,  PDESCRIPTOR pDes,  u8 *buffer )
 {
 	PT00_DESCRIPTOR	pT00;
 	PT01_DESCRIPTOR	pT01;
@@ -435,7 +437,7 @@
 	u8	Rate, i;
 	unsigned char	CTS_on = FALSE, RTS_on = FALSE;
 	PT00_DESCRIPTOR pNextT00;
-	u16 BodyLen;
+	u16 BodyLen = 0;
 	unsigned char boGroupAddr = FALSE;
 
 
@@ -574,7 +576,7 @@
 							DEFAULT_SIFSTIME*3 );
 			}
 
-			((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
+			((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
 
 			//----20061009 add by anson's endian
 			pNextT00->value = cpu_to_le32(pNextT00->value);
@@ -615,7 +617,7 @@
 		}
 	}
 
-	((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
+	((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
 	pT00->value = cpu_to_le32(pT00->value);
 	pT01->value = cpu_to_le32(pT01->value);
 	//--end 20061009 add
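
These mds.c hunks, like the reg.c and HAL changes below, swap the NDIS-style pointer typedefs for the kernel's fixed-width types: PUCHAR becomes u8 *, PCHAR becomes s8 *, PUSHORT becomes u16 *, and PULONG becomes u32 *. A small hypothetical before/after is sketched below; the helper name and layout are illustrative, not taken from the driver.

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Before (NDIS-style aliases):
 *
 *	void set_duration(PUCHAR buffer, USHORT duration)
 *	{
 *		((PUSHORT)buffer)[5] = cpu_to_le16(duration);
 *	}
 *
 * After, using the kernel's fixed-width types directly:
 */
static void set_duration(u8 *buffer, u16 duration)
{
	__le16 *p = (__le16 *)buffer;	/* 2-byte aligned field inside the frame */

	p[5] = cpu_to_le16(duration);
}
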
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index 651188b..7a682d4 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -1,9 +1,9 @@
 unsigned char Mds_initial(  PADAPTER Adapter );
 void Mds_Destroy(  PADAPTER Adapter );
 void Mds_Tx(  PADAPTER Adapter );
-void Mds_HeaderCopy(  PADAPTER Adapter,  PDESCRIPTOR pDes,  PUCHAR TargetBuffer );
-u16 Mds_BodyCopy(  PADAPTER Adapter,  PDESCRIPTOR pDes,  PUCHAR TargetBuffer );
-void Mds_DurationSet(  PADAPTER Adapter,  PDESCRIPTOR pDes,  PUCHAR TargetBuffer );
+void Mds_HeaderCopy(  PADAPTER Adapter,  PDESCRIPTOR pDes,  u8 *TargetBuffer );
+u16 Mds_BodyCopy(  PADAPTER Adapter,  PDESCRIPTOR pDes,  u8 *TargetBuffer );
+void Mds_DurationSet(  PADAPTER Adapter,  PDESCRIPTOR pDes,  u8 *TargetBuffer );
 void Mds_SendComplete(  PADAPTER Adapter,  PT02_DESCRIPTOR pT02 );
 void Mds_MpduProcess(  PADAPTER Adapter,  PDESCRIPTOR pRxDes );
 void Mds_reset_descriptor(  PADAPTER Adapter );
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index 4738279..9df2e09 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -86,7 +86,7 @@
 {
 	// For Tx usage
 	u8	TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ];
-	PUCHAR	pTxBuffer;
+	u8	*pTxBuffer;
 	u16	TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ];
 	u8	TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data
 	u8	TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928
@@ -103,7 +103,7 @@
 	u16	TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu
 
 	u8	MicRedundant[8]; // For tmp use
-	PUCHAR	MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
+	u8	*MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
 
 	u16	MicWriteSize[2]; //931130.4.x
 
@@ -144,7 +144,7 @@
 
 typedef struct _RxBuffer
 {
-    PUCHAR  pBufferAddress;     // Pointer the received data buffer.
+    u8 * pBufferAddress;     // Pointer the received data buffer.
 	u16	BufferSize;
 	u8	RESERVED;
 	u8	BufferIndex;// Only 1 byte
@@ -176,7 +176,7 @@
 	/////////////////////////////////////////////////////////////////////////////////////////////
 	// For brand-new Rx system
 	u8	ReservedBuffer[ 2400 ];//If Buffer ID is reserved one, it must copy the data into this area
-	PUCHAR	ReservedBufferPoint;// Point to the next availabe address of reserved buffer
+	u8	*ReservedBufferPoint;// Point to the next availabe address of reserved buffer
 
 }RXLAYER1, * PRXLAYER1;
 
diff --git a/drivers/staging/winbond/mlme_s.h b/drivers/staging/winbond/mlme_s.h
index 58094f6..039fd40 100644
--- a/drivers/staging/winbond/mlme_s.h
+++ b/drivers/staging/winbond/mlme_s.h
@@ -125,12 +125,12 @@
 typedef struct _MLME_FRAME
 {
 	//NDIS_PACKET		MLME_Packet;
-	PCHAR			pMMPDU;
+	s8 *			pMMPDU;
 	u16			len;
 	u8			DataType;
 	u8			IsInUsed;
 
-	OS_SPIN_LOCK	MLMESpinLock;
+	spinlock_t	MLMESpinLock;
 
     u8		TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
 	u8		TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ];
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
index 46b091e..e8533b8d1 100644
--- a/drivers/staging/winbond/mlmetxrx.c
+++ b/drivers/staging/winbond/mlmetxrx.c
@@ -113,13 +113,13 @@
 	pDes->Type = Adapter->sMlmeFrame.DataType;
 }
 
-void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, PCHAR pData)
+void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, s8 *pData)
 {
 	int i;
 
 	// Reclaim the data buffer
 	for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
-		if (pData == (PCHAR)&(Adapter->sMlmeFrame.TxMMPDU[i]))
+		if (pData == (s8 *)&(Adapter->sMlmeFrame.TxMMPDU[i]))
 			break;
 	}
 	if (Adapter->sMlmeFrame.TxMMPDUInUse[i])
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
index d74e225..24cd5f3 100644
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ b/drivers/staging/winbond/mlmetxrx_f.h
@@ -20,7 +20,7 @@
      PWB32_ADAPTER    Adapter
    );
 
-void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter,  PCHAR pData);
+void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter,  s8 * pData);
 
 void MLME_GetNextPacket(  PADAPTER Adapter,  PDESCRIPTOR pDes );
 u8 MLMESendFrame( PWB32_ADAPTER Adapter,
@@ -42,7 +42,7 @@
 void
 MLMEReturnPacket(
      PWB32_ADAPTER    Adapter,
-     PUCHAR           pRxBufer
+     u8 *          pRxBufer
    );
 #ifdef _IBSS_BEACON_SEQ_STICK_
 s8 SendBCNullData(PWB32_ADAPTER Adapter, u16 wIdx);
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index b475c7a..57af5b8 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -922,16 +922,16 @@
 	// Only unplug and plug again can make hardware read EEPROM again. 20060727
 	Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d)
 	Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
-	*(PUSHORT)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+	*(u16 *)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
 	Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d)
 	Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
-	*(PUSHORT)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+	*(u16 *)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
 	Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d)
 	Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
-	*(PUSHORT)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
-	*(PUSHORT)(pHwData->PermanentMacAddress + 6) = 0;
-	Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(PULONG)pHwData->PermanentMacAddress) ); //20060926 anson's endian
-	Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(PULONG)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
+	*(u16 *)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+	*(u16 *)(pHwData->PermanentMacAddress + 6) = 0;
+	Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(u32 *)pHwData->PermanentMacAddress) ); //20060926 anson's endian
+	Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(u32 *)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
 }
 
 
@@ -1038,7 +1038,7 @@
 RFSynthesizer_initial(phw_data_t pHwData)
 {
 	u32	altmp[32];
-	PULONG	pltmp = altmp;
+	u32 *	pltmp = altmp;
 	u32	ltmp;
 	u8	number=0x00; // The number of register vale
 	u8	i;
@@ -2358,11 +2358,11 @@
 	pltmp[2] = pWb35Reg->M2C_MacControl;
 
 	// M30 BSSID
-	pltmp[3] = *(PULONG)pHwData->bssid;
+	pltmp[3] = *(u32 *)pHwData->bssid;
 
 	// M34
 	pHwData->AID = DEFAULT_AID;
-	tmp = *(PUSHORT)(pHwData->bssid+4);
+	tmp = *(u16 *)(pHwData->bssid+4);
 	tmp |= DEFAULT_AID << 16;
 	pltmp[4] = tmp;
 
@@ -2428,7 +2428,7 @@
 {
 	u32		i, j, ltmp;
 	u16		Value[MAX_TXVGA_EEPROM];
-	PUCHAR		pctmp;
+	u8		*pctmp;
 	u8		ctmp=0;
 
 	// Get the entire TxVga setting in EEPROM
@@ -2441,7 +2441,7 @@
 	}
 
 	// Adjust the filed which fills with reserved value.
-	pctmp = (PUCHAR)Value;
+	pctmp = (u8 *)Value;
 	for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ )
 	{
 		if( pctmp[i] != 0xff )
@@ -2480,7 +2480,7 @@
 // This function will use default TxVgaSettingInEEPROM data to calculate new TxVga.
 void EEPROMTxVgaAdjust(  phw_data_t pHwData ) // 20060619.5 Add
 {
-	PUCHAR		pTxVga = pHwData->TxVgaSettingInEEPROM;
+	u8	*	pTxVga = pHwData->TxVgaSettingInEEPROM;
 	s16		i, stmp;
 
 	//-- 2.4G -- 20060704.2 Request from Tiger
diff --git a/drivers/staging/winbond/sme_api.c b/drivers/staging/winbond/sme_api.c
index 40e93b7..31c9673 100644
--- a/drivers/staging/winbond/sme_api.c
+++ b/drivers/staging/winbond/sme_api.c
@@ -10,4 +10,5 @@
 s8 sme_get_rssi(void *pcore_data, s32 *prssi)
 {
        BUG();
+       return 0;
 }
diff --git a/drivers/staging/winbond/sme_api.h b/drivers/staging/winbond/sme_api.h
index 016b225..745eb37 100644
--- a/drivers/staging/winbond/sme_api.h
+++ b/drivers/staging/winbond/sme_api.h
@@ -208,7 +208,7 @@
 s8 sme_set_IBSS_chan(void *pcore_data, ChanInfo chan);
 
 //20061108 WPS
-s8 sme_set_IE_append(void *pcore_data, PUCHAR buffer, u16 buf_len);
+s8 sme_set_IE_append(void *pcore_data, u8 *buffer, u16 buf_len);
 
 
 
diff --git a/drivers/staging/winbond/wbhal.c b/drivers/staging/winbond/wbhal.c
index daf4422..5d68ece 100644
--- a/drivers/staging/winbond/wbhal.c
+++ b/drivers/staging/winbond/wbhal.c
@@ -1,13 +1,13 @@
 #include "os_common.h"
 
-void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
+void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address )
 {
 	if( pHwData->SurpriseRemove ) return;
 
 	memcpy( current_address, pHwData->CurrentMacAddress, ETH_LENGTH_OF_ADDRESS );
 }
 
-void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
+void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address )
 {
 	u32 ltmp[2];
 
@@ -15,13 +15,13 @@
 
 	memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS );
 
-	ltmp[0]= cpu_to_le32( *(PULONG)pHwData->CurrentMacAddress );
-	ltmp[1]= cpu_to_le32( *(PULONG)(pHwData->CurrentMacAddress + 4) ) & 0xffff;
+	ltmp[0]= cpu_to_le32( *(u32 *)pHwData->CurrentMacAddress );
+	ltmp[1]= cpu_to_le32( *(u32 *)(pHwData->CurrentMacAddress + 4) ) & 0xffff;
 
 	Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT );
 }
 
-void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address )
+void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address )
 {
 	if( pHwData->SurpriseRemove ) return;
 
@@ -89,7 +89,7 @@
 }
 
 //---------------------------------------------------------------------------------------------------
-void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
+void hal_set_rates(phw_data_t pHwData, u8 *pbss_rates,
 		   u8 length, unsigned char basic_rate_set)
 {
 	PWB35REG	pWb35Reg = &pHwData->Wb35Reg;
@@ -158,13 +158,13 @@
 	// Fill data into support rate until buffer full
 	//---20060926 add by anson's endian
 	for (i=0; i<4; i++)
-		*(PULONG)(SupportedRate+(i<<2)) = cpu_to_le32( *(PULONG)(SupportedRate+(i<<2)) );
+		*(u32 *)(SupportedRate+(i<<2)) = cpu_to_le32( *(u32 *)(SupportedRate+(i<<2)) );
 	//--- end 20060926 add by anson's endian
-	Wb35Reg_BurstWrite( pHwData,0x087c, (PULONG)SupportedRate, 4, AUTO_INCREMENT );
-	pWb35Reg->M7C_MacControl = ((PULONG)SupportedRate)[0];
-	pWb35Reg->M80_MacControl = ((PULONG)SupportedRate)[1];
-	pWb35Reg->M84_MacControl = ((PULONG)SupportedRate)[2];
-	pWb35Reg->M88_MacControl = ((PULONG)SupportedRate)[3];
+	Wb35Reg_BurstWrite( pHwData,0x087c, (u32 *)SupportedRate, 4, AUTO_INCREMENT );
+	pWb35Reg->M7C_MacControl = ((u32 *)SupportedRate)[0];
+	pWb35Reg->M80_MacControl = ((u32 *)SupportedRate)[1];
+	pWb35Reg->M84_MacControl = ((u32 *)SupportedRate)[2];
+	pWb35Reg->M88_MacControl = ((u32 *)SupportedRate)[3];
 
 	// Fill length
 	tmp = Count1<<28 | Count2<<24;
@@ -206,7 +206,7 @@
 	pWb35Reg->M28_MacControl &= ~0xff; // Clean channel information field
 	pWb35Reg->M28_MacControl |= channel.ChanNo;
 	Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, pWb35Reg->M28_MacControl,
-					(PCHAR)&channel, sizeof(ChanInfo));
+					(s8 *)&channel, sizeof(ChanInfo));
 }
 //---------------------------------------------------------------------------------------------------
 void hal_set_current_channel(  phw_data_t pHwData,  ChanInfo channel )
@@ -277,7 +277,7 @@
 	Wb35Reg_Write( pHwData, 0x0800, pWb35Reg->M00_MacControl );
 }
 //---------------------------------------------------------------------------------------------------
-void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number )
+void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number )
 {
 	PWB35REG	pWb35Reg = &pHwData->Wb35Reg;
 	u8		Byte, Bit;
@@ -297,7 +297,7 @@
 	}
 
 	// Updating register
-	Wb35Reg_BurstWrite( pHwData, 0x0804, (PULONG)pWb35Reg->Multicast, 2, AUTO_INCREMENT );
+	Wb35Reg_BurstWrite( pHwData, 0x0804, (u32 *)pWb35Reg->Multicast, 2, AUTO_INCREMENT );
 }
 //---------------------------------------------------------------------------------------------------
 u8 hal_get_accept_beacon(  phw_data_t pHwData )
@@ -806,7 +806,7 @@
 	}
 }
 
-unsigned char hal_get_dxx_reg(  phw_data_t pHwData,  u16 number,  PULONG pValue )
+unsigned char hal_get_dxx_reg(  phw_data_t pHwData,  u16 number,  u32 * pValue )
 {
 	if( number < 0x1000 )
 		number += 0x1000;
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
index fe25f97..ea9531a 100644
--- a/drivers/staging/winbond/wbhal_f.h
+++ b/drivers/staging/winbond/wbhal_f.h
@@ -16,23 +16,23 @@
 //====================================================================================
 // Function declaration
 //====================================================================================
-void hal_remove_mapping_key(  phw_data_t pHwData,  PUCHAR pmac_addr );
+void hal_remove_mapping_key(  phw_data_t pHwData,  u8 *pmac_addr );
 void hal_remove_default_key(  phw_data_t pHwData,  u32 index );
-unsigned char hal_set_mapping_key(  phw_data_t Adapter,  PUCHAR pmac_addr,  u8 null_key,  u8 wep_on,  PUCHAR ptx_tsc,  PUCHAR prx_tsc,  u8 key_type,  u8 key_len,  PUCHAR pkey_data );
-unsigned char hal_set_default_key(  phw_data_t Adapter,  u8 index,  u8 null_key,  u8 wep_on,  PUCHAR ptx_tsc,  PUCHAR prx_tsc,  u8 key_type,  u8 key_len,  PUCHAR pkey_data );
+unsigned char hal_set_mapping_key(  phw_data_t Adapter,  u8 *pmac_addr,  u8 null_key,  u8 wep_on,  u8 *ptx_tsc,  u8 *prx_tsc,  u8 key_type,  u8 key_len,  u8 *pkey_data );
+unsigned char hal_set_default_key(  phw_data_t Adapter,  u8 index,  u8 null_key,  u8 wep_on,  u8 *ptx_tsc,  u8 *prx_tsc,  u8 key_type,  u8 key_len,  u8 *pkey_data );
 void hal_clear_all_default_key(  phw_data_t pHwData );
 void hal_clear_all_group_key(  phw_data_t pHwData );
 void hal_clear_all_mapping_key(  phw_data_t pHwData );
 void hal_clear_all_key(  phw_data_t pHwData );
-void hal_get_ethernet_address(  phw_data_t pHwData,  PUCHAR current_address );
-void hal_set_ethernet_address(  phw_data_t pHwData,  PUCHAR current_address );
-void hal_get_permanent_address(  phw_data_t pHwData,  PUCHAR pethernet_address );
+void hal_get_ethernet_address(  phw_data_t pHwData,  u8 *current_address );
+void hal_set_ethernet_address(  phw_data_t pHwData,  u8 *current_address );
+void hal_get_permanent_address(  phw_data_t pHwData,  u8 *pethernet_address );
 unsigned char hal_init_hardware(  phw_data_t pHwData,  PADAPTER Adapter );
 void hal_set_power_save_mode(  phw_data_t pHwData,  unsigned char power_save,  unsigned char wakeup,  unsigned char dtim );
-void hal_get_power_save_mode(  phw_data_t pHwData,   PBOOLEAN pin_pwr_save );
+void hal_get_power_save_mode(  phw_data_t pHwData,   u8 *pin_pwr_save );
 void hal_set_slot_time(  phw_data_t pHwData,  u8 type );
 #define hal_set_atim_window( _A, _ATM )
-void hal_set_rates(  phw_data_t pHwData,  PUCHAR pbss_rates,  u8 length,  unsigned char basic_rate_set );
+void hal_set_rates(  phw_data_t pHwData,  u8 *pbss_rates,  u8 length,  unsigned char basic_rate_set );
 #define hal_set_basic_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, TRUE )
 #define hal_set_op_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, FALSE )
 void hal_start_bss(  phw_data_t pHwData,  u8 mac_op_mode );
@@ -40,19 +40,19 @@
 void hal_stop_sync_bss(  phw_data_t pHwData );
 void hal_resume_sync_bss(  phw_data_t pHwData);
 void hal_set_aid(  phw_data_t pHwData,  u16 aid );
-void hal_set_bssid(  phw_data_t pHwData,  PUCHAR pbssid );
-void hal_get_bssid(  phw_data_t pHwData,  PUCHAR pbssid );
+void hal_set_bssid(  phw_data_t pHwData,  u8 *pbssid );
+void hal_get_bssid(  phw_data_t pHwData,  u8 *pbssid );
 void hal_set_beacon_period(  phw_data_t pHwData,  u16 beacon_period );
 void hal_set_listen_interval(  phw_data_t pHwData,  u16 listen_interval );
 void hal_set_cap_info(  phw_data_t pHwData,  u16 capability_info );
-void hal_set_ssid(  phw_data_t pHwData,  PUCHAR pssid,  u8 ssid_len );
+void hal_set_ssid(  phw_data_t pHwData,  u8 *pssid,  u8 ssid_len );
 void hal_set_current_channel(  phw_data_t pHwData,  ChanInfo channel );
 void hal_set_current_channel_ex(  phw_data_t pHwData,  ChanInfo channel );
 void hal_get_current_channel(  phw_data_t pHwData,  ChanInfo *channel );
 void hal_set_accept_broadcast(  phw_data_t pHwData,  u8 enable );
 void hal_set_accept_multicast(  phw_data_t pHwData,  u8 enable );
 void hal_set_accept_beacon(  phw_data_t pHwData,  u8 enable );
-void hal_set_multicast_address(  phw_data_t pHwData,  PUCHAR address,  u8 number );
+void hal_set_multicast_address(  phw_data_t pHwData,  u8 *address,  u8 number );
 u8 hal_get_accept_beacon(  phw_data_t pHwData );
 void hal_stop(  phw_data_t pHwData );
 void hal_halt(  phw_data_t pHwData, void *ppa_data );
@@ -97,7 +97,7 @@
 
 
 void hal_rate_change(  phw_data_t pHwData ); // Notify the HAL rate is changing 20060613.1
-unsigned char hal_get_dxx_reg(  phw_data_t pHwData,  u16 number,  PULONG pValue );
+unsigned char hal_get_dxx_reg(  phw_data_t pHwData,  u16 number,  u32 * pValue );
 unsigned char hal_set_dxx_reg(  phw_data_t pHwData,  u16 number,  u32 value );
 #define hal_get_time_count( _P )	(_P->time_count/10)	// return 100ms count
 #define hal_detect_error( _P )		(_P->WbUsb.DetectCount)
@@ -116,7 +116,7 @@
 #define pa_stall_execution( _A )	//OS_SLEEP( 1 )
 #define hw_get_cxx_reg( _A, _B, _C )
 #define hw_set_cxx_reg( _A, _B, _C )
-#define hw_get_dxx_reg( _A, _B, _C )	hal_get_dxx_reg( _A, _B, (PULONG)_C )
+#define hw_get_dxx_reg( _A, _B, _C )	hal_get_dxx_reg( _A, _B, (u32 *)_C )
 #define hw_set_dxx_reg( _A, _B, _C )	hal_set_dxx_reg( _A, _B, (u32)_C )
 
 
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal_s.h
index 5b862ff..2ee3f0f 100644
--- a/drivers/staging/winbond/wbhal_s.h
+++ b/drivers/staging/winbond/wbhal_s.h
@@ -461,7 +461,7 @@
 	//=====================================================================
 	// Definition for 802.11
 	//=====================================================================
-	PUCHAR	bssid_pointer; // Used by hal_get_bssid for return value
+	u8	*bssid_pointer; // Used by hal_get_bssid for return value
 	u8	bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer
 	u8	ssid[32];// maximum ssid length is 32 byte
 
@@ -486,7 +486,7 @@
 	u32	CurrentRadioSw; // 20060320.2 0:On 1:Off
 	u32	CurrentRadioHw; // 20060825 0:On 1:Off
 
-	PUCHAR	power_save_point;  // Used by hal_get_power_save_mode for return value
+	u8	*power_save_point;  // Used by hal_get_power_save_mode for return value
 	u8	cwmin;
 	u8	desired_power_save;
 	u8	dtim;// Is running dtim
diff --git a/drivers/staging/winbond/wblinux.c b/drivers/staging/winbond/wblinux.c
index 2eade5a..4ed45e4 100644
--- a/drivers/staging/winbond/wblinux.c
+++ b/drivers/staging/winbond/wblinux.c
@@ -25,11 +25,11 @@
 {
 	PWBLINUX pWbLinux = &Adapter->WbLinux;
 	u32	ltmp;
-	PULONG	pltmp = (PULONG)pAtomic;
-	OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock );
+	u32 *	pltmp = (u32 *)pAtomic;
+	spin_lock_irq( &pWbLinux->AtomicSpinLock );
 	(*pltmp)++;
 	ltmp = (*pltmp);
-	OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock );
+	spin_unlock_irq( &pWbLinux->AtomicSpinLock );
 	return ltmp;
 }
 
@@ -38,11 +38,11 @@
 {
 	PWBLINUX pWbLinux = &Adapter->WbLinux;
 	u32	ltmp;
-	PULONG	pltmp = (PULONG)pAtomic;
-	OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock );
+	u32 *	pltmp = (u32 *)pAtomic;
+	spin_lock_irq( &pWbLinux->AtomicSpinLock );
 	(*pltmp)--;
 	ltmp = (*pltmp);
-	OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock );
+	spin_unlock_irq( &pWbLinux->AtomicSpinLock );
 	return ltmp;
 }
 
@@ -51,8 +51,8 @@
 {
 	PWBLINUX pWbLinux = &Adapter->WbLinux;
 
-	OS_SPIN_LOCK_ALLOCATE( &pWbLinux->SpinLock );
-	OS_SPIN_LOCK_ALLOCATE( &pWbLinux->AtomicSpinLock );
+	spin_lock_init( &pWbLinux->SpinLock );
+	spin_lock_init( &pWbLinux->AtomicSpinLock );
 	return TRUE;
 }
 
@@ -79,7 +79,6 @@
 WBLINUX_Destroy(PADAPTER Adapter)
 {
 	WBLINUX_stop( Adapter );
-	OS_SPIN_LOCK_FREE( &pWbNdis->SpinLock );
 #ifdef _PE_USB_INI_DUMP_
 	WBDEBUG(("[w35und] unregister_netdev!\n"));
 #endif
@@ -142,119 +141,118 @@
 WbWLanInitialize(PADAPTER Adapter)
 {
 	phw_data_t	pHwData;
-	PUCHAR		pMacAddr, pMacAddr2;
+	u8		*pMacAddr;
+	u8		*pMacAddr2;
 	u32		InitStep = 0;
 	u8		EEPROM_region;
 	u8		HwRadioOff;
 
-	do {
-		//
-		// Setting default value for Linux
-		//
-		Adapter->sLocalPara.region_INF = REGION_AUTO;
-		Adapter->sLocalPara.TxRateMode = RATE_AUTO;
-		psLOCAL->bMacOperationMode = MODE_802_11_BG;	// B/G mode
-		Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold;
-		Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
-		hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 );
-		Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE;
-		psLOCAL->bPreambleMode = AUTO_MODE;
-		Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE;
-		pHwData = &Adapter->sHwData;
-		hal_set_phy_type( pHwData, RF_DECIDE_BY_INF );
+	//
+	// Setting default value for Linux
+	//
+	Adapter->sLocalPara.region_INF = REGION_AUTO;
+	Adapter->sLocalPara.TxRateMode = RATE_AUTO;
+	psLOCAL->bMacOperationMode = MODE_802_11_BG;	// B/G mode
+	Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold;
+	Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
+	hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 );
+	Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE;
+	psLOCAL->bPreambleMode = AUTO_MODE;
+	Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE;
+	pHwData = &Adapter->sHwData;
+	hal_set_phy_type( pHwData, RF_DECIDE_BY_INF );
 
-		//
-		// Initial each module and variable
-		//
-		if (!WBLINUX_Initial(Adapter)) {
+	//
+	// Initial each module and variable
+	//
+	if (!WBLINUX_Initial(Adapter)) {
 #ifdef _PE_USB_INI_DUMP_
-			WBDEBUG(("[w35und]WBNDIS initialization failed\n"));
+		WBDEBUG(("[w35und]WBNDIS initialization failed\n"));
 #endif
-			break;
-		}
-
-		// Initial Software variable
-		Adapter->sLocalPara.ShutDowned = FALSE;
-
-		//added by ws for wep key error detection
-		Adapter->sLocalPara.bWepKeyError= FALSE;
-		Adapter->sLocalPara.bToSelfPacketReceived = FALSE;
-		Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds
-
-		// Initial USB hal
-		InitStep = 1;
-		pHwData = &Adapter->sHwData;
-		if (!hal_init_hardware(pHwData, Adapter))
-			break;
-
-		EEPROM_region = hal_get_region_from_EEPROM( pHwData );
-		if (EEPROM_region != REGION_AUTO)
-			psLOCAL->region = EEPROM_region;
-		else {
-			if (psLOCAL->region_INF != REGION_AUTO)
-				psLOCAL->region = psLOCAL->region_INF;
-			else
-				psLOCAL->region = REGION_USA;	//default setting
-		}
-
-		// Get Software setting flag from hal
-		Adapter->sLocalPara.boAntennaDiversity = FALSE;
-		if (hal_software_set(pHwData) & 0x00000001)
-			Adapter->sLocalPara.boAntennaDiversity = TRUE;
-
-		//
-		// For TS module
-		//
-		InitStep = 2;
-
-		// For MDS module
-		InitStep = 3;
-		Mds_initial(Adapter);
-
-		//=======================================
-		// Initialize the SME, SCAN, MLME, ROAM
-		//=======================================
-		InitStep = 4;
-		InitStep = 5;
-		InitStep = 6;
-
-		// If no user-defined address in the registry, use the addresss "burned" on the NIC instead.
-		pMacAddr = Adapter->sLocalPara.ThisMacAddress;
-		pMacAddr2 = Adapter->sLocalPara.PermanentAddress;
-		hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM
-		if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal
-		{
-			memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH );
-		} else {
-			// Set the user define MAC address
-			hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress );
-		}
-
-		//get current antenna
-		psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData);
-#ifdef _PE_STATE_DUMP_
-		WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo));
-#endif
-		hal_get_hw_radio_off( pHwData );
-
-		// Waiting for HAL setting OK
-		while (!hal_idle(pHwData))
-			OS_SLEEP(10000);
-
-		MTO_Init(Adapter);
-
-		HwRadioOff = hal_get_hw_radio_off( pHwData );
-		psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff;
-
-		hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) );
-
-		hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now.
-		//set a tx power for reference.....
-//		sme_set_tx_power_level(Adapter, 12);	FIXME?
-		return TRUE;
+		goto error;
 	}
-	while(FALSE);
 
+	// Initial Software variable
+	Adapter->sLocalPara.ShutDowned = FALSE;
+
+	//added by ws for wep key error detection
+	Adapter->sLocalPara.bWepKeyError= FALSE;
+	Adapter->sLocalPara.bToSelfPacketReceived = FALSE;
+	Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds
+
+	// Initial USB hal
+	InitStep = 1;
+	pHwData = &Adapter->sHwData;
+	if (!hal_init_hardware(pHwData, Adapter))
+		goto error;
+
+	EEPROM_region = hal_get_region_from_EEPROM( pHwData );
+	if (EEPROM_region != REGION_AUTO)
+		psLOCAL->region = EEPROM_region;
+	else {
+		if (psLOCAL->region_INF != REGION_AUTO)
+			psLOCAL->region = psLOCAL->region_INF;
+		else
+			psLOCAL->region = REGION_USA;	//default setting
+	}
+
+	// Get Software setting flag from hal
+	Adapter->sLocalPara.boAntennaDiversity = FALSE;
+	if (hal_software_set(pHwData) & 0x00000001)
+		Adapter->sLocalPara.boAntennaDiversity = TRUE;
+
+	//
+	// For TS module
+	//
+	InitStep = 2;
+
+	// For MDS module
+	InitStep = 3;
+	Mds_initial(Adapter);
+
+	//=======================================
+	// Initialize the SME, SCAN, MLME, ROAM
+	//=======================================
+	InitStep = 4;
+	InitStep = 5;
+	InitStep = 6;
+
+	// If no user-defined address in the registry, use the addresss "burned" on the NIC instead.
+	pMacAddr = Adapter->sLocalPara.ThisMacAddress;
+	pMacAddr2 = Adapter->sLocalPara.PermanentAddress;
+	hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM
+	if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal
+	{
+		memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH );
+	} else {
+		// Set the user define MAC address
+		hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress );
+	}
+
+	//get current antenna
+	psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData);
+#ifdef _PE_STATE_DUMP_
+	WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo));
+#endif
+	hal_get_hw_radio_off( pHwData );
+
+	// Waiting for HAL setting OK
+	while (!hal_idle(pHwData))
+		OS_SLEEP(10000);
+
+	MTO_Init(Adapter);
+
+	HwRadioOff = hal_get_hw_radio_off( pHwData );
+	psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff;
+
+	hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) );
+
+	hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now.
+	//set a tx power for reference.....
+//	sme_set_tx_power_level(Adapter, 12);	FIXME?
+	return TRUE;
+
+error:
 	switch (InitStep) {
 	case 5:
 	case 4:
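
Alongside the do/while(FALSE) removal, wblinux.c converts the OS_SPIN_LOCK wrappers to a plain spinlock_t: OS_SPIN_LOCK_ALLOCATE becomes spin_lock_init(), and OS_SPIN_LOCK_ACQUIRED/RELEASED become spin_lock_irq()/spin_unlock_irq(). A minimal sketch of the converted counter helpers, with an illustrative context struct standing in for PWBLINUX:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ctx {
	spinlock_t	atomic_lock;	/* was OS_SPIN_LOCK */
	u32		counter;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	spin_lock_init(&ctx->atomic_lock);	/* was OS_SPIN_LOCK_ALLOCATE() */
	ctx->counter = 0;
}

static u32 example_inc(struct example_ctx *ctx)
{
	u32 val;

	spin_lock_irq(&ctx->atomic_lock);	/* was OS_SPIN_LOCK_ACQUIRED() */
	val = ++ctx->counter;
	spin_unlock_irq(&ctx->atomic_lock);	/* was OS_SPIN_LOCK_RELEASED() */

	return val;
}

A later cleanup could presumably replace the lock-protected counter with atomic_t and atomic_inc_return(), but this patch keeps the original locking scheme and only switches the primitives.
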
diff --git a/drivers/staging/winbond/wblinux_s.h b/drivers/staging/winbond/wblinux_s.h
index 97e9167..fd2bb43 100644
--- a/drivers/staging/winbond/wblinux_s.h
+++ b/drivers/staging/winbond/wblinux_s.h
@@ -24,8 +24,8 @@
 
 typedef struct _WBLINUX
 {
-	OS_SPIN_LOCK	AtomicSpinLock;
-	OS_SPIN_LOCK	SpinLock;
+	spinlock_t	AtomicSpinLock;
+	spinlock_t	SpinLock;
 	u32	shutdown;
 
 	OS_ATOMIC	ThreadCount;
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 10b1f0f..2425d86 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,6 +1,6 @@
 config PRISM2_USB
 	tristate "Prism2.5 USB driver"
-	depends on USB
+	depends on WLAN_80211 && USB
 	default n
 	---help---
 	  This is the wlan-ng prism 2.5 USB driver for a wide range of
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index a205463..0dfb8ce 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -824,7 +824,7 @@
 #define		HFA384x_CMD_MACPORT_SET(value)		((UINT16)HFA384x_CMD_AINFO_SET(value))
 #define		HFA384x_CMD_ISRECL(value)		((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_RECL)))
 #define		HFA384x_CMD_RECL_SET(value)		((UINT16)HFA384x_CMD_AINFO_SET(value))
-#define		HFA384x_CMD_QOS_GET(value)		((UINT16((((UINT16)(value))&((UINT16)0x3000)) >> 12))
+#define		HFA384x_CMD_QOS_GET(value)		((UINT16)((((UINT16)(value))&((UINT16)0x3000)) >> 12))
 #define		HFA384x_CMD_QOS_SET(value)		((UINT16)((((UINT16)(value)) << 12) & 0x3000))
 #define		HFA384x_CMD_ISWRITE(value)		((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_WRITE)))
 #define		HFA384x_CMD_WRITE_SET(value)		((UINT16)HFA384x_CMD_AINFO_SET((UINT16)value))
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 53fe298..11a50c7 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -64,7 +64,6 @@
 /*================================================================*/
 /* Project Includes */
 
-#include "version.h"
 #include "p80211hdr.h"
 #include "p80211types.h"
 #include "p80211msg.h"
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 268fd9b..eac06f7 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -90,8 +90,6 @@
 #include <linux/usb.h>
 //#endif
 
-#include "wlan_compat.h"
-
 /*================================================================*/
 /* Project Includes */
 
diff --git a/drivers/staging/wlan-ng/wlan_compat.h b/drivers/staging/wlan-ng/wlan_compat.h
index 1702657..59dfa8f 100644
--- a/drivers/staging/wlan-ng/wlan_compat.h
+++ b/drivers/staging/wlan-ng/wlan_compat.h
@@ -245,11 +245,11 @@
 #  define preempt_count() (0UL)
 #endif
 
-#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __FUNCTION__ , ##args);
+#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __func__ , ##args);
 
-#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __FUNCTION__ , ##args);
+#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __func__ , ##args);
 
-#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __FUNCTION__ , ##args);
+#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __func__ , ##args);
 
 #define WLAN_LOG_INFO(args... ) printk(KERN_INFO args)
 
@@ -265,7 +265,7 @@
 	#define DBFENTER { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"---->\n"); } }
 	#define DBFEXIT  { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"<----\n"); } }
 
-	#define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x ,  __FUNCTION__, (preempt_count() & PREEMPT_MASK), ##args );
+	#define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x ,  __func__, (preempt_count() & PREEMPT_MASK), ##args );
 #else
 	#define WLAN_ASSERT(c)
 	#define WLAN_HEX_DUMP( l, s, p, n)
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index 4d74ba3..37caf4d 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -54,7 +54,6 @@
 	if (minor >= PHONE_NUM_DEVICES)
 		return -ENODEV;
 
-	lock_kernel();
 	mutex_lock(&phone_lock);
 	p = phone_device[minor];
 	if (p)
@@ -81,7 +80,6 @@
 	fops_put(old_fops);
 end:
 	mutex_unlock(&phone_lock);
-	unlock_kernel();
 	return err;
 }
 
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 5dccf05..f9b4647 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -47,6 +47,9 @@
 	struct class *class;
 } *uio_class;
 
+/* Protect idr accesses */
+static DEFINE_MUTEX(minor_lock);
+
 /*
  * attributes
  */
@@ -239,7 +242,6 @@
 
 static int uio_get_minor(struct uio_device *idev)
 {
-	static DEFINE_MUTEX(minor_lock);
 	int retval = -ENOMEM;
 	int id;
 
@@ -261,7 +263,9 @@
 
 static void uio_free_minor(struct uio_device *idev)
 {
+	mutex_lock(&minor_lock);
 	idr_remove(&uio_idr, idev->minor);
+	mutex_unlock(&minor_lock);
 }
 
 /**
@@ -305,8 +309,9 @@
 	struct uio_listener *listener;
 	int ret = 0;
 
-	lock_kernel();
+	mutex_lock(&minor_lock);
 	idev = idr_find(&uio_idr, iminor(inode));
+	mutex_unlock(&minor_lock);
 	if (!idev) {
 		ret = -ENODEV;
 		goto out;
@@ -332,18 +337,15 @@
 		if (ret)
 			goto err_infoopen;
 	}
-	unlock_kernel();
 	return 0;
 
 err_infoopen:
-
 	kfree(listener);
-err_alloc_listener:
 
+err_alloc_listener:
 	module_put(idev->owner);
 
 out:
-	unlock_kernel();
 	return ret;
 }
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index bcefbdd..289d81a 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -36,7 +36,8 @@
 	default y if PXA3xx
 	default y if ARCH_EP93XX
 	default y if ARCH_AT91
-	default y if ARCH_PNX4008
+	default y if ARCH_PNX4008 && I2C
+	default y if MFD_TC6393XB
 	# PPC:
 	default y if STB03xxx
 	default y if PPC_MPC52xx
@@ -97,6 +98,8 @@
 
 source "drivers/usb/mon/Kconfig"
 
+source "drivers/usb/wusbcore/Kconfig"
+
 source "drivers/usb/host/Kconfig"
 
 source "drivers/usb/musb/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index a419c42..8b7c419 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -16,9 +16,12 @@
 obj-$(CONFIG_USB_SL811_HCD)	+= host/
 obj-$(CONFIG_USB_U132_HCD)	+= host/
 obj-$(CONFIG_USB_R8A66597_HCD)	+= host/
+obj-$(CONFIG_USB_HWA_HCD)	+= host/
 
 obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
 
+obj-$(CONFIG_USB_WUSB)		+= wusbcore/
+
 obj-$(CONFIG_USB_ACM)		+= class/
 obj-$(CONFIG_USB_PRINTER)	+= class/
 
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 76fce44..3e86240 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -722,6 +722,16 @@
 	flush_scheduled_work();
 }
 
+static int speedtch_pre_reset(struct usb_interface *intf)
+{
+	return 0;
+}
+
+static int speedtch_post_reset(struct usb_interface *intf)
+{
+	return 0;
+}
+
 
 /**********
 **  USB  **
@@ -740,6 +750,8 @@
 	.name		= speedtch_driver_name,
 	.probe		= speedtch_usb_probe,
 	.disconnect	= usbatm_usb_disconnect,
+	.pre_reset	= speedtch_pre_reset,
+	.post_reset	= speedtch_post_reset,
 	.id_table	= speedtch_usb_ids
 };
 
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 0da2c25..06dd114 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -344,7 +344,7 @@
 				__func__, sarb->len, vcc);
 		/* discard cells already received */
 		skb_trim(sarb, 0);
-		UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end);
+		UDSL_ASSERT(instance, sarb->tail + ATM_CELL_PAYLOAD <= sarb->end);
 	}
 
 	memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
@@ -432,7 +432,7 @@
 		unsigned char *cell_buf = instance->cell_buf;
 		unsigned int space_left = stride - buf_usage;
 
-		UDSL_ASSERT(buf_usage <= stride);
+		UDSL_ASSERT(instance, buf_usage <= stride);
 
 		if (avail_data >= space_left) {
 			/* add new data and process cell */
@@ -475,7 +475,7 @@
 	unsigned int stride = instance->tx_channel.stride;
 
 	vdbg("%s: skb->len=%d, avail_space=%u", __func__, skb->len, avail_space);
-	UDSL_ASSERT(!(avail_space % stride));
+	UDSL_ASSERT(instance, !(avail_space % stride));
 
 	for (bytes_written = 0; bytes_written < avail_space && ctrl->len;
 	     bytes_written += stride, target += stride) {
@@ -547,7 +547,7 @@
 				if (!urb->iso_frame_desc[i].status) {
 					unsigned int actual_length = urb->iso_frame_desc[i].actual_length;
 
-					UDSL_ASSERT(actual_length <= packet_size);
+					UDSL_ASSERT(instance, actual_length <= packet_size);
 
 					if (!merge_length)
 						merge_start = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
@@ -1188,7 +1188,7 @@
 		struct urb *urb;
 		unsigned int iso_packets = usb_pipeisoc(channel->endpoint) ? channel->buf_size / channel->packet_size : 0;
 
-		UDSL_ASSERT(!usb_pipeisoc(channel->endpoint) || usb_pipein(channel->endpoint));
+		UDSL_ASSERT(instance, !usb_pipeisoc(channel->endpoint) || usb_pipein(channel->endpoint));
 
 		urb = usb_alloc_urb(iso_packets, GFP_KERNEL);
 		if (!urb) {
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index e6887c6..f6f4508 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -40,9 +40,15 @@
 */
 
 #ifdef DEBUG
-#define UDSL_ASSERT(x)	BUG_ON(!(x))
+#define UDSL_ASSERT(instance, x)	BUG_ON(!(x))
 #else
-#define UDSL_ASSERT(x)	do { if (!(x)) warn("failed assertion '%s' at line %d", __stringify(x), __LINE__); } while(0)
+#define UDSL_ASSERT(instance, x)					\
+	do {	\
+		if (!(x))						\
+			dev_warn(&(instance)->usb_intf->dev,		\
+				 "failed assertion '%s' at line %d",	\
+				 __stringify(x), __LINE__);		\
+	} while(0)
 #endif
 
 #define usb_err(instance, format, arg...)	\
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index 8472543..17d167b 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -193,7 +193,7 @@
 	    num_vendor != num_product ||
 	    num_vendor != num_rx_endpoint ||
 	    num_vendor != num_tx_endpoint) {
-		warn("malformed module parameters");
+		printk(KERN_WARNING "xusbatm: malformed module parameters\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/usb/class/Kconfig b/drivers/usb/class/Kconfig
index 66f17ed..2519e32 100644
--- a/drivers/usb/class/Kconfig
+++ b/drivers/usb/class/Kconfig
@@ -40,3 +40,13 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called cdc-wdm.
 
+config USB_TMC
+	tristate "USB Test and Measurement Class support"
+	depends on USB
+	help
+	  Say Y here if you want to connect a USB device that follows
+	  the USB.org specification for USB Test and Measurement devices
+	  to your computer's USB port.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called usbtmc.
diff --git a/drivers/usb/class/Makefile b/drivers/usb/class/Makefile
index 535d59a..32e8527 100644
--- a/drivers/usb/class/Makefile
+++ b/drivers/usb/class/Makefile
@@ -6,3 +6,4 @@
 obj-$(CONFIG_USB_ACM)		+= cdc-acm.o
 obj-$(CONFIG_USB_PRINTER)	+= usblp.o
 obj-$(CONFIG_USB_WDM)		+= cdc-wdm.o
+obj-$(CONFIG_USB_TMC)		+= usbtmc.o
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index c257453..20104443 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -326,8 +326,8 @@
 	usb_mark_last_busy(acm->dev);
 	retval = usb_submit_urb (urb, GFP_ATOMIC);
 	if (retval)
-		err ("%s - usb_submit_urb failed with result %d",
-		     __func__, retval);
+		dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
+			"result %d", __func__, retval);
 }
 
 /* data interface returns incoming bytes, or we got unthrottled */
@@ -514,7 +514,7 @@
 
 	rv = usb_autopm_get_interface(acm->control);
 	if (rv < 0) {
-		err("Autopm failure in %s", __func__);
+		dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
 		return;
 	}
 	if (acm->delayed_wb) {
@@ -849,9 +849,10 @@
 {
 	int i;
 	struct acm_wb *wb;
+	struct usb_device *usb_dev = interface_to_usbdev(acm->control);
 
 	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
-		usb_buffer_free(acm->dev, acm->writesize, wb->buf, wb->dmah);
+		usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
 	}
 }
 
@@ -924,7 +925,7 @@
 	
 	/* normal probing*/
 	if (!buffer) {
-		err("Weird descriptor references\n");
+		dev_err(&intf->dev, "Weird descriptor references\n");
 		return -EINVAL;
 	}
 
@@ -934,21 +935,24 @@
 			buflen = intf->cur_altsetting->endpoint->extralen;
 			buffer = intf->cur_altsetting->endpoint->extra;
 		} else {
-			err("Zero length descriptor references\n");
+			dev_err(&intf->dev,
+				"Zero length descriptor references\n");
 			return -EINVAL;
 		}
 	}
 
 	while (buflen > 0) {
 		if (buffer [1] != USB_DT_CS_INTERFACE) {
-			err("skipping garbage\n");
+			dev_err(&intf->dev, "skipping garbage\n");
 			goto next_desc;
 		}
 
 		switch (buffer [2]) {
 			case USB_CDC_UNION_TYPE: /* we've found it */
 				if (union_header) {
-					err("More than one union descriptor, skipping ...");
+					dev_err(&intf->dev, "More than one "
+						"union descriptor, "
+						"skipping ...\n");
 					goto next_desc;
 				}
 				union_header = (struct usb_cdc_union_desc *)
@@ -966,7 +970,9 @@
 				call_management_function = buffer[3];
 				call_interface_num = buffer[4];
 				if ((call_management_function & 3) != 3)
-					err("This device cannot do calls on its own. It is no modem.");
+					dev_err(&intf->dev, "This device "
+						"cannot do calls on its own. "
+						"It is no modem.\n");
 				break;
 			default:
 				/* there are LOTS more CDC descriptors that
@@ -1051,7 +1057,7 @@
 	for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
 
 	if (minor == ACM_TTY_MINORS) {
-		err("no more free acm devices");
+		dev_err(&intf->dev, "no more free acm devices\n");
 		return -ENODEV;
 	}
 
@@ -1454,7 +1460,8 @@
 		return retval;
 	}
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 }
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7e8e123..5a8ecc0 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -42,6 +42,8 @@
 	{ }
 };
 
+MODULE_DEVICE_TABLE (usb, wdm_ids);
+
 #define WDM_MINOR_BASE	176
 
 
@@ -132,10 +134,12 @@
 				"nonzero urb status received: -ESHUTDOWN");
 			break;
 		case -EPIPE:
-			err("nonzero urb status received: -EPIPE");
+			dev_err(&desc->intf->dev,
+				"nonzero urb status received: -EPIPE\n");
 			break;
 		default:
-			err("Unexpected error %d", status);
+			dev_err(&desc->intf->dev,
+				"Unexpected error %d\n", status);
 			break;
 		}
 	}
@@ -170,16 +174,18 @@
 			return; /* unplug */
 		case -EPIPE:
 			set_bit(WDM_INT_STALL, &desc->flags);
-			err("Stall on int endpoint");
+			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
 			goto sw; /* halt is cleared in work */
 		default:
-			err("nonzero urb status received: %d", status);
+			dev_err(&desc->intf->dev,
+				"nonzero urb status received: %d\n", status);
 			break;
 		}
 	}
 
 	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
-		err("wdm_int_callback - %d bytes", urb->actual_length);
+		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+			urb->actual_length);
 		goto exit;
 	}
 
@@ -198,7 +204,8 @@
 		goto exit;
 	default:
 		clear_bit(WDM_POLL_RUNNING, &desc->flags);
-		err("unknown notification %d received: index %d len %d",
+		dev_err(&desc->intf->dev,
+			"unknown notification %d received: index %d len %d\n",
 			dr->bNotificationType, dr->wIndex, dr->wLength);
 		goto exit;
 	}
@@ -236,14 +243,16 @@
 sw:
 			rv = schedule_work(&desc->rxwork);
 			if (rv)
-				err("Cannot schedule work");
+				dev_err(&desc->intf->dev,
+					"Cannot schedule work\n");
 		}
 	}
 exit:
 	rv = usb_submit_urb(urb, GFP_ATOMIC);
 	if (rv)
-		err("%s - usb_submit_urb failed with result %d",
-		     __func__, rv);
+		dev_err(&desc->intf->dev,
+			"%s - usb_submit_urb failed with result %d\n",
+			__func__, rv);
 
 }
 
@@ -353,7 +362,7 @@
 	if (rv < 0) {
 		kfree(buf);
 		clear_bit(WDM_IN_USE, &desc->flags);
-		err("Tx URB error: %d", rv);
+		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
 	} else {
 		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
 			req->wIndex);
@@ -401,7 +410,8 @@
 			int t = desc->rerr;
 			desc->rerr = 0;
 			spin_unlock_irq(&desc->iuspin);
-			err("reading had resulted in %d", t);
+			dev_err(&desc->intf->dev,
+				"reading had resulted in %d\n", t);
 			rv = -EIO;
 			goto err;
 		}
@@ -440,7 +450,7 @@
 err:
 	mutex_unlock(&desc->rlock);
 	if (rv < 0)
-		err("wdm_read: exit error");
+		dev_err(&desc->intf->dev, "wdm_read: exit error\n");
 	return rv;
 }
 
@@ -450,7 +460,8 @@
 
 	wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
 	if (desc->werr < 0)
-		err("Error in flush path: %d", desc->werr);
+		dev_err(&desc->intf->dev, "Error in flush path: %d\n",
+			desc->werr);
 
 	return desc->werr;
 }
@@ -502,7 +513,7 @@
 
 	rv = usb_autopm_get_interface(desc->intf);
 	if (rv < 0) {
-		err("Error autopm - %d", rv);
+		dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
 		goto out;
 	}
 	intf->needs_remote_wakeup = 1;
@@ -512,7 +523,8 @@
 		rv = usb_submit_urb(desc->validity, GFP_KERNEL);
 		if (rv < 0) {
 			desc->count--;
-			err("Error submitting int urb - %d", rv);
+			dev_err(&desc->intf->dev,
+				"Error submitting int urb - %d\n", rv);
 		}
 	} else {
 		rv = 0;
@@ -600,7 +612,7 @@
 
 	while (buflen > 0) {
 		if (buffer [1] != USB_DT_CS_INTERFACE) {
-			err("skipping garbage");
+			dev_err(&intf->dev, "skipping garbage\n");
 			goto next_desc;
 		}
 
@@ -614,7 +626,8 @@
 				"Finding maximum buffer length: %d", maxcom);
 			break;
 		default:
-			err("Ignoring extra header, type %d, length %d",
+			dev_err(&intf->dev,
+				"Ignoring extra header, type %d, length %d\n",
 				buffer[2], buffer[0]);
 			break;
 		}
@@ -772,7 +785,8 @@
 	if (desc->count) {
 		rv = usb_submit_urb(desc->validity, GFP_NOIO);
 		if (rv < 0)
-			err("Error resume submitting int urb - %d", rv);
+			dev_err(&desc->intf->dev,
+				"Error resume submitting int urb - %d\n", rv);
 	}
 	return rv;
 }
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 0647164..b5775af 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -593,8 +593,9 @@
 				err = usblp_hp_channel_change_request(usblp,
 					arg, &newChannel);
 				if (err < 0) {
-					err("usblp%d: error = %d setting "
-						"HP channel",
+					dev_err(&usblp->dev->dev,
+						"usblp%d: error = %d setting "
+						"HP channel\n",
 						usblp->minor, err);
 					retval = -EIO;
 					goto done;
@@ -1076,15 +1077,16 @@
 		       const struct usb_device_id *id)
 {
 	struct usb_device *dev = interface_to_usbdev (intf);
-	struct usblp *usblp = NULL;
+	struct usblp *usblp;
 	int protocol;
 	int retval;
 
 	/* Malloc and start initializing usblp structure so we can use it
 	 * directly. */
-	if (!(usblp = kzalloc(sizeof(struct usblp), GFP_KERNEL))) {
+	usblp = kzalloc(sizeof(struct usblp), GFP_KERNEL);
+	if (!usblp) {
 		retval = -ENOMEM;
-		goto abort;
+		goto abort_ret;
 	}
 	usblp->dev = dev;
 	mutex_init(&usblp->wmut);
@@ -1179,12 +1181,11 @@
 	usb_set_intfdata (intf, NULL);
 	device_remove_file(&intf->dev, &dev_attr_ieee1284_id);
 abort:
-	if (usblp) {
-		kfree(usblp->readbuf);
-		kfree(usblp->statusbuf);
-		kfree(usblp->device_id_string);
-		kfree(usblp);
-	}
+	kfree(usblp->readbuf);
+	kfree(usblp->statusbuf);
+	kfree(usblp->device_id_string);
+	kfree(usblp);
+abort_ret:
 	return retval;
 }
 
@@ -1345,7 +1346,7 @@
 	usb_deregister_dev(intf, &usblp_class);
 
 	if (!usblp || !usblp->dev) {
-		err("bogus disconnect");
+		dev_err(&intf->dev, "bogus disconnect\n");
 		BUG ();
 	}
 
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
new file mode 100644
index 0000000..543811f
--- /dev/null
+++ b/drivers/usb/class/usbtmc.c
@@ -0,0 +1,1087 @@
+/**
+ * drivers/usb/class/usbtmc.c - USB Test & Measurement class driver
+ *
+ * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
+ * Copyright (C) 2008 Novell, Inc.
+ * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The GNU General Public License is available at
+ * http://www.gnu.org/copyleft/gpl.html.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/usb/tmc.h>
+
+
+#define USBTMC_MINOR_BASE	176
+
+/*
+ * Size of the driver's internal IO buffer. Must be a multiple of 4 and at
+ * least as large as wMaxPacketSize (which is usually 512 bytes).
+ */
+#define USBTMC_SIZE_IOBUFFER	2048
+
+/* Default USB timeout (in milliseconds) */
+#define USBTMC_TIMEOUT		10
+
+/*
+ * Maximum number of read cycles to empty bulk in endpoint during CLEAR and
+ * ABORT_BULK_IN requests. Ends the loop if (for whatever reason) a short
+ * packet is never read.
+ */
+#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN	100
+
+static struct usb_device_id usbtmc_devices[] = {
+	{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
+	{ 0, } /* terminating entry */
+};
+
+/*
+ * This structure is the capabilities for the device
+ * See section 4.2.1.8 of the USBTMC specification for details.
+ */
+struct usbtmc_dev_capabilities {
+	__u8 interface_capabilities;
+	__u8 device_capabilities;
+	__u8 usb488_interface_capabilities;
+	__u8 usb488_device_capabilities;
+};
+
+/* This structure holds private data for each USBTMC device. One copy is
+ * allocated for each USBTMC device in the driver's probe function.
+ */
+struct usbtmc_device_data {
+	const struct usb_device_id *id;
+	struct usb_device *usb_dev;
+	struct usb_interface *intf;
+
+	unsigned int bulk_in;
+	unsigned int bulk_out;
+
+	u8 bTag;
+	u8 bTag_last_write;	/* needed for abort */
+	u8 bTag_last_read;	/* needed for abort */
+
+	/* attributes from the USB TMC spec for this device */
+	u8 TermChar;
+	bool TermCharEnabled;
+	bool auto_abort;
+
+	struct usbtmc_dev_capabilities	capabilities;
+	struct kref kref;
+	struct mutex io_mutex;	/* only one i/o function running at a time */
+};
+#define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
+
+/* Forward declarations */
+static struct usb_driver usbtmc_driver;
+
+static void usbtmc_delete(struct kref *kref)
+{
+	struct usbtmc_device_data *data = to_usbtmc_data(kref);
+
+	usb_put_dev(data->usb_dev);
+	kfree(data);
+}
+
+static int usbtmc_open(struct inode *inode, struct file *filp)
+{
+	struct usb_interface *intf;
+	struct usbtmc_device_data *data;
+	int retval = -ENODEV;
+
+	intf = usb_find_interface(&usbtmc_driver, iminor(inode));
+	if (!intf) {
+		printk(KERN_ERR KBUILD_MODNAME
+		       ": cannot find device for minor %d\n", iminor(inode));
+		goto exit;
+	}
+
+	data = usb_get_intfdata(intf);
+	kref_get(&data->kref);
+
+	/* Store pointer in file structure's private data field */
+	filp->private_data = data;
+	retval = 0;
+
+exit:
+	return retval;
+}
+
+static int usbtmc_release(struct inode *inode, struct file *file)
+{
+	struct usbtmc_device_data *data = file->private_data;
+
+	kref_put(&data->kref, usbtmc_delete);
+	return 0;
+}
+
+static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+{
+	char *buffer;
+	struct device *dev;
+	int rv;
+	int n;
+	int actual;
+	struct usb_host_interface *current_setting;
+	int max_size;
+
+	dev = &data->intf->dev;
+	buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_INITIATE_ABORT_BULK_IN,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
+			     data->bTag_last_read, data->bulk_in,
+			     buffer, 2, USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+
+	if (buffer[0] == USBTMC_STATUS_FAILED) {
+		rv = 0;
+		goto exit;
+	}
+
+	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+		dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
+			buffer[0]);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	max_size = 0;
+	current_setting = data->intf->cur_altsetting;
+	for (n = 0; n < current_setting->desc.bNumEndpoints; n++)
+		if (current_setting->endpoint[n].desc.bEndpointAddress ==
+			data->bulk_in)
+			max_size = le16_to_cpu(current_setting->endpoint[n].
+						desc.wMaxPacketSize);
+
+	if (max_size == 0) {
+		dev_err(dev, "Couldn't get wMaxPacketSize\n");
+		rv = -EPERM;
+		goto exit;
+	}
+
+	dev_dbg(&data->intf->dev, "wMaxPacketSize is %d\n", max_size);
+
+	n = 0;
+
+	do {
+		dev_dbg(dev, "Reading from bulk in EP\n");
+
+		rv = usb_bulk_msg(data->usb_dev,
+				  usb_rcvbulkpipe(data->usb_dev,
+						  data->bulk_in),
+				  buffer, USBTMC_SIZE_IOBUFFER,
+				  &actual, USBTMC_TIMEOUT);
+
+		n++;
+
+		if (rv < 0) {
+			dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+			goto exit;
+		}
+	} while ((actual == max_size) &&
+		 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+
+	if (actual == max_size) {
+		dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
+			USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	n = 0;
+
+usbtmc_abort_bulk_in_status:
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
+			     0, data->bulk_in, buffer, 0x08,
+			     USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+
+	if (buffer[0] == USBTMC_STATUS_SUCCESS) {
+		rv = 0;
+		goto exit;
+	}
+
+	if (buffer[0] != USBTMC_STATUS_PENDING) {
+		dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	if (buffer[1] == 1)
+		do {
+			dev_dbg(dev, "Reading from bulk in EP\n");
+
+			rv = usb_bulk_msg(data->usb_dev,
+					  usb_rcvbulkpipe(data->usb_dev,
+							  data->bulk_in),
+					  buffer, USBTMC_SIZE_IOBUFFER,
+					  &actual, USBTMC_TIMEOUT);
+
+			n++;
+
+			if (rv < 0) {
+				dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+				goto exit;
+			}
+		} while ((actual = max_size) &&
+			 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+
+	if (actual == max_size) {
+		dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
+			USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	goto usbtmc_abort_bulk_in_status;
+
+exit:
+	kfree(buffer);
+	return rv;
+
+}
+
+static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+{
+	struct device *dev;
+	u8 *buffer;
+	int rv;
+	int n;
+
+	dev = &data->intf->dev;
+
+	buffer = kmalloc(8, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
+			     data->bTag_last_write, data->bulk_out,
+			     buffer, 2, USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "INITIATE_ABORT_BULK_OUT returned %x\n", buffer[0]);
+
+	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+		dev_err(dev, "INITIATE_ABORT_BULK_OUT returned %x\n",
+			buffer[0]);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	n = 0;
+
+usbtmc_abort_bulk_out_check_status:
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
+			     0, data->bulk_out, buffer, 0x08,
+			     USBTMC_TIMEOUT);
+	n++;
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "CHECK_ABORT_BULK_OUT returned %x\n", buffer[0]);
+
+	if (buffer[0] == USBTMC_STATUS_SUCCESS)
+		goto usbtmc_abort_bulk_out_clear_halt;
+
+	if ((buffer[0] == USBTMC_STATUS_PENDING) &&
+	    (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN))
+		goto usbtmc_abort_bulk_out_check_status;
+
+	rv = -EPERM;
+	goto exit;
+
+usbtmc_abort_bulk_out_clear_halt:
+	rv = usb_control_msg(data->usb_dev,
+			     usb_sndctrlpipe(data->usb_dev, 0),
+			     USB_REQ_CLEAR_FEATURE,
+			     USB_DIR_OUT | USB_TYPE_STANDARD |
+			     USB_RECIP_ENDPOINT,
+			     USB_ENDPOINT_HALT, data->bulk_out, buffer,
+			     0, USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+	rv = 0;
+
+exit:
+	kfree(buffer);
+	return rv;
+}
+
+static ssize_t usbtmc_read(struct file *filp, char __user *buf,
+			   size_t count, loff_t *f_pos)
+{
+	struct usbtmc_device_data *data;
+	struct device *dev;
+	unsigned long int n_characters;
+	u8 *buffer;
+	int actual;
+	int done;
+	int remaining;
+	int retval;
+	int this_part;
+
+	/* Get pointer to private data structure */
+	data = filp->private_data;
+	dev = &data->intf->dev;
+
+	buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	mutex_lock(&data->io_mutex);
+
+	remaining = count;
+	done = 0;
+
+	while (remaining > 0) {
+		if (remaining > USBTMC_SIZE_IOBUFFER - 12 - 3)
+			this_part = USBTMC_SIZE_IOBUFFER - 12 - 3;
+		else
+			this_part = remaining;
+
+		/* Setup IO buffer for DEV_DEP_MSG_IN message
+		 * Refer to class specs for details
+		 */
+		buffer[0] = 2;
+		buffer[1] = data->bTag;
+		buffer[2] = ~(data->bTag);
+		buffer[3] = 0; /* Reserved */
+		buffer[4] = (this_part - 12 - 3) & 255;
+		buffer[5] = ((this_part - 12 - 3) >> 8) & 255;
+		buffer[6] = ((this_part - 12 - 3) >> 16) & 255;
+		buffer[7] = ((this_part - 12 - 3) >> 24) & 255;
+		buffer[8] = data->TermCharEnabled * 2;
+		/* Use term character? */
+		buffer[9] = data->TermChar;
+		buffer[10] = 0; /* Reserved */
+		buffer[11] = 0; /* Reserved */
+
+		/* Send bulk URB */
+		retval = usb_bulk_msg(data->usb_dev,
+				      usb_sndbulkpipe(data->usb_dev,
+						      data->bulk_out),
+				      buffer, 12, &actual, USBTMC_TIMEOUT);
+
+		/* Store bTag (in case we need to abort) */
+		data->bTag_last_write = data->bTag;
+
+		/* Increment bTag -- and increment again if zero */
+		data->bTag++;
+		if (!data->bTag)
+			(data->bTag)++;
+
+		if (retval < 0) {
+			dev_err(dev, "usb_bulk_msg returned %d\n", retval);
+			if (data->auto_abort)
+				usbtmc_ioctl_abort_bulk_out(data);
+			goto exit;
+		}
+
+		/* Send bulk URB */
+		retval = usb_bulk_msg(data->usb_dev,
+				      usb_rcvbulkpipe(data->usb_dev,
+						      data->bulk_in),
+				      buffer, USBTMC_SIZE_IOBUFFER, &actual,
+				      USBTMC_TIMEOUT);
+
+		/* Store bTag (in case we need to abort) */
+		data->bTag_last_read = data->bTag;
+
+		if (retval < 0) {
+			dev_err(dev, "Unable to read data, error %d\n", retval);
+			if (data->auto_abort)
+				usbtmc_ioctl_abort_bulk_in(data);
+			goto exit;
+		}
+
+		/* How many characters did the instrument send? */
+		n_characters = buffer[4] +
+			       (buffer[5] << 8) +
+			       (buffer[6] << 16) +
+			       (buffer[7] << 24);
+
+		/* Copy buffer to user space */
+		if (copy_to_user(buf + done, &buffer[12], n_characters)) {
+			/* There must have been an addressing problem */
+			retval = -EFAULT;
+			goto exit;
+		}
+
+		done += n_characters;
+		if (n_characters < USBTMC_SIZE_IOBUFFER)
+			remaining = 0;
+	}
+
+	/* Update file position value */
+	*f_pos = *f_pos + done;
+	retval = done;
+
+exit:
+	mutex_unlock(&data->io_mutex);
+	kfree(buffer);
+	return retval;
+}
+
+static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
+			    size_t count, loff_t *f_pos)
+{
+	struct usbtmc_device_data *data;
+	u8 *buffer;
+	int retval;
+	int actual;
+	unsigned long int n_bytes;
+	int n;
+	int remaining;
+	int done;
+	int this_part;
+
+	data = filp->private_data;
+
+	buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	mutex_lock(&data->io_mutex);
+
+	remaining = count;
+	done = 0;
+
+	while (remaining > 0) {
+		if (remaining > USBTMC_SIZE_IOBUFFER - 12) {
+			this_part = USBTMC_SIZE_IOBUFFER - 12;
+			buffer[8] = 0;
+		} else {
+			this_part = remaining;
+			buffer[8] = 1;
+		}
+
+		/* Setup IO buffer for DEV_DEP_MSG_OUT message */
+		buffer[0] = 1;
+		buffer[1] = data->bTag;
+		buffer[2] = ~(data->bTag);
+		buffer[3] = 0; /* Reserved */
+		buffer[4] = this_part & 255;
+		buffer[5] = (this_part >> 8) & 255;
+		buffer[6] = (this_part >> 16) & 255;
+		buffer[7] = (this_part >> 24) & 255;
+		/* buffer[8] is set above... */
+		buffer[9] = 0; /* Reserved */
+		buffer[10] = 0; /* Reserved */
+		buffer[11] = 0; /* Reserved */
+
+		if (copy_from_user(&buffer[12], buf + done, this_part)) {
+			retval = -EFAULT;
+			goto exit;
+		}
+
+		n_bytes = 12 + this_part;
+		if (this_part % 4)
+			n_bytes += 4 - this_part % 4;
+		for (n = 12 + this_part; n < n_bytes; n++)
+			buffer[n] = 0;
+
+		retval = usb_bulk_msg(data->usb_dev,
+				      usb_sndbulkpipe(data->usb_dev,
+						      data->bulk_out),
+				      buffer, n_bytes, &actual, USBTMC_TIMEOUT);
+
+		data->bTag_last_write = data->bTag;
+		data->bTag++;
+
+		if (!data->bTag)
+			data->bTag++;
+
+		if (retval < 0) {
+			dev_err(&data->intf->dev,
+				"Unable to send data, error %d\n", retval);
+			if (data->auto_abort)
+				usbtmc_ioctl_abort_bulk_out(data);
+			goto exit;
+		}
+
+		remaining -= this_part;
+		done += this_part;
+	}
+
+	retval = count;
+exit:
+	mutex_unlock(&data->io_mutex);
+	kfree(buffer);
+	return retval;
+}
+
+static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
+{
+	struct usb_host_interface *current_setting;
+	struct usb_endpoint_descriptor *desc;
+	struct device *dev;
+	u8 *buffer;
+	int rv;
+	int n;
+	int actual;
+	int max_size;
+
+	dev = &data->intf->dev;
+
+	dev_dbg(dev, "Sending INITIATE_CLEAR request\n");
+
+	buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_INITIATE_CLEAR,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			     0, 0, buffer, 1, USBTMC_TIMEOUT);
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "INITIATE_CLEAR returned %x\n", buffer[0]);
+
+	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+		dev_err(dev, "INITIATE_CLEAR returned %x\n", buffer[0]);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	max_size = 0;
+	current_setting = data->intf->cur_altsetting;
+	for (n = 0; n < current_setting->desc.bNumEndpoints; n++) {
+		desc = &current_setting->endpoint[n].desc;
+		if (desc->bEndpointAddress == data->bulk_in)
+			max_size = le16_to_cpu(desc->wMaxPacketSize);
+	}
+
+	if (max_size == 0) {
+		dev_err(dev, "Couldn't get wMaxPacketSize\n");
+		rv = -EPERM;
+		goto exit;
+	}
+
+	dev_dbg(dev, "wMaxPacketSize is %d\n", max_size);
+
+	n = 0;
+
+usbtmc_clear_check_status:
+
+	dev_dbg(dev, "Sending CHECK_CLEAR_STATUS request\n");
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_CHECK_CLEAR_STATUS,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			     0, 0, buffer, 2, USBTMC_TIMEOUT);
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "CHECK_CLEAR_STATUS returned %x\n", buffer[0]);
+
+	if (buffer[0] == USBTMC_STATUS_SUCCESS)
+		goto usbtmc_clear_bulk_out_halt;
+
+	if (buffer[0] != USBTMC_STATUS_PENDING) {
+		dev_err(dev, "CHECK_CLEAR_STATUS returned %x\n", buffer[0]);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	if (buffer[1] == 1)
+		do {
+			dev_dbg(dev, "Reading from bulk in EP\n");
+
+			rv = usb_bulk_msg(data->usb_dev,
+					  usb_rcvbulkpipe(data->usb_dev,
+							  data->bulk_in),
+					  buffer, USBTMC_SIZE_IOBUFFER,
+					  &actual, USBTMC_TIMEOUT);
+			n++;
+
+			if (rv < 0) {
+				dev_err(dev, "usb_control_msg returned %d\n",
+					rv);
+				goto exit;
+			}
+		} while ((actual == max_size) &&
+			  (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+
+	if (actual == max_size) {
+		dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
+			USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
+		rv = -EPERM;
+		goto exit;
+	}
+
+	goto usbtmc_clear_check_status;
+
+usbtmc_clear_bulk_out_halt:
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_sndctrlpipe(data->usb_dev, 0),
+			     USB_REQ_CLEAR_FEATURE,
+			     USB_DIR_OUT | USB_TYPE_STANDARD |
+			     USB_RECIP_ENDPOINT,
+			     USB_ENDPOINT_HALT,
+			     data->bulk_out, buffer, 0,
+			     USBTMC_TIMEOUT);
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+	rv = 0;
+
+exit:
+	kfree(buffer);
+	return rv;
+}
+
+static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
+{
+	u8 *buffer;
+	int rv;
+
+	buffer = kmalloc(2, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_sndctrlpipe(data->usb_dev, 0),
+			     USB_REQ_CLEAR_FEATURE,
+			     USB_DIR_OUT | USB_TYPE_STANDARD |
+			     USB_RECIP_ENDPOINT,
+			     USB_ENDPOINT_HALT, data->bulk_out,
+			     buffer, 0, USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
+			rv);
+		goto exit;
+	}
+	rv = 0;
+
+exit:
+	kfree(buffer);
+	return rv;
+}
+
+static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
+{
+	u8 *buffer;
+	int rv;
+
+	buffer = kmalloc(2, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev, usb_sndctrlpipe(data->usb_dev, 0),
+			     USB_REQ_CLEAR_FEATURE,
+			     USB_DIR_OUT | USB_TYPE_STANDARD |
+			     USB_RECIP_ENDPOINT,
+			     USB_ENDPOINT_HALT, data->bulk_in, buffer, 0,
+			     USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
+			rv);
+		goto exit;
+	}
+	rv = 0;
+
+exit:
+	kfree(buffer);
+	return rv;
+}
+
+static int get_capabilities(struct usbtmc_device_data *data)
+{
+	struct device *dev = &data->usb_dev->dev;
+	char *buffer;
+	int rv;
+
+	buffer = kmalloc(0x18, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev, usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_GET_CAPABILITIES,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			     0, 0, buffer, 0x18, USBTMC_TIMEOUT);
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		return rv;
+	}
+
+	dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
+	dev_dbg(dev, "Interface capabilities are %x\n", buffer[4]);
+	dev_dbg(dev, "Device capabilities are %x\n", buffer[5]);
+	dev_dbg(dev, "USB488 interface capabilities are %x\n", buffer[14]);
+	dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);
+	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+		dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
+		return -EPERM;
+	}
+
+	data->capabilities.interface_capabilities = buffer[4];
+	data->capabilities.device_capabilities = buffer[5];
+	data->capabilities.usb488_interface_capabilities = buffer[14];
+	data->capabilities.usb488_device_capabilities = buffer[15];
+
+	kfree(buffer);
+	return 0;
+}
+
+#define capability_attribute(name)					\
+static ssize_t show_##name(struct device *dev,				\
+			   struct device_attribute *attr, char *buf)	\
+{									\
+	struct usb_interface *intf = to_usb_interface(dev);		\
+	struct usbtmc_device_data *data = usb_get_intfdata(intf);	\
+									\
+	return sprintf(buf, "%d\n", data->capabilities.name);		\
+}									\
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+capability_attribute(interface_capabilities);
+capability_attribute(device_capabilities);
+capability_attribute(usb488_interface_capabilities);
+capability_attribute(usb488_device_capabilities);
+
+static struct attribute *capability_attrs[] = {
+	&dev_attr_interface_capabilities.attr,
+	&dev_attr_device_capabilities.attr,
+	&dev_attr_usb488_interface_capabilities.attr,
+	&dev_attr_usb488_device_capabilities.attr,
+	NULL,
+};
+
+static struct attribute_group capability_attr_grp = {
+	.attrs = capability_attrs,
+};
+
+static ssize_t show_TermChar(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usbtmc_device_data *data = usb_get_intfdata(intf);
+
+	return sprintf(buf, "%c\n", data->TermChar);
+}
+
+static ssize_t store_TermChar(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usbtmc_device_data *data = usb_get_intfdata(intf);
+
+	if (count < 1)
+		return -EINVAL;
+	data->TermChar = buf[0];
+	return count;
+}
+static DEVICE_ATTR(TermChar, S_IRUGO, show_TermChar, store_TermChar);
+
+#define data_attribute(name)						\
+static ssize_t show_##name(struct device *dev,				\
+			   struct device_attribute *attr, char *buf)	\
+{									\
+	struct usb_interface *intf = to_usb_interface(dev);		\
+	struct usbtmc_device_data *data = usb_get_intfdata(intf);	\
+									\
+	return sprintf(buf, "%d\n", data->name);			\
+}									\
+static ssize_t store_##name(struct device *dev,				\
+			    struct device_attribute *attr,		\
+			    const char *buf, size_t count)		\
+{									\
+	struct usb_interface *intf = to_usb_interface(dev);		\
+	struct usbtmc_device_data *data = usb_get_intfdata(intf);	\
+	ssize_t result;							\
+	unsigned val;							\
+									\
+	result = sscanf(buf, "%u\n", &val);				\
+	if (result != 1)						\
+		return -EINVAL;						\
+	data->name = val;						\
+	return count;							\
+}									\
+static DEVICE_ATTR(name, S_IRUGO, show_##name, store_##name)
+
+data_attribute(TermCharEnabled);
+data_attribute(auto_abort);
+
+static struct attribute *data_attrs[] = {
+	&dev_attr_TermChar.attr,
+	&dev_attr_TermCharEnabled.attr,
+	&dev_attr_auto_abort.attr,
+	NULL,
+};
+
+static struct attribute_group data_attr_grp = {
+	.attrs = data_attrs,
+};
+
+static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
+{
+	struct device *dev;
+	u8 *buffer;
+	int rv;
+
+	dev = &data->intf->dev;
+
+	buffer = kmalloc(2, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rv = usb_control_msg(data->usb_dev,
+			     usb_rcvctrlpipe(data->usb_dev, 0),
+			     USBTMC_REQUEST_INDICATOR_PULSE,
+			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			     0, 0, buffer, 0x01, USBTMC_TIMEOUT);
+
+	if (rv < 0) {
+		dev_err(dev, "usb_control_msg returned %d\n", rv);
+		goto exit;
+	}
+
+	dev_dbg(dev, "INDICATOR_PULSE returned %x\n", buffer[0]);
+
+	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+		dev_err(dev, "INDICATOR_PULSE returned %x\n", buffer[0]);
+		rv = -EPERM;
+		goto exit;
+	}
+	rv = 0;
+
+exit:
+	kfree(buffer);
+	return rv;
+}
+
+static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct usbtmc_device_data *data;
+	int retval = -EBADRQC;
+
+	data = file->private_data;
+	mutex_lock(&data->io_mutex);
+
+	switch (cmd) {
+	case USBTMC_IOCTL_CLEAR_OUT_HALT:
+		retval = usbtmc_ioctl_clear_out_halt(data);
+		break;
+
+	case USBTMC_IOCTL_CLEAR_IN_HALT:
+		retval = usbtmc_ioctl_clear_in_halt(data);
+		break;
+
+	case USBTMC_IOCTL_INDICATOR_PULSE:
+		retval = usbtmc_ioctl_indicator_pulse(data);
+		break;
+
+	case USBTMC_IOCTL_CLEAR:
+		retval = usbtmc_ioctl_clear(data);
+		break;
+
+	case USBTMC_IOCTL_ABORT_BULK_OUT:
+		retval = usbtmc_ioctl_abort_bulk_out(data);
+		break;
+
+	case USBTMC_IOCTL_ABORT_BULK_IN:
+		retval = usbtmc_ioctl_abort_bulk_in(data);
+		break;
+	}
+
+	mutex_unlock(&data->io_mutex);
+	return retval;
+}
+
+static struct file_operations fops = {
+	.owner		= THIS_MODULE,
+	.read		= usbtmc_read,
+	.write		= usbtmc_write,
+	.open		= usbtmc_open,
+	.release	= usbtmc_release,
+	.unlocked_ioctl	= usbtmc_ioctl,
+};
+
+static struct usb_class_driver usbtmc_class = {
+	.name =		"usbtmc%d",
+	.fops =		&fops,
+	.minor_base =	USBTMC_MINOR_BASE,
+};
+
+
+static int usbtmc_probe(struct usb_interface *intf,
+			const struct usb_device_id *id)
+{
+	struct usbtmc_device_data *data;
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	int n;
+	int retcode;
+
+	dev_dbg(&intf->dev, "%s called\n", __func__);
+
+	data = kmalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
+	if (!data) {
+		dev_err(&intf->dev, "Unable to allocate kernel memory\n");
+		return -ENOMEM;
+	}
+
+	data->intf = intf;
+	data->id = id;
+	data->usb_dev = usb_get_dev(interface_to_usbdev(intf));
+	usb_set_intfdata(intf, data);
+	kref_init(&data->kref);
+	mutex_init(&data->io_mutex);
+
+	/* Initialize USBTMC bTag and other fields */
+	data->bTag	= 1;
+	data->TermCharEnabled = 0;
+	data->TermChar = '\n';
+
+	/* USBTMC devices have only one setting, so use that */
+	iface_desc = data->intf->cur_altsetting;
+
+	/* Find bulk in endpoint */
+	for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
+		endpoint = &iface_desc->endpoint[n].desc;
+
+		if (usb_endpoint_is_bulk_in(endpoint)) {
+			data->bulk_in = endpoint->bEndpointAddress;
+			dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n",
+				data->bulk_in);
+			break;
+		}
+	}
+
+	/* Find bulk out endpoint */
+	for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
+		endpoint = &iface_desc->endpoint[n].desc;
+
+		if (usb_endpoint_is_bulk_out(endpoint)) {
+			data->bulk_out = endpoint->bEndpointAddress;
+			dev_dbg(&intf->dev, "Found Bulk out endpoint at %u\n",
+				data->bulk_out);
+			break;
+		}
+	}
+
+	retcode = get_capabilities(data);
+	if (retcode)
+		dev_err(&intf->dev, "can't read capabilities\n");
+	else
+		retcode = sysfs_create_group(&intf->dev.kobj,
+					     &capability_attr_grp);
+
+	retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);
+
+	retcode = usb_register_dev(intf, &usbtmc_class);
+	if (retcode) {
+		dev_err(&intf->dev, "Not able to get a minor"
+			" (base %u, slice default): %d\n", USBTMC_MINOR_BASE,
+			retcode);
+		goto error_register;
+	}
+	dev_dbg(&intf->dev, "Using minor number %d\n", intf->minor);
+
+	return 0;
+
+error_register:
+	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
+	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+	kref_put(&data->kref, usbtmc_delete);
+	return retcode;
+}
+
+static void usbtmc_disconnect(struct usb_interface *intf)
+{
+	struct usbtmc_device_data *data;
+
+	dev_dbg(&intf->dev, "usbtmc_disconnect called\n");
+
+	data = usb_get_intfdata(intf);
+	usb_deregister_dev(intf, &usbtmc_class);
+	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
+	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+	kref_put(&data->kref, usbtmc_delete);
+}
+
+static struct usb_driver usbtmc_driver = {
+	.name		= "usbtmc",
+	.id_table	= usbtmc_devices,
+	.probe		= usbtmc_probe,
+	.disconnect	= usbtmc_disconnect
+};
+
+static int __init usbtmc_init(void)
+{
+	int retcode;
+
+	retcode = usb_register(&usbtmc_driver);
+	if (retcode)
+		printk(KERN_ERR KBUILD_MODNAME": Unable to register driver\n");
+	return retcode;
+}
+module_init(usbtmc_init);
+
+static void __exit usbtmc_exit(void)
+{
+	usb_deregister(&usbtmc_driver);
+}
+module_exit(usbtmc_exit);
+
+MODULE_LICENSE("GPL");
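The new usbtmc driver exposes each instrument as a plain character device, so ordinary read()/write() calls are wrapped into DEV_DEP_MSG_OUT / DEV_DEP_MSG_IN transfers by usbtmc_write() and usbtmc_read(). A minimal user-space sketch (the node name /dev/usbtmc0 and the *IDN? query are illustrative assumptions, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/dev/usbtmc0", O_RDWR);	/* name comes from usbtmc_class */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* write() becomes a DEV_DEP_MSG_OUT bulk transfer */
	if (write(fd, "*IDN?\n", 6) != 6) {
		perror("write");
		close(fd);
		return 1;
	}

	/* read() issues a DEV_DEP_MSG_IN request and returns the response */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	printf("%s", buf);
	close(fd);
	return 0;
}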
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index cc9f397..e1759d17 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -134,5 +134,5 @@
 	  If you say Y here, then Linux will refuse to enumerate
 	  external hubs.  OTG hosts are allowed to reduce hardware
 	  and software costs by not supporting external hubs.  So
-	  are "Emedded Hosts" that don't offer OTG support.
+	  are "Embedded Hosts" that don't offer OTG support.
 
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 7a4fa79..2bccefe 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -413,7 +413,8 @@
 	if (likely(ifnum < 8*sizeof(ps->ifclaimed)))
 		clear_bit(ifnum, &ps->ifclaimed);
 	else
-		warn("interface number %u out of range", ifnum);
+		dev_warn(&intf->dev, "interface number %u out of range\n",
+			 ifnum);
 
 	usb_set_intfdata(intf, NULL);
 
@@ -624,6 +625,8 @@
 	smp_wmb();
 	list_add_tail(&ps->list, &dev->filelist);
 	file->private_data = ps;
+	snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
+			current->comm);
  out:
 	if (ret) {
 		kfree(ps);
@@ -1774,19 +1777,20 @@
 	retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX,
 					"usb_device");
 	if (retval) {
-		err("unable to register minors for usb_device");
+		printk(KERN_ERR "Unable to register minors for usb_device\n");
 		goto out;
 	}
 	cdev_init(&usb_device_cdev, &usbdev_file_operations);
 	retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX);
 	if (retval) {
-		err("unable to get usb_device major %d", USB_DEVICE_MAJOR);
+		printk(KERN_ERR "Unable to get usb_device major %d\n",
+		       USB_DEVICE_MAJOR);
 		goto error_cdev;
 	}
 #ifdef CONFIG_USB_DEVICE_CLASS
 	usb_classdev_class = class_create(THIS_MODULE, "usb_device");
 	if (IS_ERR(usb_classdev_class)) {
-		err("unable to register usb_device class");
+		printk(KERN_ERR "Unable to register usb_device class\n");
 		retval = PTR_ERR(usb_classdev_class);
 		cdev_del(&usb_device_cdev);
 		usb_classdev_class = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5a7fa6f..3d7793d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1070,7 +1070,8 @@
 				struct usb_driver *driver;
 
 				driver = to_usb_driver(intf->dev.driver);
-				if (!driver->reset_resume)
+				if (!driver->reset_resume ||
+				    intf->needs_remote_wakeup)
 					return -EOPNOTSUPP;
 			}
 		}
@@ -1609,7 +1610,8 @@
 	status = usb_resume_both(udev);
 	udev->last_busy = jiffies;
 	usb_pm_unlock(udev);
-	do_unbind_rebind(udev, DO_REBIND);
+	if (status == 0)
+		do_unbind_rebind(udev, DO_REBIND);
 
 	/* Now that the device is awake, we can start trying to autosuspend
 	 * it again. */
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 2291213..946fae4 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -169,7 +169,8 @@
 	error = alloc_chrdev_region(&dev, 0, MAX_ENDPOINT_MINORS,
 				    "usb_endpoint");
 	if (error) {
-		err("unable to get a dynamic major for usb endpoints");
+		printk(KERN_ERR "Unable to get a dynamic major for "
+		       "usb endpoints.\n");
 		return error;
 	}
 	usb_endpoint_major = MAJOR(dev);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 55f7f31..997e659 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -86,7 +86,7 @@
 	usb_class->class = class_create(THIS_MODULE, "usb");
 	if (IS_ERR(usb_class->class)) {
 		result = IS_ERR(usb_class->class);
-		err("class_create failed for usb devices");
+		printk(KERN_ERR "class_create failed for usb devices\n");
 		kfree(usb_class);
 		usb_class = NULL;
 	}
@@ -115,7 +115,8 @@
 
 	error = register_chrdev(USB_MAJOR, "usb", &usb_fops);
 	if (error)
-		err("unable to get major %d for usb devices", USB_MAJOR);
+		printk(KERN_ERR "Unable to get major %d for usb devices\n",
+		       USB_MAJOR);
 
 	return error;
 }
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index c8035a8..fc9018e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -81,6 +81,10 @@
 
 /*-------------------------------------------------------------------------*/
 
+/* Keep track of which host controller drivers are loaded */
+unsigned long usb_hcds_loaded;
+EXPORT_SYMBOL_GPL(usb_hcds_loaded);
+
 /* host controllers we manage */
 LIST_HEAD (usb_bus_list);
 EXPORT_SYMBOL_GPL (usb_bus_list);
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index e710ce0..2dcde61 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -482,4 +482,10 @@
  */
 extern struct rw_semaphore ehci_cf_port_reset_rwsem;
 
+/* Keep track of which host controller drivers are loaded */
+#define USB_UHCI_LOADED		0
+#define USB_OHCI_LOADED		1
+#define USB_EHCI_LOADED		2
+extern unsigned long usb_hcds_loaded;
+
 #endif /* __KERNEL__ */
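usb_hcds_loaded is just a bitmask of the three host-controller driver types. A hedged sketch of how an EHCI-style driver's init path might use it to warn about companion-controller load order (the policy shown here is an assumption; this patch only adds the bitmask itself):

#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include "hcd.h"		/* usb_hcds_loaded, USB_*_LOADED */

static int __init example_hcd_init(void)
{
	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);

	/* EHCI should normally be bound before its UHCI/OHCI companions. */
	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
	    test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
		printk(KERN_WARNING "example_hcd loaded after a companion "
		       "controller driver\n");
	return 0;
}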
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d999638..9b3f16b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -77,6 +77,7 @@
 	unsigned		has_indicators:1;
 	u8			indicator[USB_MAXCHILDREN];
 	struct delayed_work	leds;
+	struct delayed_work	init_work;
 };
 
 
@@ -100,6 +101,15 @@
 MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
 
 /*
+ * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about
+ * 10 seconds to send reply for the initial 64-byte descriptor request.
+ */
+/* define initial 64-byte descriptor request timeout in milliseconds */
+static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT;
+module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(initial_descriptor_timeout, "initial 64-byte descriptor request timeout in milliseconds (default 5000 - 5.0 seconds)");
+
+/*
  * As of 2.6.10 we introduce a new USB device initialization scheme which
  * closely resembles the way Windows works.  Hopefully it will be compatible
  * with a wider range of devices than the old scheme.  However some previously
@@ -515,10 +525,14 @@
 }
 EXPORT_SYMBOL_GPL(usb_hub_tt_clear_buffer);
 
-static void hub_power_on(struct usb_hub *hub)
+/* If do_delay is false, return the number of milliseconds the caller
+ * needs to delay.
+ */
+static unsigned hub_power_on(struct usb_hub *hub, bool do_delay)
 {
 	int port1;
 	unsigned pgood_delay = hub->descriptor->bPwrOn2PwrGood * 2;
+	unsigned delay;
 	u16 wHubCharacteristics =
 			le16_to_cpu(hub->descriptor->wHubCharacteristics);
 
@@ -537,7 +551,10 @@
 		set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
 
 	/* Wait at least 100 msec for power to become stable */
-	msleep(max(pgood_delay, (unsigned) 100));
+	delay = max(pgood_delay, (unsigned) 100);
+	if (do_delay)
+		msleep(delay);
+	return delay;
 }
 
 static int hub_hub_status(struct usb_hub *hub,
@@ -599,21 +616,55 @@
 }
 
 enum hub_activation_type {
-	HUB_INIT, HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME
+	HUB_INIT, HUB_INIT2, HUB_INIT3,
+	HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
 };
 
+static void hub_init_func2(struct work_struct *ws);
+static void hub_init_func3(struct work_struct *ws);
+
 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 {
 	struct usb_device *hdev = hub->hdev;
 	int port1;
 	int status;
 	bool need_debounce_delay = false;
+	unsigned delay;
+
+	/* Continue a partial initialization */
+	if (type == HUB_INIT2)
+		goto init2;
+	if (type == HUB_INIT3)
+		goto init3;
 
 	/* After a resume, port power should still be on.
 	 * For any other type of activation, turn it on.
 	 */
-	if (type != HUB_RESUME)
-		hub_power_on(hub);
+	if (type != HUB_RESUME) {
+
+		/* Speed up system boot by using a delayed_work for the
+		 * hub's initial power-up delays.  This is pretty awkward
+		 * and the implementation looks like a home-brewed sort of
+		 * setjmp/longjmp, but it saves at least 100 ms for each
+		 * root hub (assuming usbcore is compiled into the kernel
+		 * rather than as a module).  It adds up.
+		 *
+		 * This can't be done for HUB_RESUME or HUB_RESET_RESUME
+		 * because for those activation types the ports have to be
+		 * operational when we return.  In theory this could be done
+		 * for HUB_POST_RESET, but it's easier not to.
+		 */
+		if (type == HUB_INIT) {
+			delay = hub_power_on(hub, false);
+			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
+			schedule_delayed_work(&hub->init_work,
+					msecs_to_jiffies(delay));
+			return;		/* Continues at init2: below */
+		} else {
+			hub_power_on(hub, true);
+		}
+	}
+ init2:
 
 	/* Check each port and set hub->change_bits to let khubd know
 	 * which ports need attention.
@@ -692,9 +743,20 @@
 	 * If any port-status changes do occur during this delay, khubd
 	 * will see them later and handle them normally.
 	 */
-	if (need_debounce_delay)
-		msleep(HUB_DEBOUNCE_STABLE);
+	if (need_debounce_delay) {
+		delay = HUB_DEBOUNCE_STABLE;
 
+		/* Don't do a long sleep inside a workqueue routine */
+		if (type == HUB_INIT2) {
+			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
+			schedule_delayed_work(&hub->init_work,
+					msecs_to_jiffies(delay));
+			return;		/* Continues at init3: below */
+		} else {
+			msleep(delay);
+		}
+	}
+ init3:
 	hub->quiescing = 0;
 
 	status = usb_submit_urb(hub->urb, GFP_NOIO);
@@ -707,6 +769,21 @@
 	kick_khubd(hub);
 }
 
+/* Implement the continuations for the delays above */
+static void hub_init_func2(struct work_struct *ws)
+{
+	struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
+
+	hub_activate(hub, HUB_INIT2);
+}
+
+static void hub_init_func3(struct work_struct *ws)
+{
+	struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
+
+	hub_activate(hub, HUB_INIT3);
+}
+
 enum hub_quiescing_type {
 	HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
 };
@@ -716,6 +793,8 @@
 	struct usb_device *hdev = hub->hdev;
 	int i;
 
+	cancel_delayed_work_sync(&hub->init_work);
+
 	/* khubd and related activity won't re-trigger */
 	hub->quiescing = 1;
 
@@ -1099,6 +1178,7 @@
 	hub->intfdev = &intf->dev;
 	hub->hdev = hdev;
 	INIT_DELAYED_WORK(&hub->leds, led_work);
+	INIT_DELAYED_WORK(&hub->init_work, NULL);
 	usb_get_intf(intf);
 
 	usb_set_intfdata (intf, hub);
@@ -2467,7 +2547,7 @@
 					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
 					USB_DT_DEVICE << 8, 0,
 					buf, GET_DESCRIPTOR_BUFSIZE,
-					USB_CTRL_GET_TIMEOUT);
+					initial_descriptor_timeout);
 				switch (buf->bMaxPacketSize0) {
 				case 8: case 16: case 32: case 64: case 255:
 					if (buf->bDescriptorType ==
@@ -3035,7 +3115,7 @@
 					i);
 				clear_port_feature(hdev, i,
 					USB_PORT_FEAT_C_OVER_CURRENT);
-				hub_power_on(hub);
+				hub_power_on(hub, true);
 			}
 
 			if (portchange & USB_PORT_STAT_C_RESET) {
@@ -3070,7 +3150,7 @@
 				dev_dbg (hub_dev, "overcurrent change\n");
 				msleep(500);	/* Cool down */
 				clear_hub_feature(hdev, C_HUB_OVER_CURRENT);
-                        	hub_power_on(hub);
+                        	hub_power_on(hub, true);
 			}
 		}
 
@@ -3424,7 +3504,7 @@
 						USB_INTERFACE_BOUND)
 					rebind = 1;
 			}
-			if (rebind)
+			if (ret == 0 && rebind)
 				usb_rebind_intf(cintf);
 		}
 	}
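The HUB_INIT/HUB_INIT2/HUB_INIT3 machinery above is a general way of turning a long msleep() in an init path into a rescheduled delayed_work continuation. A stripped-down sketch of the same pattern with hypothetical names (error handling and locking omitted):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_dev {
	struct delayed_work init_work;	/* set up with INIT_DELAYED_WORK() */
};

enum example_phase { EX_INIT, EX_INIT2 };

static void example_init_func2(struct work_struct *ws);

static void example_activate(struct example_dev *dev, enum example_phase phase)
{
	if (phase == EX_INIT2)
		goto init2;

	/* Instead of msleep(100), hand the rest of the work to a
	 * delayed_work and return early. */
	PREPARE_DELAYED_WORK(&dev->init_work, example_init_func2);
	schedule_delayed_work(&dev->init_work, msecs_to_jiffies(100));
	return;			/* continues at init2: below */

 init2:
	/* ... remainder of the initialization runs here ... */
	;
}

static void example_init_func2(struct work_struct *ws)
{
	struct example_dev *dev =
		container_of(ws, struct example_dev, init_work.work);

	example_activate(dev, EX_INIT2);
}

As in hub_quiesce() above, any teardown path then has to cancel_delayed_work_sync() the work before the structure goes away.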
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 77fa7a0..9463226 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -180,8 +180,8 @@
 			listmode = option & S_IRWXUGO;
 			break;
 		default:
-			err("usbfs: unrecognised mount option \"%s\" "
-			    "or missing value\n", p);
+			printk(KERN_ERR "usbfs: unrecognised mount option "
+			       "\"%s\" or missing value\n", p);
 			return -EINVAL;
 		}
 	}
@@ -240,7 +240,9 @@
 				update_special(bus);
 				break;
 			default:
-				warn("Unknown node %s mode %x found on remount!\n",bus->d_name.name,bus->d_inode->i_mode);
+				printk(KERN_WARNING "usbfs: Unknown node %s "
+				       "mode %x found on remount!\n",
+				       bus->d_name.name, bus->d_inode->i_mode);
 				break;
 			}
 		}
@@ -259,7 +261,7 @@
 		return 0;
 
 	if (parse_options(sb, data)) {
-		warn("usbfs: mount parameter error:");
+		printk(KERN_WARNING "usbfs: mount parameter error.\n");
 		return -EINVAL;
 	}
 
@@ -599,7 +601,7 @@
 	/* create the devices special file */
 	retval = simple_pin_fs(&usb_fs_type, &usbfs_mount, &usbfs_mount_count);
 	if (retval) {
-		err ("Unable to get usbfs mount");
+		printk(KERN_ERR "Unable to get usbfs mount\n");
 		goto exit;
 	}
 
@@ -611,7 +613,7 @@
 					       NULL, &usbfs_devices_fops,
 					       listuid, listgid);
 	if (devices_usbfs_dentry == NULL) {
-		err ("Unable to create devices usbfs file");
+		printk(KERN_ERR "Unable to create devices usbfs file\n");
 		retval = -ENODEV;
 		goto error_clean_mounts;
 	}
@@ -663,7 +665,7 @@
 	bus->usbfs_dentry = fs_create_file (name, busmode | S_IFDIR, parent,
 					    bus, NULL, busuid, busgid);
 	if (bus->usbfs_dentry == NULL) {
-		err ("error creating usbfs bus entry");
+		printk(KERN_ERR "Error creating usbfs bus entry\n");
 		return;
 	}
 }
@@ -694,7 +696,7 @@
 					    &usbdev_file_operations,
 					    devuid, devgid);
 	if (dev->usbfs_dentry == NULL) {
-		err ("error creating usbfs device entry");
+		printk(KERN_ERR "Error creating usbfs device entry\n");
 		return;
 	}
 
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 286b443..8877385 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1204,7 +1204,8 @@
 
 	alt = usb_altnum_to_altsetting(iface, alternate);
 	if (!alt) {
-		warn("selecting invalid altsetting %d", alternate);
+		dev_warn(&dev->dev, "selecting invalid altsetting %d",
+			 alternate);
 		return -EINVAL;
 	}
 
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5e1f5d5..f66fba1 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -743,6 +743,29 @@
 }
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
+static ssize_t show_supports_autosuspend(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf;
+	struct usb_device *udev;
+	int ret;
+
+	intf = to_usb_interface(dev);
+	udev = interface_to_usbdev(intf);
+
+	usb_lock_device(udev);
+	/* Devices will be autosuspended even when an interface isn't claimed */
+	if (!intf->dev.driver ||
+			to_usb_driver(intf->dev.driver)->supports_autosuspend)
+		ret = sprintf(buf, "%u\n", 1);
+	else
+		ret = sprintf(buf, "%u\n", 0);
+	usb_unlock_device(udev);
+
+	return ret;
+}
+static DEVICE_ATTR(supports_autosuspend, S_IRUGO, show_supports_autosuspend, NULL);
+
 static struct attribute *intf_attrs[] = {
 	&dev_attr_bInterfaceNumber.attr,
 	&dev_attr_bAlternateSetting.attr,
@@ -751,6 +774,7 @@
 	&dev_attr_bInterfaceSubClass.attr,
 	&dev_attr_bInterfaceProtocol.attr,
 	&dev_attr_modalias.attr,
+	&dev_attr_supports_autosuspend.attr,
 	NULL,
 };
 static struct attribute_group intf_attr_grp = {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 47111e8..f263800 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -10,6 +10,8 @@
 
 #define to_urb(d) container_of(d, struct urb, kref)
 
+static DEFINE_SPINLOCK(usb_reject_lock);
+
 static void urb_destroy(struct kref *kref)
 {
 	struct urb *urb = to_urb(kref);
@@ -68,7 +70,7 @@
 		iso_packets * sizeof(struct usb_iso_packet_descriptor),
 		mem_flags);
 	if (!urb) {
-		err("alloc_urb: kmalloc failed");
+		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
 		return NULL;
 	}
 	usb_init_urb(urb);
@@ -127,6 +129,13 @@
 	usb_get_urb(urb);
 	list_add_tail(&urb->anchor_list, &anchor->urb_list);
 	urb->anchor = anchor;
+
+	if (unlikely(anchor->poisoned)) {
+		spin_lock(&usb_reject_lock);
+		urb->reject++;
+		spin_unlock(&usb_reject_lock);
+	}
+
 	spin_unlock_irqrestore(&anchor->lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_anchor_urb);
@@ -398,7 +407,7 @@
 
 	/* fail if submitter gave bogus flags */
 	if (urb->transfer_flags != orig_flags) {
-		err("BOGUS urb flags, %x --> %x",
+		dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n",
 			orig_flags, urb->transfer_flags);
 		return -EINVAL;
 	}
@@ -544,25 +553,70 @@
  */
 void usb_kill_urb(struct urb *urb)
 {
-	static DEFINE_MUTEX(reject_mutex);
-
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	mutex_lock(&reject_mutex);
+	spin_lock_irq(&usb_reject_lock);
 	++urb->reject;
-	mutex_unlock(&reject_mutex);
+	spin_unlock_irq(&usb_reject_lock);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
 
-	mutex_lock(&reject_mutex);
+	spin_lock_irq(&usb_reject_lock);
 	--urb->reject;
-	mutex_unlock(&reject_mutex);
+	spin_unlock_irq(&usb_reject_lock);
 }
 EXPORT_SYMBOL_GPL(usb_kill_urb);
 
 /**
+ * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
+ * @urb: pointer to URB describing a previously submitted request,
+ *	may be NULL
+ *
+ * This routine cancels an in-progress request.  It is guaranteed that
+ * upon return all completion handlers will have finished and the URB
+ * will be totally idle and cannot be reused.  These features make
+ * this an ideal way to stop I/O in a disconnect() callback.
+ * If the request has not already finished or been unlinked
+ * the completion handler will see urb->status == -ENOENT.
+ *
+ * After and while the routine runs, attempts to resubmit the URB will fail
+ * with error -EPERM.  Thus even if the URB's completion handler always
+ * tries to resubmit, it will not succeed and the URB will become idle.
+ *
+ * This routine may not be used in an interrupt context (such as a bottom
+ * half or a completion handler), or when holding a spinlock, or in other
+ * situations where the caller can't schedule().
+ */
+void usb_poison_urb(struct urb *urb)
+{
+	might_sleep();
+	if (!(urb && urb->dev && urb->ep))
+		return;
+	spin_lock_irq(&usb_reject_lock);
+	++urb->reject;
+	spin_unlock_irq(&usb_reject_lock);
+
+	usb_hcd_unlink_urb(urb, -ENOENT);
+	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
+}
+EXPORT_SYMBOL_GPL(usb_poison_urb);
+
+void usb_unpoison_urb(struct urb *urb)
+{
+	unsigned long flags;
+
+	if (!urb)
+		return;
+
+	spin_lock_irqsave(&usb_reject_lock, flags);
+	--urb->reject;
+	spin_unlock_irqrestore(&usb_reject_lock, flags);
+}
+EXPORT_SYMBOL_GPL(usb_unpoison_urb);
+
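
A usage sketch for the poison/unpoison pair (hypothetical driver code; struct skel_dev and its int_urb field are illustrative, not part of this file): poison in pre_reset() so the completion handler cannot resubmit, then unpoison in post_reset() once the device is usable again.

static int skel_pre_reset(struct usb_interface *intf)
{
	struct skel_dev *dev = usb_get_intfdata(intf);

	/* waits for completion; resubmission now fails with -EPERM */
	usb_poison_urb(dev->int_urb);
	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct skel_dev *dev = usb_get_intfdata(intf);

	usb_unpoison_urb(dev->int_urb);
	return usb_submit_urb(dev->int_urb, GFP_NOIO);
}
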
+/**
  * usb_kill_anchored_urbs - cancel transfer requests en masse
  * @anchor: anchor the requests are bound to
  *
@@ -589,6 +643,35 @@
 }
 EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
 
+
+/**
+ * usb_poison_anchored_urbs - cease all traffic from an anchor
+ * @anchor: anchor the requests are bound to
+ *
+ * This allows all outstanding URBs to be poisoned, starting
+ * from the back of the queue. Newly added URBs will also be
+ * poisoned.
+ */
+void usb_poison_anchored_urbs(struct usb_anchor *anchor)
+{
+	struct urb *victim;
+
+	spin_lock_irq(&anchor->lock);
+	anchor->poisoned = 1;
+	while (!list_empty(&anchor->urb_list)) {
+		victim = list_entry(anchor->urb_list.prev, struct urb,
+				    anchor_list);
+		/* we must make sure the URB isn't freed before we kill it*/
+		usb_get_urb(victim);
+		spin_unlock_irq(&anchor->lock);
+		/* this will unanchor the URB */
+		usb_poison_urb(victim);
+		usb_put_urb(victim);
+		spin_lock_irq(&anchor->lock);
+	}
+	spin_unlock_irq(&anchor->lock);
+}
+EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
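
A disconnect-path sketch (hypothetical driver; the submitted anchor field is illustrative): poisoning the anchor both cancels what is in flight and keeps late completion handlers from restarting I/O.

static void skel_disconnect(struct usb_interface *intf)
{
	struct skel_dev *dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	/* nothing anchored here can be (re)submitted after this returns */
	usb_poison_anchored_urbs(&dev->submitted);
	/* ... release driver state ... */
}
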
 /**
  * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
  * @anchor: anchor the requests are bound to
@@ -633,3 +716,73 @@
 				  msecs_to_jiffies(timeout));
 }
 EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
+
+/**
+ * usb_get_from_anchor - get an anchor's oldest urb
+ * @anchor: the anchor whose urb you want
+ *
+ * This will take the oldest urb from an anchor,
+ * unanchor it, and return it.
+ */
+struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
+{
+	struct urb *victim;
+	unsigned long flags;
+
+	spin_lock_irqsave(&anchor->lock, flags);
+	if (!list_empty(&anchor->urb_list)) {
+		victim = list_entry(anchor->urb_list.next, struct urb,
+				    anchor_list);
+		usb_get_urb(victim);
+		spin_unlock_irqrestore(&anchor->lock, flags);
+		usb_unanchor_urb(victim);
+	} else {
+		spin_unlock_irqrestore(&anchor->lock, flags);
+		victim = NULL;
+	}
+
+	return victim;
+}
+
+EXPORT_SYMBOL_GPL(usb_get_from_anchor);
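
Since usb_get_from_anchor() grabs a reference before unanchoring, the caller drops that reference once it is done with the urb. A draining loop might look like this (sketch; the deferred anchor and skel_dev are hypothetical):

static void skel_resubmit_deferred(struct skel_dev *dev)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred)) != NULL) {
		usb_submit_urb(urb, GFP_KERNEL);	/* error handling omitted */
		usb_put_urb(urb);	/* balance the reference taken for us */
	}
}
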
+
+/**
+ * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
+ * @anchor: the anchor whose urbs you want to unanchor
+ *
+ * Use this to get rid of all of an anchor's urbs.
+ */
+void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
+{
+	struct urb *victim;
+	unsigned long flags;
+
+	spin_lock_irqsave(&anchor->lock, flags);
+	while (!list_empty(&anchor->urb_list)) {
+		victim = list_entry(anchor->urb_list.prev, struct urb,
+				    anchor_list);
+		usb_get_urb(victim);
+		spin_unlock_irqrestore(&anchor->lock, flags);
+		/* this may free the URB */
+		usb_unanchor_urb(victim);
+		usb_put_urb(victim);
+		spin_lock_irqsave(&anchor->lock, flags);
+	}
+	spin_unlock_irqrestore(&anchor->lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
+
+/**
+ * usb_anchor_empty - is an anchor empty
+ * @anchor: the anchor you want to query
+ *
+ * Returns 1 if the anchor has no urbs associated with it.
+ */
+int usb_anchor_empty(struct usb_anchor *anchor)
+{
+	return list_empty(&anchor->urb_list);
+}
+
+EXPORT_SYMBOL_GPL(usb_anchor_empty);
+
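
A suspend-path sketch tying this together (hypothetical driver fields); usb_scuttle_anchored_urbs() above is the lighter-weight option for when anchored urbs merely need to be dropped from the anchor rather than cancelled.

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct skel_dev *dev = usb_get_intfdata(intf);

	/* refuse to suspend while anchored transfers are still in flight */
	if (!usb_anchor_empty(&dev->submitted))
		return -EBUSY;
	return 0;
}
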
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index acc95b2..dd4cd5a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -45,7 +45,7 @@
 
 config USB_GADGET_DEBUG
 	boolean "Debugging messages (DEVELOPMENT)"
-	depends on USB_GADGET && DEBUG_KERNEL
+	depends on DEBUG_KERNEL
 	help
 	   Many controller and gadget drivers will print some debugging
 	   messages if you use this option to ask for those messages.
@@ -59,7 +59,7 @@
 
 config USB_GADGET_DEBUG_FILES
 	boolean "Debugging information files (DEVELOPMENT)"
-	depends on USB_GADGET && PROC_FS
+	depends on PROC_FS
 	help
 	   Some of the drivers in the "gadget" framework can expose
 	   debugging information in files such as /proc/driver/udc
@@ -70,7 +70,7 @@
 
 config USB_GADGET_DEBUG_FS
 	boolean "Debugging information files in debugfs (DEVELOPMENT)"
-	depends on USB_GADGET && DEBUG_FS
+	depends on DEBUG_FS
 	help
 	   Some of the drivers in the "gadget" framework can expose
 	   debugging information in files under /sys/kernel/debug/.
@@ -79,12 +79,36 @@
 	   Enable these files by choosing "Y" here.  If in doubt, or
 	   to conserve kernel memory, say "N".
 
+config USB_GADGET_VBUS_DRAW
+	int "Maximum VBUS Power usage (2-500 mA)"
+	range 2 500
+	default 2
+	help
+	   Some devices need to draw power from USB when they are
+	   configured, perhaps to operate circuitry or to recharge
+	   batteries.  This is in addition to any local power supply,
+	   such as an AC adapter or batteries.
+
+	   Enter the maximum power your device draws through USB, in
+	   milliAmperes.  The permitted range of values is 2 - 500 mA;
+	   0 mA would be legal, but can make some hosts misbehave.
+
+	   This value will be used except for system-specific gadget
+	   drivers that have more specific information.
+
 config	USB_GADGET_SELECTED
 	boolean
 
 #
 # USB Peripheral Controller Support
 #
+# The order here is alphabetical, except that integrated controllers go
+# before discrete ones so they will be the initial/default value:
+#   - integrated/SOC controllers first
+#   - licensed IP used in both SOC and discrete versions
+#   - discrete ones (including all PCI-only controllers)
+#   - debug/dummy gadget+hcd is last.
+#
 choice
 	prompt "USB Peripheral Controller"
 	depends on USB_GADGET
@@ -94,26 +118,27 @@
 	   Many controller drivers are platform-specific; these
 	   often need board-specific hooks.
 
-config USB_GADGET_AMD5536UDC
-	boolean "AMD5536 UDC"
-	depends on PCI
-	select USB_GADGET_DUALSPEED
+#
+# Integrated controllers
+#
+
+config USB_GADGET_AT91
+	boolean "Atmel AT91 USB Device Port"
+	depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9
+	select USB_GADGET_SELECTED
 	help
-	   The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
-	   It is a USB Highspeed DMA capable USB device controller. Beside ep0
-	   it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
-	   The UDC port supports OTG operation, and may be used as a host port
-	   if it's not being used to implement peripheral or OTG roles.
+	   Many Atmel AT91 processors (such as the AT91RM9200) have a
+	   full speed USB Device Port with support for five configurable
+	   endpoints (plus endpoint zero).
 
 	   Say "y" to link the driver statically, or "m" to build a
-	   dynamically linked module called "amd5536udc" and force all
+	   dynamically linked module called "at91_udc" and force all
 	   gadget drivers to also be dynamically linked.
 
-config USB_AMD5536UDC
+config USB_AT91
 	tristate
-	depends on USB_GADGET_AMD5536UDC
+	depends on USB_GADGET_AT91
 	default USB_GADGET
-	select USB_GADGET_SELECTED
 
 config USB_GADGET_ATMEL_USBA
 	boolean "Atmel USBA"
@@ -150,28 +175,50 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
-config USB_GADGET_NET2280
-	boolean "NetChip 228x"
-	depends on PCI
-	select USB_GADGET_DUALSPEED
+config USB_GADGET_LH7A40X
+	boolean "LH7A40X"
+	depends on ARCH_LH7A40X
 	help
-	   NetChip 2280 / 2282 is a PCI based USB peripheral controller which
-	   supports both full and high speed USB 2.0 data transfers.  
-	   
-	   It has six configurable endpoints, as well as endpoint zero
-	   (for control transfers) and several endpoints with dedicated
-	   functions.
+	   This driver provides a USB Device Controller driver for the LH7A40x.
 
-	   Say "y" to link the driver statically, or "m" to build a
-	   dynamically linked module called "net2280" and force all
-	   gadget drivers to also be dynamically linked.
-
-config USB_NET2280
+config USB_LH7A40X
 	tristate
-	depends on USB_GADGET_NET2280
+	depends on USB_GADGET_LH7A40X
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
+config USB_GADGET_OMAP
+	boolean "OMAP USB Device Controller"
+	depends on ARCH_OMAP
+	select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
+	help
+	   Many Texas Instruments OMAP processors have flexible full
+	   speed USB device controllers, with support for up to 30
+	   endpoints (plus endpoint zero).  This driver supports the
+	   controller in the OMAP 1611, and should work with controllers
+	   in other OMAP processors too, given minor tweaks.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "omap_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_OMAP
+	tristate
+	depends on USB_GADGET_OMAP
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_OTG
+	boolean "OTG Support"
+	depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD
+	help
+	   The most notable feature of USB OTG is support for a
+	   "Dual-Role" device, which can act as either a device
+	   or a host.  The initial role choice can be changed
+	   later, when two dual-role devices talk to each other.
+
+	   Select this only if your OMAP board has a Mini-AB connector.
+
 config USB_GADGET_PXA25X
 	boolean "PXA 25x or IXP 4xx"
 	depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
@@ -203,6 +250,61 @@
 	default y if USB_ETH
 	default y if USB_G_SERIAL
 
+config USB_GADGET_PXA27X
+	boolean "PXA 27x"
+	depends on ARCH_PXA && PXA27x
+	help
+	   Intel's PXA 27x series XScale ARM v5TE processors include
+	   an integrated full speed USB 1.1 device controller.
+
+	   It has up to 23 endpoints, as well as endpoint zero (for
+	   control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "pxa27x_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_PXA27X
+	tristate
+	depends on USB_GADGET_PXA27X
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_GADGET_S3C2410
+	boolean "S3C2410 USB Device Controller"
+	depends on ARCH_S3C2410
+	help
+	  Samsung's S3C2410 is an ARM-4 processor with an integrated
+	  full speed USB 1.1 device controller.  It has 4 configurable
+	  endpoints, as well as endpoint zero (for control transfers).
+
+	  This driver has been tested on the S3C2410, S3C2412, and
+	  S3C2440 processors.
+
+config USB_S3C2410
+	tristate
+	depends on USB_GADGET_S3C2410
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_S3C2410_DEBUG
+	boolean "S3C2410 udc debug messages"
+	depends on USB_GADGET_S3C2410
+
+#
+# Controllers available in both integrated and discrete versions
+#
+
+# musb builds in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+	boolean "Inventra HDRC USB Peripheral (TI, ...)"
+	depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+	select USB_GADGET_DUALSPEED
+	select USB_GADGET_SELECTED
+	help
+	  This OTG-capable silicon IP is used in dual designs including
+	  the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
 config USB_GADGET_M66592
 	boolean "Renesas M66592 USB Peripheral Controller"
 	select USB_GADGET_DUALSPEED
@@ -231,23 +333,69 @@
 	   However, this problem is improved if change a value of
 	   NET_IP_ALIGN to 4.
 
-config USB_GADGET_PXA27X
-	boolean "PXA 27x"
-	depends on ARCH_PXA && PXA27x
-	help
-	   Intel's PXA 27x series XScale ARM v5TE processors include
-	   an integrated full speed USB 1.1 device controller.
+#
+# Controllers available only in discrete form (and all PCI controllers)
+#
 
-	   It has up to 23 endpoints, as well as endpoint zero (for
-	   control transfers).
+config USB_GADGET_AMD5536UDC
+	boolean "AMD5536 UDC"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
+	   It is a USB Highspeed DMA capable USB device controller. Beside ep0
+	   it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+	   The UDC port supports OTG operation, and may be used as a host port
+	   if it's not being used to implement peripheral or OTG roles.
 
 	   Say "y" to link the driver statically, or "m" to build a
-	   dynamically linked module called "pxa27x_udc" and force all
+	   dynamically linked module called "amd5536udc" and force all
 	   gadget drivers to also be dynamically linked.
 
-config USB_PXA27X
+config USB_AMD5536UDC
 	tristate
-	depends on USB_GADGET_PXA27X
+	depends on USB_GADGET_AMD5536UDC
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_GADGET_FSL_QE
+	boolean "Freescale QE/CPM USB Device Controller"
+	depends on FSL_SOC && (QUICC_ENGINE || CPM)
+	help
+	   Some Freescale PowerPC processors have a Full Speed
+	   QE/CPM2 USB controller, which supports device mode with 4
+	   programmable endpoints. This driver supports the
+	   controller in the MPC8360 and MPC8272, and should work with
+	   controllers having QE or CPM2, given minor tweaks.
+
+	   Set CONFIG_USB_GADGET to "m" to build this driver as a
+	   dynamically linked module called "fsl_qe_udc".
+
+config USB_FSL_QE
+	tristate
+	depends on USB_GADGET_FSL_QE
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_GADGET_NET2280
+	boolean "NetChip 228x"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   NetChip 2280 / 2282 is a PCI based USB peripheral controller which
+	   supports both full and high speed USB 2.0 data transfers.
+
+	   It has six configurable endpoints, as well as endpoint zero
+	   (for control transfers) and several endpoints with dedicated
+	   functions.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "net2280" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_NET2280
+	tristate
+	depends on USB_GADGET_NET2280
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
@@ -257,7 +405,7 @@
 	help
 	   The Toshiba TC86C001 is a PCI device which includes controllers
 	   for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
-	   
+
 	   The device controller has three configurable (bulk or interrupt)
 	   endpoints, plus endpoint zero (for control transfers).
 
@@ -272,98 +420,9 @@
 	select USB_GADGET_SELECTED
 
 
-config USB_GADGET_LH7A40X
-	boolean "LH7A40X"
-	depends on ARCH_LH7A40X
-	help
-    This driver provides USB Device Controller driver for LH7A40x
-
-config USB_LH7A40X
-	tristate
-	depends on USB_GADGET_LH7A40X
-	default USB_GADGET
-	select USB_GADGET_SELECTED
-
-# built in ../musb along with host support
-config USB_GADGET_MUSB_HDRC
-	boolean "Inventra HDRC USB Peripheral (TI, ...)"
-	depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
-	select USB_GADGET_DUALSPEED
-	select USB_GADGET_SELECTED
-	help
-	  This OTG-capable silicon IP is used in dual designs including
-	  the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
-
-config USB_GADGET_OMAP
-	boolean "OMAP USB Device Controller"
-	depends on ARCH_OMAP
-	select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
-	help
-	   Many Texas Instruments OMAP processors have flexible full
-	   speed USB device controllers, with support for up to 30
-	   endpoints (plus endpoint zero).  This driver supports the
-	   controller in the OMAP 1611, and should work with controllers
-	   in other OMAP processors too, given minor tweaks.
-
-	   Say "y" to link the driver statically, or "m" to build a
-	   dynamically linked module called "omap_udc" and force all
-	   gadget drivers to also be dynamically linked.
-
-config USB_OMAP
-	tristate
-	depends on USB_GADGET_OMAP
-	default USB_GADGET
-	select USB_GADGET_SELECTED
-
-config USB_OTG
-	boolean "OTG Support"
-	depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD
-	help
-	   The most notable feature of USB OTG is support for a
-	   "Dual-Role" device, which can act as either a device
-	   or a host.  The initial role choice can be changed
-	   later, when two dual-role devices talk to each other.
-
-	   Select this only if your OMAP board has a Mini-AB connector.
-
-config USB_GADGET_S3C2410
-	boolean "S3C2410 USB Device Controller"
-	depends on ARCH_S3C2410
-	help
-	  Samsung's S3C2410 is an ARM-4 processor with an integrated
-	  full speed USB 1.1 device controller.  It has 4 configurable
-	  endpoints, as well as endpoint zero (for control transfers).
-
-	  This driver has been tested on the S3C2410, S3C2412, and
-	  S3C2440 processors.
-
-config USB_S3C2410
-	tristate
-	depends on USB_GADGET_S3C2410
-	default USB_GADGET
-	select USB_GADGET_SELECTED
-
-config USB_S3C2410_DEBUG
-	boolean "S3C2410 udc debug messages"
-	depends on USB_GADGET_S3C2410
-
-config USB_GADGET_AT91
-	boolean "AT91 USB Device Port"
-	depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9
-	select USB_GADGET_SELECTED
-	help
-	   Many Atmel AT91 processors (such as the AT91RM2000) have a
-	   full speed USB Device Port with support for five configurable
-	   endpoints (plus endpoint zero).
-
-	   Say "y" to link the driver statically, or "m" to build a
-	   dynamically linked module called "at91_udc" and force all
-	   gadget drivers to also be dynamically linked.
-
-config USB_AT91
-	tristate
-	depends on USB_GADGET_AT91
-	default USB_GADGET
+#
+# LAST -- dummy/emulated controller
+#
 
 config USB_GADGET_DUMMY_HCD
 	boolean "Dummy HCD (DEVELOPMENT)"
@@ -553,19 +612,23 @@
 	  normal operation.
 
 config USB_G_SERIAL
-	tristate "Serial Gadget (with CDC ACM support)"
+	tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
 	help
 	  The Serial Gadget talks to the Linux-USB generic serial driver.
 	  This driver supports a CDC-ACM module option, which can be used
 	  to interoperate with MS-Windows hosts or with the Linux-USB
 	  "cdc-acm" driver.
 
+	  This driver also supports a CDC-OBEX option.  You will need a
+	  user space OBEX server talking to /dev/ttyGS*, since the kernel
+	  itself doesn't implement the OBEX protocol.
+
 	  Say "y" to link the driver statically, or "m" to build a
 	  dynamically linked module called "g_serial".
 
 	  For more information, see Documentation/usb/gadget_serial.txt
 	  which includes instructions and a "driver info file" needed to
-	  make MS-Windows work with this driver.
+	  make MS-Windows work with CDC ACM.
 
 config USB_MIDI_GADGET
 	tristate "MIDI Gadget (EXPERIMENTAL)"
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 2267fa0..bd4041b 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -18,6 +18,7 @@
 obj-$(CONFIG_USB_ATMEL_USBA)	+= atmel_usba_udc.o
 obj-$(CONFIG_USB_FSL_USB2)	+= fsl_usb2_udc.o
 obj-$(CONFIG_USB_M66592)	+= m66592-udc.o
+obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
 
 #
 # USB gadget drivers
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index a724fc1..5495b17 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -155,7 +155,6 @@
 	.bConfigurationValue	= 1,
 	/* .iConfiguration = DYNAMIC */
 	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower		= 1,	/* 2 mA, minimal */
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 85c876c..f2da026 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -128,6 +128,70 @@
 }
 
 /**
+ * usb_function_deactivate - prevent function and gadget enumeration
+ * @function: the function that isn't yet ready to respond
+ *
+ * Blocks response of the gadget driver to host enumeration by
+ * preventing the data line pullup from being activated.  This is
+ * normally called during @bind() processing to change from the
+ * initial "ready to respond" state, or when a required resource
+ * becomes unavailable.
+ *
+ * For example, drivers that serve as a passthrough to a userspace
+ * daemon can block enumeration unless that daemon (such as an OBEX,
+ * MTP, or print server) is ready to handle host requests.
+ *
+ * Not all systems support software control of their USB peripheral
+ * data pullups.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_function_deactivate(struct usb_function *function)
+{
+	struct usb_composite_dev	*cdev = function->config->cdev;
+	int				status = 0;
+
+	spin_lock(&cdev->lock);
+
+	if (cdev->deactivations == 0)
+		status = usb_gadget_disconnect(cdev->gadget);
+	if (status == 0)
+		cdev->deactivations++;
+
+	spin_unlock(&cdev->lock);
+	return status;
+}
+
+/**
+ * usb_function_activate - allow function and gadget enumeration
+ * @function: function on which usb_function_activate() was called
+ *
+ * Reverses effect of usb_function_deactivate().  If no more functions
+ * are delaying their activation, the gadget driver will respond to
+ * host enumeration procedures.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_function_activate(struct usb_function *function)
+{
+	struct usb_composite_dev	*cdev = function->config->cdev;
+	int				status = 0;
+
+	spin_lock(&cdev->lock);
+
+	if (WARN_ON(cdev->deactivations == 0))
+		status = -EINVAL;
+	else {
+		cdev->deactivations--;
+		if (cdev->deactivations == 0)
+			status = usb_gadget_connect(cdev->gadget);
+	}
+
+	spin_unlock(&cdev->lock);
+	return status;
+}
+
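
f_obex.c later in this patch uses exactly this pattern; in outline it looks like the following (hypothetical function driver, error handling trimmed):

static int my_func_bind(struct usb_configuration *c, struct usb_function *f)
{
	/* ... claim interfaces and endpoints ... */

	/* stay invisible to the host until the userspace daemon attaches */
	return usb_function_deactivate(f);
}

static void my_daemon_attached(struct usb_function *f)
{
	usb_function_activate(f);	/* pullup may be enabled from now on */
}
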
+/**
  * usb_interface_id() - allocate an unused interface ID
  * @config: configuration associated with the interface
  * @function: function handling the interface
@@ -181,7 +245,7 @@
 	c->bConfigurationValue = config->bConfigurationValue;
 	c->iConfiguration = config->iConfiguration;
 	c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
-	c->bMaxPower = config->bMaxPower;
+	c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2);
 
 	/* There may be e.g. OTG descriptors */
 	if (config->descriptors) {
@@ -368,7 +432,7 @@
 	}
 
 	/* when we return, be sure our power usage is valid */
-	power = 2 * c->bMaxPower;
+	power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
 done:
 	usb_gadget_vbus_draw(gadget, power);
 	return result;
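
Both hunks rely on bMaxPower being expressed in 2 mA units: with, say, CONFIG_USB_GADGET_VBUS_DRAW=100 and a configuration that leaves bMaxPower unset, the descriptor reports 100 / 2 = 50 units and usb_gadget_vbus_draw() is asked for 2 * 50 = 100 mA; the Kconfig default of 2 mA keeps both at the legal minimum.
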
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 1ca1c32..e1191b9 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -168,7 +168,7 @@
  * usb_find_endpoint - find a copy of an endpoint descriptor
  * @src: original vector of descriptors
  * @copy: copy of @src
- * @ep: endpoint descriptor found in @src
+ * @match: endpoint descriptor found in @src
  *
  * This returns the copy of the @match descriptor made for @copy.  Its
  * intended use is to help remembering the endpoint descriptor to use
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 7600a0c..9064696 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -82,6 +82,7 @@
 	const struct usb_endpoint_descriptor *desc;
 	struct usb_ep			ep;
 	unsigned			halted : 1;
+	unsigned			wedged : 1;
 	unsigned			already_seen : 1;
 	unsigned			setup_stage : 1;
 };
@@ -436,6 +437,7 @@
 	/* at this point real hardware should be NAKing transfers
 	 * to that endpoint, until a buffer is queued to it.
 	 */
+	ep->halted = ep->wedged = 0;
 	retval = 0;
 done:
 	return retval;
@@ -597,7 +599,7 @@
 }
 
 static int
-dummy_set_halt (struct usb_ep *_ep, int value)
+dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 {
 	struct dummy_ep		*ep;
 	struct dummy		*dum;
@@ -609,16 +611,32 @@
 	if (!dum->driver)
 		return -ESHUTDOWN;
 	if (!value)
-		ep->halted = 0;
+		ep->halted = ep->wedged = 0;
 	else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
 			!list_empty (&ep->queue))
 		return -EAGAIN;
-	else
+	else {
 		ep->halted = 1;
+		if (wedged)
+			ep->wedged = 1;
+	}
 	/* FIXME clear emulated data toggle too */
 	return 0;
 }
 
+static int
+dummy_set_halt(struct usb_ep *_ep, int value)
+{
+	return dummy_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int dummy_set_wedge(struct usb_ep *_ep)
+{
+	if (!_ep || _ep->name == ep0name)
+		return -EINVAL;
+	return dummy_set_halt_and_wedge(_ep, 1, 1);
+}
+
 static const struct usb_ep_ops dummy_ep_ops = {
 	.enable		= dummy_enable,
 	.disable	= dummy_disable,
@@ -630,6 +648,7 @@
 	.dequeue	= dummy_dequeue,
 
 	.set_halt	= dummy_set_halt,
+	.set_wedge	= dummy_set_wedge,
 };
 
 /*-------------------------------------------------------------------------*/
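
Gadget functions reach this op through the usb_ep_set_wedge() helper that pairs with it; a sketch of the intended use (hypothetical function code):

static void my_report_phase_error(struct usb_ep *bulk_in)
{
	/* keep the endpoint halted even across a bare CLEAR_FEATURE(HALT);
	 * only the function's own recovery path may clear the wedge
	 */
	if (usb_ep_set_wedge(bulk_in) == -EAGAIN)
		pr_debug("wedge deferred: IN transfer still queued\n");
}
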
@@ -760,7 +779,8 @@
 		ep->ep.name = ep_name [i];
 		ep->ep.ops = &dummy_ep_ops;
 		list_add_tail (&ep->ep.ep_list, &dum->gadget.ep_list);
-		ep->halted = ep->already_seen = ep->setup_stage = 0;
+		ep->halted = ep->wedged = ep->already_seen =
+				ep->setup_stage = 0;
 		ep->ep.maxpacket = ~0;
 		ep->last_io = jiffies;
 		ep->gadget = &dum->gadget;
@@ -1351,7 +1371,7 @@
 				} else if (setup.bRequestType == Ep_Request) {
 					// endpoint halt
 					ep2 = find_endpoint (dum, w_index);
-					if (!ep2) {
+					if (!ep2 || ep2->ep.name == ep0name) {
 						value = -EOPNOTSUPP;
 						break;
 					}
@@ -1380,7 +1400,8 @@
 						value = -EOPNOTSUPP;
 						break;
 					}
-					ep2->halted = 0;
+					if (!ep2->wedged)
+						ep2->halted = 0;
 					value = 0;
 					status = 0;
 				}
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 944c8e8..37252d0 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -242,7 +242,6 @@
 	.bConfigurationValue	= 2,
 	/* .iConfiguration = DYNAMIC */
 	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower		= 1,	/* 2 mA, minimal */
 };
 
 /*-------------------------------------------------------------------------*/
@@ -271,7 +270,6 @@
 	.bConfigurationValue	= 1,
 	/* .iConfiguration = DYNAMIC */
 	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower		= 1,	/* 2 mA, minimal */
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index 87dde01..8affe1d 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -352,7 +352,6 @@
 	.bind		= loopback_bind_config,
 	.bConfigurationValue = 2,
 	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower	= 1,	/* 2 mA, minimal */
 	/* .iConfiguration = DYNAMIC */
 };
 
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
new file mode 100644
index 0000000..80c2e7e
--- /dev/null
+++ b/drivers/usb/gadget/f_obex.c
@@ -0,0 +1,493 @@
+/*
+ * f_obex.c -- USB CDC OBEX function driver
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * Based on f_acm.c by Al Borchers and David Brownell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "gadget_chips.h"
+
+
+/*
+ * This CDC OBEX function just packages a TTY-ish byte stream.
+ * A user mode server will put it into "raw" mode and handle all the
+ * relevant protocol details ... this is just a kernel passthrough.
+ * When possible, we prevent gadget enumeration until that server is
+ * ready to handle the commands.
+ */
+
+struct obex_ep_descs {
+	struct usb_endpoint_descriptor	*obex_in;
+	struct usb_endpoint_descriptor	*obex_out;
+};
+
+struct f_obex {
+	struct gserial			port;
+	u8				ctrl_id;
+	u8				data_id;
+	u8				port_num;
+	u8				can_activate;
+
+	struct obex_ep_descs		fs;
+	struct obex_ep_descs		hs;
+};
+
+static inline struct f_obex *func_to_obex(struct usb_function *f)
+{
+	return container_of(f, struct f_obex, port.func);
+}
+
+static inline struct f_obex *port_to_obex(struct gserial *p)
+{
+	return container_of(p, struct f_obex, port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define OBEX_CTRL_IDX	0
+#define OBEX_DATA_IDX	1
+
+static struct usb_string obex_string_defs[] = {
+	[OBEX_CTRL_IDX].s	= "CDC Object Exchange (OBEX)",
+	[OBEX_DATA_IDX].s	= "CDC OBEX Data",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings obex_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= obex_string_defs,
+};
+
+static struct usb_gadget_strings *obex_strings[] = {
+	&obex_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_interface_descriptor obex_control_intf __initdata = {
+	.bLength		= sizeof(obex_control_intf),
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= 0,
+
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 0,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_OBEX,
+};
+
+static struct usb_interface_descriptor obex_data_nop_intf __initdata = {
+	.bLength		= sizeof(obex_data_nop_intf),
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= 1,
+
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 0,
+	.bInterfaceClass	= USB_CLASS_CDC_DATA,
+};
+
+static struct usb_interface_descriptor obex_data_intf __initdata = {
+	.bLength		= sizeof(obex_data_intf),
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= 2,
+
+	.bAlternateSetting	= 1,
+	.bNumEndpoints		= 2,
+	.bInterfaceClass	= USB_CLASS_CDC_DATA,
+};
+
+static struct usb_cdc_header_desc obex_cdc_header_desc __initdata = {
+	.bLength		= sizeof(obex_cdc_header_desc),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= USB_CDC_HEADER_TYPE,
+	.bcdCDC			= __constant_cpu_to_le16(0x0120),
+};
+
+static struct usb_cdc_union_desc obex_cdc_union_desc __initdata = {
+	.bLength		= sizeof(obex_cdc_union_desc),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= USB_CDC_UNION_TYPE,
+	.bMasterInterface0	= 1,
+	.bSlaveInterface0	= 2,
+};
+
+static struct usb_cdc_obex_desc obex_desc __initdata = {
+	.bLength		= sizeof(obex_desc),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= USB_CDC_OBEX_TYPE,
+	.bcdVersion		= __constant_cpu_to_le16(0x0100),
+};
+
+/* High-Speed Support */
+
+static struct usb_endpoint_descriptor obex_hs_ep_out_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_OUT,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize		= __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor obex_hs_ep_in_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize		= __constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_function[] __initdata = {
+	(struct usb_descriptor_header *) &obex_control_intf,
+	(struct usb_descriptor_header *) &obex_cdc_header_desc,
+	(struct usb_descriptor_header *) &obex_desc,
+	(struct usb_descriptor_header *) &obex_cdc_union_desc,
+
+	(struct usb_descriptor_header *) &obex_data_nop_intf,
+	(struct usb_descriptor_header *) &obex_data_intf,
+	(struct usb_descriptor_header *) &obex_hs_ep_in_desc,
+	(struct usb_descriptor_header *) &obex_hs_ep_out_desc,
+	NULL,
+};
+
+/* Full-Speed Support */
+
+static struct usb_endpoint_descriptor obex_fs_ep_in_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor obex_fs_ep_out_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_OUT,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_function[] __initdata = {
+	(struct usb_descriptor_header *) &obex_control_intf,
+	(struct usb_descriptor_header *) &obex_cdc_header_desc,
+	(struct usb_descriptor_header *) &obex_desc,
+	(struct usb_descriptor_header *) &obex_cdc_union_desc,
+
+	(struct usb_descriptor_header *) &obex_data_nop_intf,
+	(struct usb_descriptor_header *) &obex_data_intf,
+	(struct usb_descriptor_header *) &obex_fs_ep_in_desc,
+	(struct usb_descriptor_header *) &obex_fs_ep_out_desc,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_obex		*obex = func_to_obex(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (intf == obex->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+		/* NOP */
+		DBG(cdev, "reset obex ttyGS%d control\n", obex->port_num);
+
+	} else if (intf == obex->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (obex->port.in->driver_data) {
+			DBG(cdev, "reset obex ttyGS%d\n", obex->port_num);
+			gserial_disconnect(&obex->port);
+		}
+
+		if (!obex->port.in_desc) {
+			DBG(cdev, "init obex ttyGS%d\n", obex->port_num);
+			obex->port.in_desc = ep_choose(cdev->gadget,
+					obex->hs.obex_in, obex->fs.obex_in);
+			obex->port.out_desc = ep_choose(cdev->gadget,
+					obex->hs.obex_out, obex->fs.obex_out);
+		}
+
+		if (alt == 1) {
+			DBG(cdev, "activate obex ttyGS%d\n", obex->port_num);
+			gserial_connect(&obex->port, obex->port_num);
+		}
+
+	} else
+		goto fail;
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static int obex_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_obex		*obex = func_to_obex(f);
+
+	if (intf == obex->ctrl_id)
+		return 0;
+
+	return obex->port.in->driver_data ? 1 : 0;
+}
+
+static void obex_disable(struct usb_function *f)
+{
+	struct f_obex	*obex = func_to_obex(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "obex ttyGS%d disable\n", obex->port_num);
+	gserial_disconnect(&obex->port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void obex_connect(struct gserial *g)
+{
+	struct f_obex		*obex = port_to_obex(g);
+	struct usb_composite_dev *cdev = g->func.config->cdev;
+	int			status;
+
+	if (!obex->can_activate)
+		return;
+
+	status = usb_function_activate(&g->func);
+	if (status)
+		DBG(cdev, "obex ttyGS%d function activate --> %d\n",
+			obex->port_num, status);
+}
+
+static void obex_disconnect(struct gserial *g)
+{
+	struct f_obex		*obex = port_to_obex(g);
+	struct usb_composite_dev *cdev = g->func.config->cdev;
+	int			status;
+
+	if (!obex->can_activate)
+		return;
+
+	status = usb_function_deactivate(&g->func);
+	if (status)
+		DBG(cdev, "obex ttyGS%d function deactivate --> %d\n",
+			obex->port_num, status);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init
+obex_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_obex		*obex = func_to_obex(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	obex->ctrl_id = status;
+
+	obex_control_intf.bInterfaceNumber = status;
+	obex_cdc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	obex->data_id = status;
+
+	obex_data_nop_intf.bInterfaceNumber = status;
+	obex_data_intf.bInterfaceNumber = status;
+	obex_cdc_union_desc.bSlaveInterface0 = status;
+
+	/* allocate instance-specific endpoints */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_in_desc);
+	if (!ep)
+		goto fail;
+	obex->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_out_desc);
+	if (!ep)
+		goto fail;
+	obex->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(fs_function);
+
+	obex->fs.obex_in = usb_find_endpoint(fs_function,
+			f->descriptors, &obex_fs_ep_in_desc);
+	obex->fs.obex_out = usb_find_endpoint(fs_function,
+			f->descriptors, &obex_fs_ep_out_desc);
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+
+		obex_hs_ep_in_desc.bEndpointAddress =
+				obex_fs_ep_in_desc.bEndpointAddress;
+		obex_hs_ep_out_desc.bEndpointAddress =
+				obex_fs_ep_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_function);
+
+		obex->hs.obex_in = usb_find_endpoint(hs_function,
+				f->descriptors, &obex_hs_ep_in_desc);
+		obex->hs.obex_out = usb_find_endpoint(hs_function,
+				f->descriptors, &obex_hs_ep_out_desc);
+	}
+
+	/* Avoid letting this gadget enumerate until the userspace
+	 * OBEX server is active.
+	 */
+	status = usb_function_deactivate(f);
+	if (status < 0)
+		WARNING(cdev, "obex ttyGS%d: can't prevent enumeration, %d\n",
+			obex->port_num, status);
+	else
+		obex->can_activate = true;
+
+
+	DBG(cdev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n",
+			obex->port_num,
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			obex->port.in->name, obex->port.out->name);
+
+	return 0;
+
+fail:
+	/* we might as well release our claims on endpoints */
+	if (obex->port.out)
+		obex->port.out->driver_data = NULL;
+	if (obex->port.in)
+		obex->port.in->driver_data = NULL;
+
+	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+
+	return status;
+}
+
+static void
+obex_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+	kfree(func_to_obex(f));
+}
+
+/* Some controllers can't support CDC OBEX ... */
+static inline bool can_support_obex(struct usb_configuration *c)
+{
+	/* Since the first interface is a NOP, we can ignore the
+	 * issue of multi-interface support on most controllers.
+	 *
+	 * Altsettings are mandatory, however...
+	 */
+	if (!gadget_supports_altsettings(c->cdev->gadget))
+		return false;
+
+	/* everything else is *probably* fine ... */
+	return true;
+}
+
+/**
+ * obex_bind_config - add a CDC OBEX function to a configuration
+ * @c: the configuration to support the CDC OBEX instance
+ * @port_num: /dev/ttyGS* port this interface will use
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gserial_setup() with enough ports to
+ * handle all the ones it binds.  Caller is also responsible
+ * for calling @gserial_cleanup() before module unload.
+ */
+int __init obex_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_obex	*obex;
+	int		status;
+
+	if (!can_support_obex(c))
+		return -EINVAL;
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (obex_string_defs[OBEX_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		obex_string_defs[OBEX_CTRL_IDX].id = status;
+
+		obex_control_intf.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		obex_string_defs[OBEX_DATA_IDX].id = status;
+
+		obex_data_nop_intf.iInterface =
+			obex_data_intf.iInterface = status;
+	}
+
+	/* allocate and initialize one new instance */
+	obex = kzalloc(sizeof *obex, GFP_KERNEL);
+	if (!obex)
+		return -ENOMEM;
+
+	obex->port_num = port_num;
+
+	obex->port.connect = obex_connect;
+	obex->port.disconnect = obex_disconnect;
+
+	obex->port.func.name = "obex";
+	obex->port.func.strings = obex_strings;
+	/* descriptors are per-instance copies */
+	obex->port.func.bind = obex_bind;
+	obex->port.func.unbind = obex_unbind;
+	obex->port.func.set_alt = obex_set_alt;
+	obex->port.func.get_alt = obex_get_alt;
+	obex->port.func.disable = obex_disable;
+
+	status = usb_add_function(c, &obex->port.func);
+	if (status)
+		kfree(obex);
+
+	return status;
+}
+
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_LICENSE("GPL");
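
A sketch of how a gadget driver would bind this function, per the kernel-doc above (hypothetical driver code; only gserial_setup() and obex_bind_config() come from this patch):

static int __init my_obex_config_bind(struct usb_configuration *c)
{
	return obex_bind_config(c, 0);		/* OBEX on /dev/ttyGS0 */
}

static int __init my_gadget_bind(struct usb_composite_dev *cdev)
{
	int status;

	status = gserial_setup(cdev->gadget, 1);	/* one tty port */
	if (status < 0)
		return status;

	/* ... usb_add_config() with .bind = my_obex_config_bind ... */
	return 0;
}
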
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index f18c3a1..dc84d26 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -552,7 +552,6 @@
 	.setup		= sourcesink_setup,
 	.bConfigurationValue = 3,
 	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower	= 1,	/* 2 mA, minimal */
 	/* .iConfiguration = DYNAMIC */
 };
 
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 0c632d2..c4e62a6 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -851,7 +851,7 @@
 	.bConfigurationValue =	CONFIG_VALUE,
 	.iConfiguration =	STRING_CONFIG,
 	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower =		1,	// self-powered
+	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
 };
 
 static struct usb_otg_descriptor
@@ -2676,11 +2676,24 @@
 	/* Verify the length of the command itself */
 	if (cmnd_size != fsg->cmnd_size) {
 
-		/* Special case workaround: MS-Windows issues REQUEST SENSE
-		 * with cbw->Length == 12 (it should be 6). */
-		if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
+		/* Special case workaround: There are plenty of buggy SCSI
+		 * implementations. Many have issues with cbw->Length
+		 * field passing a wrong command size. For those cases we
+		 * always try to work around the problem by using the length
+		 * sent by the host side provided it is at least as large
+		 * as the correct command length.
+		 * Examples of such cases would be MS-Windows, which issues
+		 * REQUEST SENSE with cbw->Length == 12 where it should
+		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
+		 * REQUEST SENSE with cbw->Length == 10 where it should
+		 * be 6 as well.
+		 */
+		if (cmnd_size <= fsg->cmnd_size) {
+			DBG(fsg, "%s is buggy! Expected length %d "
+					"but we got %d\n", name,
+					cmnd_size, fsg->cmnd_size);
 			cmnd_size = fsg->cmnd_size;
-		else {
+		} else {
 			fsg->phase_error = 1;
 			return -EINVAL;
 		}
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
new file mode 100644
index 0000000..1fe8b44
--- /dev/null
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -0,0 +1,2760 @@
+/*
+ * driver/usb/gadget/fsl_qe_udc.c
+ *
+ * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * 	Xie Xiaobo <X.Xie@freescale.com>
+ * 	Li Yang <leoli@freescale.com>
+ * 	Based on bareboard code from Shlomi Gridish.
+ *
+ * Description:
+ * Freescale QE/CPM USB Peripheral Controller Driver
+ * The controller can be found on the MPC8360, MPC8272, etc.
+ * MPC8360 Rev 1.1 may need a QE microcode update
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#undef USB_TRACE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <asm/qe.h>
+#include <asm/cpm.h>
+#include <asm/dma.h>
+#include <asm/reg.h>
+#include "fsl_qe_udc.h"
+
+#define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
+#define DRIVER_AUTHOR   "Xie XiaoBo"
+#define DRIVER_VERSION  "1.0"
+
+#define DMA_ADDR_INVALID        (~(dma_addr_t)0)
+
+static const char driver_name[] = "fsl_qe_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+/*ep name is important in gadget, it should obey the convention of ep_match()*/
+static const char *const ep_name[] = {
+	"ep0-control", /* everyone has ep0 */
+	/* 3 configurable endpoints */
+	"ep1",
+	"ep2",
+	"ep3",
+};
+
+static struct usb_endpoint_descriptor qe_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
+};
+
+/* it is initialized in probe()  */
+static struct qe_udc *udc_controller;
+
+/********************************************************************
+ *      Internal Used Function Start
+********************************************************************/
+/*-----------------------------------------------------------------
+ * done() - retire a request; caller blocked irqs
+ *--------------------------------------------------------------*/
+static void done(struct qe_ep *ep, struct qe_req *req, int status)
+{
+	struct qe_udc *udc = ep->udc;
+	unsigned char stopped = ep->stopped;
+
+	/* the req->queue pointer is used by ep_queue() func, in which
+	 * the request will be added into a udc_ep->queue 'd tail
+	 * so here the req will be dropped from the ep->queue
+	 */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (req->mapped) {
+		dma_unmap_single(udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			ep_is_in(ep)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			ep_is_in(ep)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	if (status && (status != -ESHUTDOWN))
+		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&udc->lock);
+
+	/* this complete() should be a func implemented by the gadget layer,
+	 * eg fsg->bulk_in_complete() */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&udc->lock);
+
+	ep->stopped = stopped;
+}
+
+/*-----------------------------------------------------------------
+ * nuke(): delete all requests related to this ep
+ *--------------------------------------------------------------*/
+static void nuke(struct qe_ep *ep, int status)
+{
+	/* Whether this ep has a request linked */
+	while (!list_empty(&ep->queue)) {
+		struct qe_req *req = NULL;
+		req = list_entry(ep->queue.next, struct qe_req, queue);
+
+		done(ep, req, status);
+	}
+}
+
+/*---------------------------------------------------------------------------*
+ * USB and Endpoint manipulate process, include parameter and register       *
+ *---------------------------------------------------------------------------*/
+/* @value: 1--set stall 0--clean stall */
+static int qe_eprx_stall_change(struct qe_ep *ep, int value)
+{
+	u16 tem_usep;
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
+	tem_usep = tem_usep & ~USB_RHS_MASK;
+	if (value == 1)
+		tem_usep |= USB_RHS_STALL;
+	else if (ep->dir == USB_DIR_IN)
+		tem_usep |= USB_RHS_IGNORE_OUT;
+
+	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
+	return 0;
+}
+
+static int qe_eptx_stall_change(struct qe_ep *ep, int value)
+{
+	u16 tem_usep;
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
+	tem_usep = tem_usep & ~USB_THS_MASK;
+	if (value == 1)
+		tem_usep |= USB_THS_STALL;
+	else if (ep->dir == USB_DIR_OUT)
+		tem_usep |= USB_THS_IGNORE_IN;
+
+	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
+
+	return 0;
+}
+
+static int qe_ep0_stall(struct qe_udc *udc)
+{
+	qe_eptx_stall_change(&udc->eps[0], 1);
+	qe_eprx_stall_change(&udc->eps[0], 1);
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = 0;
+	return 0;
+}
+
+static int qe_eprx_nack(struct qe_ep *ep)
+{
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	if (ep->state == EP_STATE_IDLE) {
+		/* Set the ep's nack */
+		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
+				USB_RHS_MASK, USB_RHS_NACK);
+
+		/* Mask Rx and Busy interrupts */
+		clrbits16(&udc->usb_regs->usb_usbmr,
+				(USB_E_RXB_MASK | USB_E_BSY_MASK));
+
+		ep->state = EP_STATE_NACK;
+	}
+	return 0;
+}
+
+static int qe_eprx_normal(struct qe_ep *ep)
+{
+	struct qe_udc *udc = ep->udc;
+
+	if (ep->state == EP_STATE_NACK) {
+		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
+				USB_RTHS_MASK, USB_THS_IGNORE_IN);
+
+		/* Unmask RX interrupts */
+		out_be16(&udc->usb_regs->usb_usber,
+				USB_E_BSY_MASK | USB_E_RXB_MASK);
+		setbits16(&udc->usb_regs->usb_usbmr,
+				(USB_E_RXB_MASK | USB_E_BSY_MASK));
+
+		ep->state = EP_STATE_IDLE;
+		ep->has_data = 0;
+	}
+
+	return 0;
+}
+
+static int qe_ep_cmd_stoptx(struct qe_ep *ep)
+{
+	if (ep->udc->soc_type == PORT_CPM)
+		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
+				CPM_USB_STOP_TX_OPCODE);
+	else
+		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
+				ep->epnum, 0);
+
+	return 0;
+}
+
+static int qe_ep_cmd_restarttx(struct qe_ep *ep)
+{
+	if (ep->udc->soc_type == PORT_CPM)
+		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
+				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
+	else
+		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
+				ep->epnum, 0);
+
+	return 0;
+}
+
+static int qe_ep_flushtxfifo(struct qe_ep *ep)
+{
+	struct qe_udc *udc = ep->udc;
+	int i;
+
+	i = (int)ep->epnum;
+
+	qe_ep_cmd_stoptx(ep);
+	out_8(&udc->usb_regs->usb_uscom,
+		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
+	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
+	out_be32(&udc->ep_param[i]->tstate, 0);
+	out_be16(&udc->ep_param[i]->tbcnt, 0);
+
+	ep->c_txbd = ep->txbase;
+	ep->n_txbd = ep->txbase;
+	qe_ep_cmd_restarttx(ep);
+	return 0;
+}
+
+static int qe_ep_filltxfifo(struct qe_ep *ep)
+{
+	struct qe_udc *udc = ep->udc;
+
+	out_8(&udc->usb_regs->usb_uscom,
+			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
+	return 0;
+}
+
+static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
+{
+	struct qe_ep *ep;
+	u32 bdring_len;
+	struct qe_bd __iomem *bd;
+	int i;
+
+	ep = &udc->eps[pipe_num];
+
+	if (ep->dir == USB_DIR_OUT)
+		bdring_len = USB_BDRING_LEN_RX;
+	else
+		bdring_len = USB_BDRING_LEN;
+
+	bd = ep->rxbase;
+	for (i = 0; i < (bdring_len - 1); i++) {
+		out_be32((u32 __iomem *)bd, R_E | R_I);
+		bd++;
+	}
+	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
+
+	bd = ep->txbase;
+	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
+		out_be32(&bd->buf, 0);
+		out_be32((u32 __iomem *)bd, 0);
+		bd++;
+	}
+	out_be32((u32 __iomem *)bd, T_W);
+
+	return 0;
+}
+
+static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
+{
+	struct qe_ep *ep;
+	u16 tmpusep;
+
+	ep = &udc->eps[pipe_num];
+	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
+	tmpusep &= ~USB_RTHS_MASK;
+
+	switch (ep->dir) {
+	case USB_DIR_BOTH:
+		qe_ep_flushtxfifo(ep);
+		break;
+	case USB_DIR_OUT:
+		tmpusep |= USB_THS_IGNORE_IN;
+		break;
+	case USB_DIR_IN:
+		qe_ep_flushtxfifo(ep);
+		tmpusep |= USB_RHS_IGNORE_OUT;
+		break;
+	default:
+		break;
+	}
+	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
+
+	qe_epbds_reset(udc, pipe_num);
+
+	return 0;
+}
+
+static int qe_ep_toggledata01(struct qe_ep *ep)
+{
+	ep->data01 ^= 0x1;
+	return 0;
+}
+
+static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+	unsigned long tmp_addr = 0;
+	struct usb_ep_para __iomem *epparam;
+	int i;
+	struct qe_bd __iomem *bd;
+	int bdring_len;
+
+	if (ep->dir == USB_DIR_OUT)
+		bdring_len = USB_BDRING_LEN_RX;
+	else
+		bdring_len = USB_BDRING_LEN;
+
+	epparam = udc->ep_param[pipe_num];
+	/* alloc multi-ram for BD rings and set the ep parameters */
+	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
+				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
+	out_be16(&epparam->rbase, (u16)tmp_addr);
+	out_be16(&epparam->tbase, (u16)(tmp_addr +
+				(sizeof(struct qe_bd) * bdring_len)));
+
+	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
+	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
+
+	ep->rxbase = cpm_muram_addr(tmp_addr);
+	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
+				* bdring_len));
+	ep->n_rxbd = ep->rxbase;
+	ep->e_rxbd = ep->rxbase;
+	ep->n_txbd = ep->txbase;
+	ep->c_txbd = ep->txbase;
+	ep->data01 = 0; /* data0 */
+
+	/* Init TX and RX bds */
+	bd = ep->rxbase;
+	for (i = 0; i < bdring_len - 1; i++) {
+		out_be32(&bd->buf, 0);
+		out_be32((u32 __iomem *)bd, 0);
+		bd++;
+	}
+	out_be32(&bd->buf, 0);
+	out_be32((u32 __iomem *)bd, R_W);
+
+	bd = ep->txbase;
+	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
+		out_be32(&bd->buf, 0);
+		out_be32((u32 __iomem *)bd, 0);
+		bd++;
+	}
+	out_be32(&bd->buf, 0);
+	out_be32((u32 __iomem *)bd, T_W);
+
+	return 0;
+}
+
+static int qe_ep_rxbd_update(struct qe_ep *ep)
+{
+	unsigned int size;
+	int i;
+	unsigned int tmp;
+	struct qe_bd __iomem *bd;
+	unsigned int bdring_len;
+
+	if (ep->rxbase == NULL)
+		return -EINVAL;
+
+	bd = ep->rxbase;
+
+	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
+	if (ep->rxframe == NULL) {
+		dev_err(ep->udc->dev, "malloc rxframe failed\n");
+		return -ENOMEM;
+	}
+
+	qe_frame_init(ep->rxframe);
+
+	if (ep->dir == USB_DIR_OUT)
+		bdring_len = USB_BDRING_LEN_RX;
+	else
+		bdring_len = USB_BDRING_LEN;
+
+	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
+	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
+	if (ep->rxbuffer == NULL) {
+		dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n",
+				size);
+		kfree(ep->rxframe);
+		return -ENOMEM;
+	}
+
+	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
+	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
+		ep->rxbuf_d = dma_map_single(udc_controller->gadget.dev.parent,
+					ep->rxbuffer,
+					size,
+					DMA_FROM_DEVICE);
+		ep->rxbufmap = 1;
+	} else {
+		dma_sync_single_for_device(udc_controller->gadget.dev.parent,
+					ep->rxbuf_d, size,
+					DMA_FROM_DEVICE);
+		ep->rxbufmap = 0;
+	}
+
+	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
+	tmp = ep->rxbuf_d;
+	tmp = (u32)(((tmp >> 2) << 2) + 4);
+
+	for (i = 0; i < bdring_len - 1; i++) {
+		out_be32(&bd->buf, tmp);
+		out_be32((u32 __iomem *)bd, (R_E | R_I));
+		tmp = tmp + size;
+		bd++;
+	}
+	out_be32(&bd->buf, tmp);
+	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
+
+	return 0;
+}
+
+static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+	struct usb_ep_para __iomem *epparam;
+	u16 usep, logepnum;
+	u16 tmp;
+	u8 rtfcr = 0;
+
+	epparam = udc->ep_param[pipe_num];
+
+	usep = 0;
+	logepnum = (ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	usep |= (logepnum << USB_EPNUM_SHIFT);
+
+	switch (ep->desc->bmAttributes & 0x03) {
+	case USB_ENDPOINT_XFER_BULK:
+		usep |= USB_TRANS_BULK;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		usep |=  USB_TRANS_ISO;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		usep |= USB_TRANS_INT;
+		break;
+	default:
+		usep |= USB_TRANS_CTR;
+		break;
+	}
+
+	switch (ep->dir) {
+	case USB_DIR_OUT:
+		usep |= USB_THS_IGNORE_IN;
+		break;
+	case USB_DIR_IN:
+		usep |= USB_RHS_IGNORE_OUT;
+		break;
+	default:
+		break;
+	}
+	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
+
+	rtfcr = 0x30;
+	out_8(&epparam->rbmr, rtfcr);
+	out_8(&epparam->tbmr, rtfcr);
+
+	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
+	/* MRBLR must be divisible by 4 */
+	tmp = (u16)(((tmp >> 2) << 2) + 4);
+	out_be16(&epparam->mrblr, tmp);
+
+	return 0;
+}
+
+static int qe_ep_init(struct qe_udc *udc,
+		      unsigned char pipe_num,
+		      const struct usb_endpoint_descriptor *desc)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+	unsigned long flags;
+	int reval = 0;
+	u16 max = 0;
+
+	max = le16_to_cpu(desc->wMaxPacketSize);
+
+	/* Validate the max packet size for this endpoint.
+	 * Refer to USB 2.0 spec, table 9-13.
+	 */
+	if (pipe_num != 0) {
+		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+		case USB_ENDPOINT_XFER_BULK:
+			if (strstr(ep->ep.name, "-iso")
+					|| strstr(ep->ep.name, "-int"))
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+				if ((max == 128) || (max == 256) ||
+						(max == 512))
+					break;
+			default:
+				switch (max) {
+				case 4:
+				case 8:
+				case 16:
+				case 32:
+				case 64:
+					break;
+				default:
+				case USB_SPEED_LOW:
+					goto en_done;
+				}
+			}
+			break;
+		case USB_ENDPOINT_XFER_INT:
+			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+				if (max <= 1024)
+					break;
+			case USB_SPEED_FULL:
+				if (max <= 64)
+					break;
+			default:
+				if (max <= 8)
+					break;
+				goto en_done;
+			}
+			break;
+		case USB_ENDPOINT_XFER_ISOC:
+			if (strstr(ep->ep.name, "-bulk")
+				|| strstr(ep->ep.name, "-int"))
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+				if (max <= 1024)
+					break;
+			case USB_SPEED_FULL:
+				if (max <= 1023)
+					break;
+			default:
+				goto en_done;
+			}
+			break;
+		case USB_ENDPOINT_XFER_CONTROL:
+			if (strstr(ep->ep.name, "-iso")
+				|| strstr(ep->ep.name, "-int"))
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+			case USB_SPEED_FULL:
+				switch (max) {
+				case 1:
+				case 2:
+				case 4:
+				case 8:
+				case 16:
+				case 32:
+				case 64:
+					break;
+				default:
+					goto en_done;
+				}
+			case USB_SPEED_LOW:
+				switch (max) {
+				case 1:
+				case 2:
+				case 4:
+				case 8:
+					break;
+				default:
+					goto en_done;
+				}
+			default:
+				goto en_done;
+			}
+			break;
+
+		default:
+			goto en_done;
+		}
+	} /* if ep0*/
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* initialize ep structure */
+	ep->ep.maxpacket = max;
+	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->init = 1;
+
+	if (pipe_num == 0) {
+		ep->dir = USB_DIR_BOTH;
+		udc->ep0_dir = USB_DIR_OUT;
+		udc->ep0_state = WAIT_FOR_SETUP;
+	} else	{
+		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
+		case USB_DIR_OUT:
+			ep->dir = USB_DIR_OUT;
+			break;
+		case USB_DIR_IN:
+			ep->dir = USB_DIR_IN;
+		default:
+			break;
+		}
+	}
+
+	/* hardware special operation */
+	qe_ep_bd_init(udc, pipe_num);
+	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
+		reval = qe_ep_rxbd_update(ep);
+		if (reval)
+			goto en_done1;
+	}
+
+	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
+		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
+		if (ep->txframe == NULL) {
+			dev_err(udc->dev, "malloc txframe failed\n");
+			goto en_done2;
+		}
+		qe_frame_init(ep->txframe);
+	}
+
+	qe_ep_register_init(udc, pipe_num);
+
+	/* Now HW will be NAKing transfers to that EP,
+	 * until a buffer is queued to it. */
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+en_done2:
+	kfree(ep->rxbuffer);
+	kfree(ep->rxframe);
+en_done1:
+	spin_unlock_irqrestore(&udc->lock, flags);
+en_done:
+	dev_dbg(udc->dev, "failed to initialize %s\n", ep->ep.name);
+	return -ENODEV;
+}
+
+static inline void qe_usb_enable(void)
+{
+	setbits8(&udc_controller->usb_regs->usb_usmod, USB_MODE_EN);
+}
+
+static inline void qe_usb_disable(void)
+{
+	clrbits8(&udc_controller->usb_regs->usb_usmod, USB_MODE_EN);
+}
+
+/*----------------------------------------------------------------------------*
+ *		USB and EP basic manipulation functions end		      *
+ *----------------------------------------------------------------------------*/
+
+
+/******************************************************************************
+		UDC transmit and receive process
+ ******************************************************************************/
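+
+/* Hand a single rx BD back to the hardware (R_E | R_I), preserving the
+ * wrap bit, and advance the e_rxbd pointer accordingly. */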
+static void recycle_one_rxbd(struct qe_ep *ep)
+{
+	u32 bdstatus;
+
+	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
+	bdstatus = R_I | R_E | (bdstatus & R_W);
+	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
+
+	if (bdstatus & R_W)
+		ep->e_rxbd = ep->rxbase;
+	else
+		ep->e_rxbd++;
+}
+
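+/* Walk the ring from e_rxbd and re-arm every BD the hardware has finished
+ * with (neither R_E nor a residual length); optionally stop once the
+ * next-to-process BD (n_rxbd) is reached. */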
+static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
+{
+	u32 bdstatus;
+	struct qe_bd __iomem *bd, *nextbd;
+	unsigned char stop = 0;
+
+	nextbd = ep->n_rxbd;
+	bd = ep->e_rxbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+
+	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
+		bdstatus = R_E | R_I | (bdstatus & R_W);
+		out_be32((u32 __iomem *)bd, bdstatus);
+
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		bdstatus = in_be32((u32 __iomem *)bd);
+		if (stopatnext && (bd == nextbd))
+			stop = 1;
+	}
+
+	ep->e_rxbd = bd;
+}
+
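+/* Resynchronize the recycle pointer with the hardware rbptr when needed,
+ * recycle finished rx BDs, clear a pending busy event and restart
+ * reception if a request is still queued. */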
+static void ep_recycle_rxbds(struct qe_ep *ep)
+{
+	struct qe_bd __iomem *bd = ep->n_rxbd;
+	u32 bdstatus;
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	bdstatus = in_be32((u32 __iomem *)bd);
+	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
+		bd = ep->rxbase +
+				((in_be16(&udc->ep_param[epnum]->rbptr) -
+				  in_be16(&udc->ep_param[epnum]->rbase))
+				 >> 3);
+		bdstatus = in_be32((u32 __iomem *)bd);
+
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		ep->e_rxbd = bd;
+		recycle_rxbds(ep, 0);
+		ep->e_rxbd = ep->n_rxbd;
+	} else
+		recycle_rxbds(ep, 1);
+
+	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
+		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
+
+	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
+		qe_eprx_normal(ep);
+
+	ep->localnack = 0;
+}
+
+static void setup_received_handle(struct qe_udc *udc,
+					struct usb_ctrlrequest *setup);
+static int qe_ep_rxframe_handle(struct qe_ep *ep);
+static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
+/* when the BD PID is SETUP, handle the packet */
+static int ep0_setup_handle(struct qe_udc *udc)
+{
+	struct qe_ep *ep = &udc->eps[0];
+	struct qe_frame *pframe;
+	unsigned int fsize;
+	u8 *cp;
+
+	pframe = ep->rxframe;
+	if ((frame_get_info(pframe) & PID_SETUP)
+			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
+		fsize = frame_get_length(pframe);
+		if (unlikely(fsize != 8))
+			return -EINVAL;
+		cp = (u8 *)&udc->local_setup_buff;
+		memcpy(cp, pframe->data, fsize);
+		ep->data01 = 1;
+
+		/* handle the usb command base on the usb_ctrlrequest */
+		setup_received_handle(udc, &udc->local_setup_buff);
+		return 0;
+	}
+	return -EINVAL;
+}
+
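+/* ep0 receive path: walk the rx BD ring, build a qe_frame for each
+ * completed BD, dispatch it to the SETUP or data handler and recycle
+ * the BD afterwards. */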
+static int qe_ep0_rx(struct qe_udc *udc)
+{
+	struct qe_ep *ep = &udc->eps[0];
+	struct qe_frame *pframe;
+	struct qe_bd __iomem *bd;
+	u32 bdstatus, length;
+	u32 vaddr;
+
+	pframe = ep->rxframe;
+
+	if (ep->dir == USB_DIR_IN) {
+		dev_err(udc->dev, "ep0 not a control endpoint\n");
+		return -EINVAL;
+	}
+
+	bd = ep->n_rxbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	length = bdstatus & BD_LENGTH_MASK;
+
+	while (!(bdstatus & R_E) && length) {
+		if ((bdstatus & R_F) && (bdstatus & R_L)
+			&& !(bdstatus & R_ERROR)) {
+			if (length == USB_CRC_SIZE) {
+				udc->ep0_state = WAIT_FOR_SETUP;
+				dev_vdbg(udc->dev,
+					"receive a ZLP in status phase\n");
+			} else {
+				qe_frame_clean(pframe);
+				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+				frame_set_data(pframe, (u8 *)vaddr);
+				frame_set_length(pframe,
+						(length - USB_CRC_SIZE));
+				frame_set_status(pframe, FRAME_OK);
+				switch (bdstatus & R_PID) {
+				case R_PID_SETUP:
+					frame_set_info(pframe, PID_SETUP);
+					break;
+				case R_PID_DATA1:
+					frame_set_info(pframe, PID_DATA1);
+					break;
+				default:
+					frame_set_info(pframe, PID_DATA0);
+					break;
+				}
+
+				if ((bdstatus & R_PID) == R_PID_SETUP)
+					ep0_setup_handle(udc);
+				else
+					qe_ep_rxframe_handle(ep);
+			}
+		} else {
+			dev_err(udc->dev, "The receive frame with error!\n");
+		}
+
+		/* note: don't clear the rxbd's buffer address */
+		recycle_one_rxbd(ep);
+
+		/* Get next BD */
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		bdstatus = in_be32((u32 __iomem *)bd);
+		length = bdstatus & BD_LENGTH_MASK;
+
+	}
+
+	ep->n_rxbd = bd;
+
+	return 0;
+}
+
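+/* Check the DATA0/DATA1 toggle of a received frame, copy its payload into
+ * the current request and complete that request once it is full or a
+ * short packet arrives. */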
+static int qe_ep_rxframe_handle(struct qe_ep *ep)
+{
+	struct qe_frame *pframe;
+	u8 framepid = 0;
+	unsigned int fsize;
+	u8 *cp;
+	struct qe_req *req;
+
+	pframe = ep->rxframe;
+
+	if (frame_get_info(pframe) & PID_DATA1)
+		framepid = 0x1;
+
+	if (framepid != ep->data01) {
+		dev_err(ep->udc->dev, "the data01 error!\n");
+		return -EIO;
+	}
+
+	fsize = frame_get_length(pframe);
+	if (list_empty(&ep->queue)) {
+		dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name);
+	} else {
+		req = list_entry(ep->queue.next, struct qe_req, queue);
+
+		cp = (u8 *)(req->req.buf) + req->req.actual;
+		if (cp) {
+			memcpy(cp, pframe->data, fsize);
+			req->req.actual += fsize;
+			if ((fsize < ep->ep.maxpacket) ||
+					(req->req.actual >= req->req.length)) {
+				if (ep->epnum == 0)
+					ep0_req_complete(ep->udc, req);
+				else
+					done(ep, req, 0);
+				if (list_empty(&ep->queue) && ep->epnum != 0)
+					qe_eprx_nack(ep);
+			}
+		}
+	}
+
+	qe_ep_toggledata01(ep);
+
+	return 0;
+}
+
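+/* Deferred receive processing for non-control endpoints: drain the rx BD
+ * ring of every OUT endpoint whose tasklet flag is set. */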
+static void ep_rx_tasklet(unsigned long data)
+{
+	struct qe_udc *udc = (struct qe_udc *)data;
+	struct qe_ep *ep;
+	struct qe_frame *pframe;
+	struct qe_bd __iomem *bd;
+	unsigned long flags;
+	u32 bdstatus, length;
+	u32 vaddr, i;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
+		ep = &udc->eps[i];
+
+		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
+			dev_dbg(udc->dev,
+				"transmit ep or tasklet disabled, skipping\n");
+			continue;
+		}
+
+		pframe = ep->rxframe;
+		bd = ep->n_rxbd;
+		bdstatus = in_be32((u32 __iomem *)bd);
+		length = bdstatus & BD_LENGTH_MASK;
+
+		while (!(bdstatus & R_E) && length) {
+			if (list_empty(&ep->queue)) {
+				qe_eprx_nack(ep);
+				dev_dbg(udc->dev,
+					"The rx ep has no request queued %d\n",
+					ep->has_data);
+				break;
+			}
+
+			if ((bdstatus & R_F) && (bdstatus & R_L)
+				&& !(bdstatus & R_ERROR)) {
+				qe_frame_clean(pframe);
+				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+				frame_set_data(pframe, (u8 *)vaddr);
+				frame_set_length(pframe,
+						(length - USB_CRC_SIZE));
+				frame_set_status(pframe, FRAME_OK);
+				switch (bdstatus & R_PID) {
+				case R_PID_DATA1:
+					frame_set_info(pframe, PID_DATA1);
+					break;
+				case R_PID_SETUP:
+					frame_set_info(pframe, PID_SETUP);
+					break;
+				default:
+					frame_set_info(pframe, PID_DATA0);
+					break;
+				}
+				/* handle the rx frame */
+				qe_ep_rxframe_handle(ep);
+			} else {
+				dev_err(udc->dev,
+					"error in received frame\n");
+			}
+			/* note: don't clear the rxbd's buffer address,
+			 * only clear the length */
+			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
+			ep->has_data--;
+			if (!(ep->localnack))
+				recycle_one_rxbd(ep);
+
+			/* Get next BD */
+			if (bdstatus & R_W)
+				bd = ep->rxbase;
+			else
+				bd++;
+
+			bdstatus = in_be32((u32 __iomem *)bd);
+			length = bdstatus & BD_LENGTH_MASK;
+		}
+
+		ep->n_rxbd = bd;
+
+		if (ep->localnack)
+			ep_recycle_rxbds(ep);
+
+		ep->enable_tasklet = 0;
+	} /* for i=1 */
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+}
+
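+/* Interrupt-time rx handling for a non-control endpoint: compute the
+ * number of empty BDs from the software and hardware ring pointers,
+ * NACK the endpoint when the ring is nearly full and defer the actual
+ * data copy to the rx tasklet. */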
+static int qe_ep_rx(struct qe_ep *ep)
+{
+	struct qe_udc *udc;
+	struct qe_frame *pframe;
+	struct qe_bd __iomem *bd;
+	u16 swoffs, ucoffs, emptybds;
+
+	udc = ep->udc;
+	pframe = ep->rxframe;
+
+	if (ep->dir == USB_DIR_IN) {
+		dev_err(udc->dev, "transmit ep in rx function\n");
+		return -EINVAL;
+	}
+
+	bd = ep->n_rxbd;
+
+	swoffs = (u16)(bd - ep->rxbase);
+	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
+			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
+	if (swoffs < ucoffs)
+		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
+	else
+		emptybds = swoffs - ucoffs;
+
+	if (emptybds < MIN_EMPTY_BDS) {
+		qe_eprx_nack(ep);
+		ep->localnack = 1;
+		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
+	}
+	ep->has_data = USB_BDRING_LEN_RX - emptybds;
+
+	if (list_empty(&ep->queue)) {
+		qe_eprx_nack(ep);
+		dev_vdbg(udc->dev, "The rxep have no req queued with %d BDs\n",
+				ep->has_data);
+		return 0;
+	}
+
+	tasklet_schedule(&udc->rx_tasklet);
+	ep->enable_tasklet = 1;
+
+	return 0;
+}
+
+/* Send the data held in a frame, regardless of the current tx_req */
+static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
+{
+	struct qe_udc *udc = ep->udc;
+	struct qe_bd __iomem *bd;
+	u16 saveusbmr;
+	u32 bdstatus, pidmask;
+	u32 paddr;
+
+	if (ep->dir == USB_DIR_OUT) {
+		dev_err(udc->dev, "receive ep passed to tx function\n");
+		return -EINVAL;
+	}
+
+	/* Disable the Tx interrupt */
+	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
+	out_be16(&udc->usb_regs->usb_usbmr,
+			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
+
+	bd = ep->n_txbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+
+	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
+		if (frame_get_length(frame) == 0) {
+			frame_set_data(frame, udc->nullbuf);
+			frame_set_length(frame, 2);
+			frame->info |= (ZLP | NO_CRC);
+			dev_vdbg(udc->dev, "the frame size = 0\n");
+		}
+		paddr = virt_to_phys((void *)frame->data);
+		out_be32(&bd->buf, paddr);
+		bdstatus = (bdstatus&T_W);
+		if (!(frame_get_info(frame) & NO_CRC))
+			bdstatus |= T_R | T_I | T_L | T_TC
+					| frame_get_length(frame);
+		else
+			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
+
+		/* if the packet is a ZLP in status phase */
+		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
+			ep->data01 = 0x1;
+
+		if (ep->data01) {
+			pidmask = T_PID_DATA1;
+			frame->info |= PID_DATA1;
+		} else {
+			pidmask = T_PID_DATA0;
+			frame->info |= PID_DATA0;
+		}
+		bdstatus |= T_CNF;
+		bdstatus |= pidmask;
+		out_be32((u32 __iomem *)bd, bdstatus);
+		qe_ep_filltxfifo(ep);
+
+		/* enable the TX interrupt */
+		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
+
+		qe_ep_toggledata01(ep);
+		if (bdstatus & T_W)
+			ep->n_txbd = ep->txbase;
+		else
+			ep->n_txbd++;
+
+		return 0;
+	} else {
+		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
+		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
+		return -EBUSY;
+	}
+}
+
+/* Called when a BD has been transmitted; updates the tx_req
+ * bookkeeping for endpoints other than ep0. */
+static int txcomplete(struct qe_ep *ep, unsigned char restart)
+{
+	if (ep->tx_req != NULL) {
+		if (!restart) {
+			int asent = ep->last;
+			ep->sent += asent;
+			ep->last -= asent;
+		} else {
+			ep->last = 0;
+		}
+
+		/* the request has been transmitted completely */
+		if ((ep->tx_req->req.length - ep->sent) <= 0) {
+			ep->tx_req->req.actual = (unsigned int)ep->sent;
+			done(ep, ep->tx_req, 0);
+			ep->tx_req = NULL;
+			ep->last = 0;
+			ep->sent = 0;
+		}
+	}
+
+	/* pick up a new tx_req for this endpoint */
+	if (ep->tx_req == NULL) {
+		if (!list_empty(&ep->queue)) {
+			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
+							queue);
+			ep->last = 0;
+			ep->sent = 0;
+		}
+	}
+
+	return 0;
+}
+
+/* Given a frame and the current tx_req, send the next chunk of data */
+static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
+{
+	unsigned int size;
+	u8 *buf;
+
+	qe_frame_clean(frame);
+	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
+				ep->ep.maxpacket);
+	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
+	if (buf && size) {
+		ep->last = size;
+		frame_set_data(frame, buf);
+		frame_set_length(frame, size);
+		frame_set_status(frame, FRAME_OK);
+		frame_set_info(frame, 0);
+		return qe_ep_tx(ep, frame);
+	}
+	return -EIO;
+}
+
+/* Given a frame struct, send a ZLP */
+static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
+{
+	struct qe_udc *udc = ep->udc;
+
+	if (frame == NULL)
+		return -ENODEV;
+
+	qe_frame_clean(frame);
+	frame_set_data(frame, (u8 *)udc->nullbuf);
+	frame_set_length(frame, 2);
+	frame_set_status(frame, FRAME_OK);
+	frame_set_info(frame, (ZLP | NO_CRC | infor));
+
+	return qe_ep_tx(ep, frame);
+}
+
+static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
+{
+	struct qe_req *req = ep->tx_req;
+	int reval;
+
+	if (req == NULL)
+		return -ENODEV;
+
+	if ((req->req.length - ep->sent) > 0)
+		reval = qe_usb_senddata(ep, frame);
+	else
+		reval = sendnulldata(ep, frame, 0);
+
+	return reval;
+}
+
+/* If direction is DIR_IN, the status transaction is Device->Host;
+ * if direction is DIR_OUT, the status transaction is Device<-Host.
+ * In the status phase the udc creates the request itself. */
+static int ep0_prime_status(struct qe_udc *udc, int direction)
+{
+
+	struct qe_ep *ep = &udc->eps[0];
+
+	if (direction == USB_DIR_IN) {
+		udc->ep0_state = DATA_STATE_NEED_ZLP;
+		udc->ep0_dir = USB_DIR_IN;
+		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
+	} else {
+		udc->ep0_dir = USB_DIR_OUT;
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
+	}
+
+	return 0;
+}
+
+/* a request completed on ep0, whether a gadget request or a udc request */
+static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
+{
+	struct qe_ep *ep = &udc->eps[0];
+	/* the usb and ep status have already been set in ch9setaddress() */
+
+	switch (udc->ep0_state) {
+	case DATA_STATE_XMIT:
+		done(ep, req, 0);
+		/* receive status phase */
+		if (ep0_prime_status(udc, USB_DIR_OUT))
+			qe_ep0_stall(udc);
+		break;
+
+	case DATA_STATE_NEED_ZLP:
+		done(ep, req, 0);
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+
+	case DATA_STATE_RECV:
+		done(ep, req, 0);
+		/* send status phase */
+		if (ep0_prime_status(udc, USB_DIR_IN))
+			qe_ep0_stall(udc);
+		break;
+
+	case WAIT_FOR_OUT_STATUS:
+		done(ep, req, 0);
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+
+	case WAIT_FOR_SETUP:
+		dev_vdbg(udc->dev, "Unexpected interrupt\n");
+		break;
+
+	default:
+		qe_ep0_stall(udc);
+		break;
+	}
+}
+
+static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
+{
+	struct qe_req *tx_req = NULL;
+	struct qe_frame *frame = ep->txframe;
+
+	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
+		if (!restart)
+			ep->udc->ep0_state = WAIT_FOR_SETUP;
+		else
+			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
+		return 0;
+	}
+
+	tx_req = ep->tx_req;
+	if (tx_req != NULL) {
+		if (!restart) {
+			int asent = ep->last;
+			ep->sent += asent;
+			ep->last -= asent;
+		} else {
+			ep->last = 0;
+		}
+
+		/* the request has been transmitted completely */
+		if ((ep->tx_req->req.length - ep->sent) <= 0) {
+			ep->tx_req->req.actual = (unsigned int)ep->sent;
+			ep0_req_complete(ep->udc, ep->tx_req);
+			ep->tx_req = NULL;
+			ep->last = 0;
+			ep->sent = 0;
+		}
+	} else {
+		dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n");
+	}
+
+	return 0;
+}
+
+static int ep0_txframe_handle(struct qe_ep *ep)
+{
+	/* on error, transmit the frame again */
+	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
+		qe_ep_flushtxfifo(ep);
+		dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
+		if (frame_get_info(ep->txframe) & PID_DATA0)
+			ep->data01 = 0;
+		else
+			ep->data01 = 1;
+
+		ep0_txcomplete(ep, 1);
+	} else
+		ep0_txcomplete(ep, 0);
+
+	frame_create_tx(ep, ep->txframe);
+	return 0;
+}
+
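+/* Confirm transmitted ep0 BDs: release each completed BD back to the ring
+ * and, when the last BD of the frame has been confirmed, record any error
+ * status and hand the frame to ep0_txframe_handle(). */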
+static int qe_ep0_txconf(struct qe_ep *ep)
+{
+	struct qe_bd __iomem *bd;
+	struct qe_frame *pframe;
+	u32 bdstatus;
+
+	bd = ep->c_txbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
+		pframe = ep->txframe;
+
+		/* clear and recycle the BD */
+		out_be32((u32 __iomem *)bd, bdstatus & T_W);
+		out_be32(&bd->buf, 0);
+		if (bdstatus & T_W)
+			ep->c_txbd = ep->txbase;
+		else
+			ep->c_txbd++;
+
+		if (ep->c_txbd == ep->n_txbd) {
+			if (bdstatus & DEVICE_T_ERROR) {
+				frame_set_status(pframe, FRAME_ERROR);
+				if (bdstatus & T_TO)
+					pframe->status |= TX_ER_TIMEOUT;
+				if (bdstatus & T_UN)
+					pframe->status |= TX_ER_UNDERUN;
+			}
+			ep0_txframe_handle(ep);
+		}
+
+		bd = ep->c_txbd;
+		bdstatus = in_be32((u32 __iomem *)bd);
+	}
+
+	return 0;
+}
+
+static int ep_txframe_handle(struct qe_ep *ep)
+{
+	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
+		qe_ep_flushtxfifo(ep);
+		dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
+		if (frame_get_info(ep->txframe) & PID_DATA0)
+			ep->data01 = 0;
+		else
+			ep->data01 = 1;
+
+		txcomplete(ep, 1);
+	} else
+		txcomplete(ep, 0);
+
+	frame_create_tx(ep, ep->txframe); /* send the data */
+	return 0;
+}
+
+/* confirm the already transmitted BDs */
+static int qe_ep_txconf(struct qe_ep *ep)
+{
+	struct qe_bd __iomem *bd;
+	struct qe_frame *pframe = NULL;
+	u32 bdstatus;
+	unsigned char breakonrxinterrupt = 0;
+
+	bd = ep->c_txbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
+		pframe = ep->txframe;
+		if (bdstatus & DEVICE_T_ERROR) {
+			frame_set_status(pframe, FRAME_ERROR);
+			if (bdstatus & T_TO)
+				pframe->status |= TX_ER_TIMEOUT;
+			if (bdstatus & T_UN)
+				pframe->status |= TX_ER_UNDERUN;
+		}
+
+		/* clear and recycle the BD */
+		out_be32((u32 __iomem *)bd, bdstatus & T_W);
+		out_be32(&bd->buf, 0);
+		if (bdstatus & T_W)
+			ep->c_txbd = ep->txbase;
+		else
+			ep->c_txbd++;
+
+		/* handle the tx frame */
+		ep_txframe_handle(ep);
+		bd = ep->c_txbd;
+		bdstatus = in_be32((u32 __iomem *)bd);
+	}
+	if (breakonrxinterrupt)
+		return -EIO;
+	else
+		return 0;
+}
+
+/* Add a request to the queue, and try to transmit a packet */
+static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
+{
+	int reval = 0;
+
+	if (ep->tx_req == NULL) {
+		ep->sent = 0;
+		ep->last = 0;
+		txcomplete(ep, 0); /* can gain a new tx_req */
+		reval = frame_create_tx(ep, ep->txframe);
+	}
+	return reval;
+}
+
+/* Copy data already received in the BD ring into the newly queued request */
+static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
+{
+	struct qe_udc *udc = ep->udc;
+	struct qe_frame *pframe = NULL;
+	struct qe_bd __iomem *bd;
+	u32 bdstatus, length;
+	u32 vaddr, fsize;
+	u8 *cp;
+	u8 finish_req = 0;
+	u8 framepid;
+
+	if (list_empty(&ep->queue)) {
+		dev_vdbg(udc->dev, "the req already finish!\n");
+		return 0;
+	}
+	pframe = ep->rxframe;
+
+	bd = ep->n_rxbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	length = bdstatus & BD_LENGTH_MASK;
+
+	while (!(bdstatus & R_E) && length) {
+		if (finish_req)
+			break;
+		if ((bdstatus & R_F) && (bdstatus & R_L)
+					&& !(bdstatus & R_ERROR)) {
+			qe_frame_clean(pframe);
+			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+			frame_set_data(pframe, (u8 *)vaddr);
+			frame_set_length(pframe, (length - USB_CRC_SIZE));
+			frame_set_status(pframe, FRAME_OK);
+			switch (bdstatus & R_PID) {
+			case R_PID_DATA1:
+				frame_set_info(pframe, PID_DATA1); break;
+			default:
+				frame_set_info(pframe, PID_DATA0); break;
+			}
+			/* handle the rx frame */
+
+			if (frame_get_info(pframe) & PID_DATA1)
+				framepid = 0x1;
+			else
+				framepid = 0;
+
+			if (framepid != ep->data01) {
+				dev_vdbg(udc->dev, "the data01 error!\n");
+			} else {
+				fsize = frame_get_length(pframe);
+
+				cp = (u8 *)(req->req.buf) + req->req.actual;
+				if (cp) {
+					memcpy(cp, pframe->data, fsize);
+					req->req.actual += fsize;
+					if ((fsize < ep->ep.maxpacket)
+						|| (req->req.actual >=
+							req->req.length)) {
+						finish_req = 1;
+						done(ep, req, 0);
+						if (list_empty(&ep->queue))
+							qe_eprx_nack(ep);
+					}
+				}
+				qe_ep_toggledata01(ep);
+			}
+		} else {
+			dev_err(udc->dev, "The receive frame with error!\n");
+		}
+
+		/* note: don't clear the rxbd's buffer address,
+		 * only clear the length */
+		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
+		ep->has_data--;
+
+		/* Get next BD */
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		bdstatus = in_be32((u32 __iomem *)bd);
+		length = bdstatus & BD_LENGTH_MASK;
+	}
+
+	ep->n_rxbd = bd;
+	ep_recycle_rxbds(ep);
+
+	return 0;
+}
+
+/* the request was only queued; enable rx or copy data already held in BDs */
+static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
+{
+	if (ep->state == EP_STATE_NACK) {
+		if (ep->has_data <= 0) {
+			/* Enable rx and unmask rx interrupt */
+			qe_eprx_normal(ep);
+		} else {
+			/* Copy the existing BD data */
+			ep_req_rx(ep, req);
+		}
+	}
+
+	return 0;
+}
+
+/********************************************************************
+	Internally Used Functions End
+********************************************************************/
+
+/*-----------------------------------------------------------------------
+	Endpoint Management Functions For Gadget
+ -----------------------------------------------------------------------*/
+static int qe_ep_enable(struct usb_ep *_ep,
+			 const struct usb_endpoint_descriptor *desc)
+{
+	struct qe_udc *udc;
+	struct qe_ep *ep;
+	int retval = 0;
+	unsigned char epnum;
+
+	ep = container_of(_ep, struct qe_ep, ep);
+
+	/* catch various bogus parameters */
+	if (!_ep || !desc || ep->desc || _ep->name == ep_name[0] ||
+			(desc->bDescriptorType != USB_DT_ENDPOINT))
+		return -EINVAL;
+
+	udc = ep->udc;
+	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	epnum = (u8)desc->bEndpointAddress & 0xF;
+
+	retval = qe_ep_init(udc, epnum, desc);
+	if (retval != 0) {
+		cpm_muram_free(cpm_muram_offset(ep->rxbase));
+		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
+		return -EINVAL;
+	}
+	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
+	return 0;
+}
+
+static int qe_ep_disable(struct usb_ep *_ep)
+{
+	struct qe_udc *udc;
+	struct qe_ep *ep;
+	unsigned long flags;
+	unsigned int size;
+
+	ep = container_of(_ep, struct qe_ep, ep);
+	udc = ep->udc;
+
+	if (!_ep || !ep->desc) {
+		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	/* Nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+	ep->desc = NULL;
+	ep->stopped = 1;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	cpm_muram_free(cpm_muram_offset(ep->rxbase));
+
+	if (ep->dir == USB_DIR_OUT)
+		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
+				(USB_BDRING_LEN_RX + 1);
+	else
+		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
+				(USB_BDRING_LEN + 1);
+
+	if (ep->dir != USB_DIR_IN) {
+		kfree(ep->rxframe);
+		if (ep->rxbufmap) {
+			dma_unmap_single(udc_controller->gadget.dev.parent,
+					ep->rxbuf_d, size,
+					DMA_FROM_DEVICE);
+			ep->rxbuf_d = DMA_ADDR_INVALID;
+		} else {
+			dma_sync_single_for_cpu(
+					udc_controller->gadget.dev.parent,
+					ep->rxbuf_d, size,
+					DMA_FROM_DEVICE);
+		}
+		kfree(ep->rxbuffer);
+	}
+
+	if (ep->dir != USB_DIR_OUT)
+		kfree(ep->txframe);
+
+	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
+	return 0;
+}
+
+static struct usb_request *qe_alloc_request(struct usb_ep *_ep,	gfp_t gfp_flags)
+{
+	struct qe_req *req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_req *req;
+
+	req = container_of(_req, struct qe_req, req);
+
+	if (_req)
+		kfree(req);
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+				gfp_t gfp_flags)
+{
+	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+	struct qe_req *req = container_of(_req, struct qe_req, req);
+	struct qe_udc *udc;
+	unsigned long flags;
+	int reval;
+
+	udc = ep->udc;
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		dev_dbg(udc->dev, "bad params\n");
+		return -EINVAL;
+	}
+	if (!_ep || (!ep->desc && ep_index(ep))) {
+		dev_dbg(udc->dev, "bad ep\n");
+		return -EINVAL;
+	}
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	req->ep = ep;
+
+	/* map virtual address to hardware */
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+					req->req.buf,
+					req->req.length,
+					ep_is_in(ep)
+					? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+					req->req.dma, req->req.length,
+					ep_is_in(ep)
+					? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+
+	list_add_tail(&req->queue, &ep->queue);
+	dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
+			ep->name, req->req.length);
+	spin_lock_irqsave(&udc->lock, flags);
+	/* push the request to device */
+	if (ep_is_in(ep))
+		reval = ep_req_send(ep, req);
+
+	/* EP0 */
+	if (ep_index(ep) == 0 && req->req.length > 0) {
+		if (ep_is_in(ep))
+			udc->ep0_state = DATA_STATE_XMIT;
+		else
+			udc->ep0_state = DATA_STATE_RECV;
+	}
+
+	if (ep->dir == USB_DIR_OUT)
+		reval = ep_req_receive(ep, req);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+	struct qe_req *req;
+	unsigned long flags;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		return -EINVAL;
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return 0;
+}
+
+/*-----------------------------------------------------------------
+ * modify the endpoint halt feature
+ * @ep: the non-isochronous endpoint being stalled
+ * @value: 1--set halt  0--clear halt
+ * Returns zero, or a negative error code.
+*----------------------------------------------------------------*/
+static int qe_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct qe_ep *ep;
+	unsigned long flags;
+	int status = -EOPNOTSUPP;
+	struct qe_udc *udc;
+
+	ep = container_of(_ep, struct qe_ep, ep);
+	if (!_ep || !ep->desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	udc = ep->udc;
+	/* An attempt to halt an IN ep will fail if any transfer
+	 * requests are still queued */
+	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	status = 0;
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	qe_eptx_stall_change(ep, value);
+	qe_eprx_stall_change(ep, value);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	if (ep->epnum == 0) {
+		udc->ep0_state = WAIT_FOR_SETUP;
+		udc->ep0_dir = 0;
+	}
+
+	/* set data toggle to DATA0 on clear halt */
+	if (value == 0)
+		ep->data01 = 0;
+out:
+	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
+			value ?  "set" : "clear", status);
+
+	return status;
+}
+
+static struct usb_ep_ops qe_ep_ops = {
+	.enable = qe_ep_enable,
+	.disable = qe_ep_disable,
+
+	.alloc_request = qe_alloc_request,
+	.free_request = qe_free_request,
+
+	.queue = qe_ep_queue,
+	.dequeue = qe_ep_dequeue,
+
+	.set_halt = qe_ep_set_halt,
+};
+
+/*------------------------------------------------------------------------
+	Gadget Driver Layer Operations
+ ------------------------------------------------------------------------*/
+
+/* Get the current frame number */
+static int qe_get_frame(struct usb_gadget *gadget)
+{
+	u16 tmp;
+
+	tmp = in_be16(&udc_controller->usb_param->frame_n);
+	if (tmp & 0x8000)
+		tmp = tmp & 0x07ff;
+	else
+		tmp = -EINVAL;
+
+	return (int)tmp;
+}
+
+/* Tries to wake up the host connected to this gadget
+ *
+ * Return : 0-success
+ * Negative-this feature not enabled by host or not supported by device hw
+ */
+static int qe_wakeup(struct usb_gadget *gadget)
+{
+	return -ENOTSUPP;
+}
+
+/* Notify the controller that VBUS is powered; called by whatever
+   detects VBUS sessions */
+static int qe_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	return -ENOTSUPP;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int qe_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	return -ENOTSUPP;
+}
+
+/* Change Data+ pullup status
+ * this func is used by usb_gadget_connect/disconnect
+ */
+static int qe_pullup(struct usb_gadget *gadget, int is_on)
+{
+	return -ENOTSUPP;
+}
+
+/* defined in usb_gadget.h */
+static struct usb_gadget_ops qe_gadget_ops = {
+	.get_frame = qe_get_frame,
+	.wakeup = qe_wakeup,
+/*	.set_selfpowered = qe_set_selfpowered,*/ /* always selfpowered */
+	.vbus_session = qe_vbus_session,
+	.vbus_draw = qe_vbus_draw,
+	.pullup = qe_pullup,
+};
+
+/*-------------------------------------------------------------------------
+	USB ep0 Setup process in BUS Enumeration
+ -------------------------------------------------------------------------*/
+static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
+{
+	struct qe_ep *ep = &udc->eps[pipe];
+
+	nuke(ep, -ECONNRESET);
+	ep->tx_req = NULL;
+	return 0;
+}
+
+static int reset_queues(struct qe_udc *udc)
+{
+	u8 pipe;
+
+	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
+		udc_reset_ep_queue(udc, pipe);
+
+	/* report disconnect; the driver is already quiesced */
+	spin_unlock(&udc->lock);
+	udc->driver->disconnect(&udc->gadget);
+	spin_lock(&udc->lock);
+
+	return 0;
+}
+
+static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
+			u16 length)
+{
+	/* Save the new address to device struct */
+	udc->device_address = (u8) value;
+	/* Update usb state */
+	udc->usb_state = USB_STATE_ADDRESS;
+
+	/* Status phase, send a ZLP */
+	if (ep0_prime_status(udc, USB_DIR_IN))
+		qe_ep0_stall(udc);
+}
+
+static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_req *req = container_of(_req, struct qe_req, req);
+
+	req->req.buf = NULL;
+	kfree(req);
+}
+
+static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
+			u16 index, u16 length)
+{
+	u16 usb_status = 0;
+	struct qe_req *req;
+	struct qe_ep *ep;
+	int status = 0;
+
+	ep = &udc->eps[0];
+	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* Get device status */
+		usb_status = 1 << USB_DEVICE_SELF_POWERED;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+		/* Get interface status */
+		/* We don't have interface information in udc driver */
+		usb_status = 0;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+		/* Get endpoint status */
+		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
+		struct qe_ep *target_ep = &udc->eps[pipe];
+		u16 usep;
+
+		/* stall if endpoint doesn't exist */
+		if (!target_ep->desc)
+			goto stall;
+
+		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
+		if (index & USB_DIR_IN) {
+			if (target_ep->dir != USB_DIR_IN)
+				goto stall;
+			if ((usep & USB_THS_MASK) == USB_THS_STALL)
+				usb_status = 1 << USB_ENDPOINT_HALT;
+		} else {
+			if (target_ep->dir != USB_DIR_OUT)
+				goto stall;
+			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
+				usb_status = 1 << USB_ENDPOINT_HALT;
+		}
+	}
+
+	req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
+					struct qe_req, req);
+	req->req.length = 2;
+	req->req.buf = udc->statusbuf;
+	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = ownercomplete;
+
+	udc->ep0_dir = USB_DIR_IN;
+
+	/* data phase */
+	status = qe_ep_queue(&ep->ep, &req->req, GFP_ATOMIC);
+
+	if (status == 0)
+		return;
+stall:
+	dev_err(udc->dev, "Can't respond to getstatus request \n");
+	qe_ep0_stall(udc);
+}
+
+/* only handles setup requests; assumes the device is in a normal state */
+static void setup_received_handle(struct qe_udc *udc,
+				struct usb_ctrlrequest *setup)
+{
+	/* Fix endianness (udc->local_setup_buff is CPU-endian now) */
+	u16 wValue = le16_to_cpu(setup->wValue);
+	u16 wIndex = le16_to_cpu(setup->wIndex);
+	u16 wLength = le16_to_cpu(setup->wLength);
+
+	/* clear the previous request in the ep0 */
+	udc_reset_ep_queue(udc, 0);
+
+	if (setup->bRequestType & USB_DIR_IN)
+		udc->ep0_dir = USB_DIR_IN;
+	else
+		udc->ep0_dir = USB_DIR_OUT;
+
+	switch (setup->bRequest) {
+	case USB_REQ_GET_STATUS:
+		/* Data+Status phase from udc */
+		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+					!= (USB_DIR_IN | USB_TYPE_STANDARD))
+			break;
+		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
+					wLength);
+		return;
+
+	case USB_REQ_SET_ADDRESS:
+		/* Status phase from udc */
+		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
+						USB_RECIP_DEVICE))
+			break;
+		ch9setaddress(udc, wValue, wIndex, wLength);
+		return;
+
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		/* Requests with no data phase, status phase from udc */
+		if ((setup->bRequestType & USB_TYPE_MASK)
+					!= USB_TYPE_STANDARD)
+			break;
+
+		if ((setup->bRequestType & USB_RECIP_MASK)
+				== USB_RECIP_ENDPOINT) {
+			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
+			struct qe_ep *ep;
+
+			if (wValue != 0 || wLength != 0
+				|| pipe > USB_MAX_ENDPOINTS)
+				break;
+			ep = &udc->eps[pipe];
+
+			spin_unlock(&udc->lock);
+			qe_ep_set_halt(&ep->ep,
+					(setup->bRequest == USB_REQ_SET_FEATURE)
+						? 1 : 0);
+			spin_lock(&udc->lock);
+		}
+
+		ep0_prime_status(udc, USB_DIR_IN);
+
+		return;
+
+	default:
+		break;
+	}
+
+	if (wLength) {
+		/* Data phase from gadget, status phase from udc */
+		if (setup->bRequestType & USB_DIR_IN) {
+			udc->ep0_state = DATA_STATE_XMIT;
+			udc->ep0_dir = USB_DIR_IN;
+		} else {
+			udc->ep0_state = DATA_STATE_RECV;
+			udc->ep0_dir = USB_DIR_OUT;
+		}
+		spin_unlock(&udc->lock);
+		if (udc->driver->setup(&udc->gadget,
+					&udc->local_setup_buff) < 0)
+			qe_ep0_stall(udc);
+		spin_lock(&udc->lock);
+	} else {
+		/* No data phase, IN status from gadget */
+		udc->ep0_dir = USB_DIR_IN;
+		spin_unlock(&udc->lock);
+		if (udc->driver->setup(&udc->gadget,
+					&udc->local_setup_buff) < 0)
+			qe_ep0_stall(udc);
+		spin_lock(&udc->lock);
+		udc->ep0_state = DATA_STATE_NEED_ZLP;
+	}
+}
+
+/*-------------------------------------------------------------------------
+	USB Interrupt handlers
+ -------------------------------------------------------------------------*/
+static void suspend_irq(struct qe_udc *udc)
+{
+	udc->resume_state = udc->usb_state;
+	udc->usb_state = USB_STATE_SUSPENDED;
+
+	/* report suspend to the driver; serial.c does not support this */
+	if (udc->driver->suspend)
+		udc->driver->suspend(&udc->gadget);
+}
+
+static void resume_irq(struct qe_udc *udc)
+{
+	udc->usb_state = udc->resume_state;
+	udc->resume_state = 0;
+
+	/* report resume to the driver; serial.c does not support this */
+	if (udc->driver->resume)
+		udc->driver->resume(&udc->gadget);
+}
+
+static void idle_irq(struct qe_udc *udc)
+{
+	u8 usbs;
+
+	usbs = in_8(&udc->usb_regs->usb_usbs);
+	if (usbs & USB_IDLE_STATUS_MASK) {
+		if ((udc->usb_state) != USB_STATE_SUSPENDED)
+			suspend_irq(udc);
+	} else {
+		if (udc->usb_state == USB_STATE_SUSPENDED)
+			resume_irq(udc);
+	}
+}
+
+static int reset_irq(struct qe_udc *udc)
+{
+	unsigned char i;
+
+	qe_usb_disable();
+	out_8(&udc->usb_regs->usb_usadr, 0);
+
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+		if (udc->eps[i].init)
+			qe_ep_reset(udc, i);
+	}
+
+	reset_queues(udc);
+	udc->usb_state = USB_STATE_DEFAULT;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = USB_DIR_OUT;
+	qe_usb_enable();
+	return 0;
+}
+
+static int bsy_irq(struct qe_udc *udc)
+{
+	return 0;
+}
+
+static int txe_irq(struct qe_udc *udc)
+{
+	return 0;
+}
+
+/* ep0 tx interrupts are also handled here */
+static int tx_irq(struct qe_udc *udc)
+{
+	struct qe_ep *ep;
+	struct qe_bd __iomem *bd;
+	int i, res = 0;
+
+	if ((udc->usb_state == USB_STATE_ADDRESS)
+		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
+		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
+
+	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
+		ep = &udc->eps[i];
+		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
+			bd = ep->c_txbd;
+			if (!(in_be32((u32 __iomem *)bd) & T_R)
+						&& (in_be32(&bd->buf))) {
+				/* confirm the transmitted bd */
+				if (ep->epnum == 0)
+					res = qe_ep0_txconf(ep);
+				else
+					res = qe_ep_txconf(ep);
+			}
+		}
+	}
+	return res;
+}
+
+
+/* setup packet rx is handled in this function too */
+static void rx_irq(struct qe_udc *udc)
+{
+	struct qe_ep *ep;
+	struct qe_bd __iomem *bd;
+	int i;
+
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+		ep = &udc->eps[i];
+		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
+			bd = ep->n_rxbd;
+			if (!(in_be32((u32 __iomem *)bd) & R_E)
+						&& (in_be32(&bd->buf))) {
+				if (ep->epnum == 0) {
+					qe_ep0_rx(udc);
+				} else {
+					/* non-setup packet receive */
+					qe_ep_rx(ep);
+				}
+			}
+		}
+	}
+}
+
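+/* Top-level interrupt handler: read the event register masked by the
+ * interrupt mask, acknowledge the events and dispatch idle, tx, rx,
+ * reset, busy and tx-error handling in turn. */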
+static irqreturn_t qe_udc_irq(int irq, void *_udc)
+{
+	struct qe_udc *udc = (struct qe_udc *)_udc;
+	u16 irq_src;
+	irqreturn_t status = IRQ_NONE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	irq_src = in_be16(&udc->usb_regs->usb_usber) &
+		in_be16(&udc->usb_regs->usb_usbmr);
+	/* Clear notification bits */
+	out_be16(&udc->usb_regs->usb_usber, irq_src);
+	/* USB Interrupt */
+	if (irq_src & USB_E_IDLE_MASK) {
+		idle_irq(udc);
+		irq_src &= ~USB_E_IDLE_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_TXB_MASK) {
+		tx_irq(udc);
+		irq_src &= ~USB_E_TXB_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_RXB_MASK) {
+		rx_irq(udc);
+		irq_src &= ~USB_E_RXB_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_RESET_MASK) {
+		reset_irq(udc);
+		irq_src &= ~USB_E_RESET_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_BSY_MASK) {
+		bsy_irq(udc);
+		irq_src &= ~USB_E_BSY_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_TXE_MASK) {
+		txe_irq(udc);
+		irq_src &= ~USB_E_TXE_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return status;
+}
+
+/*-------------------------------------------------------------------------
+	Gadget driver register and unregister.
+ --------------------------------------------------------------------------*/
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	int retval;
+	unsigned long flags = 0;
+
+	/* standard operations */
+	if (!udc_controller)
+		return -ENODEV;
+
+	if (!driver || (driver->speed != USB_SPEED_FULL
+			&& driver->speed != USB_SPEED_HIGH)
+			|| !driver->bind || !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+
+	if (udc_controller->driver)
+		return -EBUSY;
+
+	/* a lock is needed, though which lock to use is an open question */
+	spin_lock_irqsave(&udc_controller->lock, flags);
+
+	driver->driver.bus = NULL;
+	/* hook up the driver */
+	udc_controller->driver = driver;
+	udc_controller->gadget.dev.driver = &driver->driver;
+	udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed);
+	spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+	retval = driver->bind(&udc_controller->gadget);
+	if (retval) {
+		dev_err(udc_controller->dev, "bind to %s --> %d",
+				driver->driver.name, retval);
+		udc_controller->gadget.dev.driver = NULL;
+		udc_controller->driver = NULL;
+		return retval;
+	}
+
+	/* Enable IRQ reg and Set usbcmd reg EN bit */
+	qe_usb_enable();
+
+	out_be16(&udc_controller->usb_regs->usb_usber, 0xffff);
+	out_be16(&udc_controller->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
+	udc_controller->usb_state = USB_STATE_ATTACHED;
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = USB_DIR_OUT;
+	dev_info(udc_controller->dev, "%s bind to driver %s \n",
+		udc_controller->gadget.name, driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct qe_ep *loop_ep;
+	unsigned long flags;
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	if (!driver || driver != udc_controller->driver)
+		return -EINVAL;
+
+	/* stop usb controller, disable intr */
+	qe_usb_disable();
+
+	/* in fact, not needed */
+	udc_controller->usb_state = USB_STATE_ATTACHED;
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = 0;
+
+	/* standard operation */
+	spin_lock_irqsave(&udc_controller->lock, flags);
+	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+	nuke(&udc_controller->eps[0], -ESHUTDOWN);
+	list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
+				ep.ep_list)
+		nuke(loop_ep, -ESHUTDOWN);
+	spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+	/* unbind gadget and unhook driver. */
+	driver->unbind(&udc_controller->gadget);
+	udc_controller->gadget.dev.driver = NULL;
+	udc_controller->driver = NULL;
+
+	dev_info(udc_controller->dev, "unregistered gadget driver '%s'\r\n",
+			driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/* udc structure's alloc and setup, include ep-param alloc */
+static struct qe_udc __devinit *qe_udc_config(struct of_device *ofdev)
+{
+	struct qe_udc *udc;
+	struct device_node *np = ofdev->node;
+	unsigned int tmp_addr = 0;
+	struct usb_device_para __iomem *usbpram;
+	unsigned int i;
+	u64 size;
+	u32 offset;
+
+	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+	if (udc == NULL) {
+		dev_err(&ofdev->dev, "malloc udc failed\n");
+		goto cleanup;
+	}
+
+	udc->dev = &ofdev->dev;
+
+	/* get default address of usb parameter in MURAM from device tree */
+	offset = *of_get_address(np, 1, &size, NULL);
+	udc->usb_param = cpm_muram_addr(offset);
+	memset_io(udc->usb_param, 0, size);
+
+	usbpram = udc->usb_param;
+	out_be16(&usbpram->frame_n, 0);
+	out_be32(&usbpram->rstate, 0);
+
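+	/* allocate MURAM for the per-endpoint parameter blocks and point
+	 * the epptr entries of the device parameter RAM at them */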
+	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
+					sizeof(struct usb_ep_para)),
+					   USB_EP_PARA_ALIGNMENT);
+
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
+		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
+		tmp_addr += 32;
+	}
+
+	memset_io(udc->ep_param[0], 0,
+			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
+
+	udc->resume_state = USB_STATE_NOTATTACHED;
+	udc->usb_state = USB_STATE_POWERED;
+	udc->ep0_dir = 0;
+
+	spin_lock_init(&udc->lock);
+	return udc;
+
+cleanup:
+	kfree(udc);
+	return NULL;
+}
+
+/* USB Controller register init */
+static int __devinit qe_udc_reg_init(struct qe_udc *udc)
+{
+	struct usb_ctlr __iomem *qe_usbregs;
+	qe_usbregs = udc->usb_regs;
+
+	/* Init the usb register */
+	out_8(&qe_usbregs->usb_usmod, 0x01);
+	out_be16(&qe_usbregs->usb_usbmr, 0);
+	out_8(&qe_usbregs->usb_uscom, 0);
+	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
+
+	return 0;
+}
+
+static int __devinit qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+
+	ep->udc = udc;
+	strcpy(ep->name, ep_name[pipe_num]);
+	ep->ep.name = ep_name[pipe_num];
+
+	ep->ep.ops = &qe_ep_ops;
+	ep->stopped = 1;
+	ep->ep.maxpacket = (unsigned short) ~0;
+	ep->desc = NULL;
+	ep->dir = 0xff;
+	ep->epnum = (u8)pipe_num;
+	ep->sent = 0;
+	ep->last = 0;
+	ep->init = 0;
+	ep->rxframe = NULL;
+	ep->txframe = NULL;
+	ep->tx_req = NULL;
+	ep->state = EP_STATE_IDLE;
+	ep->has_data = 0;
+
+	/* the queue lists any req for this ep */
+	INIT_LIST_HEAD(&ep->queue);
+
+	/* gadget.ep_list is used for ep_autoconfig, so skip ep0 */
+	if (pipe_num != 0)
+		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+	ep->gadget = &udc->gadget;
+
+	return 0;
+}
+
+/*-----------------------------------------------------------------------
+ *	UDC device Driver operation functions				*
+ *----------------------------------------------------------------------*/
+static void qe_udc_release(struct device *dev)
+{
+	int i = 0;
+
+	complete(udc_controller->done);
+	cpm_muram_free(cpm_muram_offset(udc_controller->ep_param[0]));
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
+		udc_controller->ep_param[i] = NULL;
+
+	kfree(udc_controller);
+	udc_controller = NULL;
+}
+
+/* Driver probe functions */
+static int __devinit qe_udc_probe(struct of_device *ofdev,
+			const struct of_device_id *match)
+{
+	struct device_node *np = ofdev->node;
+	struct qe_ep *ep;
+	unsigned int ret = 0;
+	unsigned int i;
+	const void *prop;
+
+	prop = of_get_property(np, "mode", NULL);
+	if (!prop || strcmp(prop, "peripheral"))
+		return -ENODEV;
+
+	/* Initialize the udc structure including QH member and other member */
+	udc_controller = qe_udc_config(ofdev);
+	if (!udc_controller) {
+		dev_dbg(&ofdev->dev, "udc_controll is NULL\n");
+		return -ENOMEM;
+	}
+
+	udc_controller->soc_type = (unsigned long)match->data;
+	udc_controller->usb_regs = of_iomap(np, 0);
+	if (!udc_controller->usb_regs) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	/* initialize USB hw registers except the per-EP registers;
+	 * leave the usbintr register untouched */
+	qe_udc_reg_init(udc_controller);
+
+	/* here come the standard probe operations:
+	 * set the qe_udc->gadget.xxx fields */
+	udc_controller->gadget.ops = &qe_gadget_ops;
+
+	/* gadget.ep0 is a pointer */
+	udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
+
+	INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
+
+	/* modify in register gadget process */
+	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* name: Identifies the controller hardware type. */
+	udc_controller->gadget.name = driver_name;
+
+	device_initialize(&udc_controller->gadget.dev);
+
+	strcpy(udc_controller->gadget.dev.bus_id, "gadget");
+
+	udc_controller->gadget.dev.release = qe_udc_release;
+	udc_controller->gadget.dev.parent = &ofdev->dev;
+
+	/* initialize qe_ep struct */
+	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
+		/* the ep type isn't decided here, so
+		 * qe_ep_init() is called in ep_enable() instead */
+
+		/* setup the qe_ep struct and link ep.ep.list
+		 * into gadget.ep_list */
+		qe_ep_config(udc_controller, (unsigned char)i);
+	}
+
+	/* ep0 initialization in here */
+	ret = qe_ep_init(udc_controller, 0, &qe_ep0_desc);
+	if (ret)
+		goto err2;
+
+	/* create a buffer for sending ZLPs; it must remain zeroed */
+	udc_controller->nullbuf = kzalloc(256, GFP_KERNEL);
+	if (udc_controller->nullbuf == NULL) {
+		dev_dbg(udc_controller->dev, "cannot alloc nullbuf\n");
+		ret = -ENOMEM;
+		goto err3;
+	}
+
+	/* buffer for data of get_status request */
+	udc_controller->statusbuf = kzalloc(2, GFP_KERNEL);
+	if (udc_controller->statusbuf == NULL) {
+		ret = -ENOMEM;
+		goto err4;
+	}
+
+	udc_controller->nullp = virt_to_phys((void *)udc_controller->nullbuf);
+	if (udc_controller->nullp == DMA_ADDR_INVALID) {
+		udc_controller->nullp = dma_map_single(
+					udc_controller->gadget.dev.parent,
+					udc_controller->nullbuf,
+					256,
+					DMA_TO_DEVICE);
+		udc_controller->nullmap = 1;
+	} else {
+		dma_sync_single_for_device(udc_controller->gadget.dev.parent,
+					udc_controller->nullp, 256,
+					DMA_TO_DEVICE);
+	}
+
+	tasklet_init(&udc_controller->rx_tasklet, ep_rx_tasklet,
+			(unsigned long)udc_controller);
+	/* request irq and disable DR  */
+	udc_controller->usb_irq = irq_of_parse_and_map(np, 0);
+
+	ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0,
+				driver_name, udc_controller);
+	if (ret) {
+		dev_err(udc_controller->dev, "cannot request irq %d err %d \n",
+			udc_controller->usb_irq, ret);
+		goto err5;
+	}
+
+	ret = device_add(&udc_controller->gadget.dev);
+	if (ret)
+		goto err6;
+
+	dev_info(udc_controller->dev,
+			"%s USB controller initialized as device\n",
+			(udc_controller->soc_type == PORT_QE) ? "QE" : "CPM");
+	return 0;
+
+err6:
+	free_irq(udc_controller->usb_irq, udc_controller);
+err5:
+	if (udc_controller->nullmap) {
+		dma_unmap_single(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+			udc_controller->nullp = DMA_ADDR_INVALID;
+	} else {
+		dma_sync_single_for_cpu(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+	}
+	kfree(udc_controller->statusbuf);
+err4:
+	kfree(udc_controller->nullbuf);
+err3:
+	ep = &udc_controller->eps[0];
+	cpm_muram_free(cpm_muram_offset(ep->rxbase));
+	kfree(ep->rxframe);
+	kfree(ep->rxbuffer);
+	kfree(ep->txframe);
+err2:
+	iounmap(udc_controller->usb_regs);
+err1:
+	kfree(udc_controller);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int qe_udc_suspend(struct of_device *dev, pm_message_t state)
+{
+	return -ENOTSUPP;
+}
+
+static int qe_udc_resume(struct of_device *dev)
+{
+	return -ENOTSUPP;
+}
+#endif
+
+static int __devexit qe_udc_remove(struct of_device *ofdev)
+{
+	struct qe_ep *ep;
+	unsigned int size;
+
+	DECLARE_COMPLETION(done);
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	udc_controller->done = &done;
+	tasklet_disable(&udc_controller->rx_tasklet);
+
+	if (udc_controller->nullmap) {
+		dma_unmap_single(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+			udc_controller->nullp = DMA_ADDR_INVALID;
+	} else {
+		dma_sync_single_for_cpu(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+	}
+	kfree(udc_controller->statusbuf);
+	kfree(udc_controller->nullbuf);
+
+	ep = &udc_controller->eps[0];
+	cpm_muram_free(cpm_muram_offset(ep->rxbase));
+	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
+
+	kfree(ep->rxframe);
+	if (ep->rxbufmap) {
+		dma_unmap_single(udc_controller->gadget.dev.parent,
+				ep->rxbuf_d, size,
+				DMA_FROM_DEVICE);
+		ep->rxbuf_d = DMA_ADDR_INVALID;
+	} else {
+		dma_sync_single_for_cpu(udc_controller->gadget.dev.parent,
+				ep->rxbuf_d, size,
+				DMA_FROM_DEVICE);
+	}
+
+	kfree(ep->rxbuffer);
+	kfree(ep->txframe);
+
+	free_irq(udc_controller->usb_irq, udc_controller);
+
+	tasklet_kill(&udc_controller->rx_tasklet);
+
+	iounmap(udc_controller->usb_regs);
+
+	device_unregister(&udc_controller->gadget.dev);
+	/* wait for release() of gadget.dev to free udc */
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static struct of_device_id __devinitdata qe_udc_match[] = {
+	{
+		.compatible = "fsl,mpc8360-qe-usb",
+		.data = (void *)PORT_QE,
+	},
+	{
+		.compatible = "fsl,mpc8272-cpm-usb",
+		.data = (void *)PORT_CPM,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, qe_udc_match);
+
+static struct of_platform_driver udc_driver = {
+	.name           = (char *)driver_name,
+	.match_table    = qe_udc_match,
+	.probe          = qe_udc_probe,
+	.remove         = __devexit_p(qe_udc_remove),
+#ifdef CONFIG_PM
+	.suspend        = qe_udc_suspend,
+	.resume         = qe_udc_resume,
+#endif
+};
+
+static int __init qe_udc_init(void)
+{
+	printk(KERN_INFO "%s: %s, %s\n", driver_name, driver_desc,
+			DRIVER_VERSION);
+	return of_register_platform_driver(&udc_driver);
+}
+
+static void __exit qe_udc_exit(void)
+{
+	of_unregister_platform_driver(&udc_driver);
+}
+
+module_init(qe_udc_init);
+module_exit(qe_udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
new file mode 100644
index 0000000..31b2710
--- /dev/null
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -0,0 +1,437 @@
+/*
+ * drivers/usb/gadget/qe_udc.h
+ *
+ * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * 	Xiaobo Xie <X.Xie@freescale.com>
+ * 	Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Freescale USB device/endpoint management registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __FSL_QE_UDC_H
+#define __FSL_QE_UDC_H
+
+/* SoC type */
+#define PORT_CPM	0
+#define PORT_QE		1
+
+#define USB_MAX_ENDPOINTS               4
+#define USB_MAX_PIPES                   USB_MAX_ENDPOINTS
+#define USB_EP0_MAX_SIZE		64
+#define USB_MAX_CTRL_PAYLOAD            0x4000
+#define USB_BDRING_LEN			16
+#define USB_BDRING_LEN_RX		256
+#define USB_BDRING_LEN_TX		16
+#define MIN_EMPTY_BDS			128
+#define MAX_DATA_BDS			8
+#define USB_CRC_SIZE			2
+#define USB_DIR_BOTH			0x88
+#define R_BUF_MAXSIZE			0x800
+#define USB_EP_PARA_ALIGNMENT		32
+
+/* USB Mode Register bit define */
+#define USB_MODE_EN		0x01
+#define USB_MODE_HOST		0x02
+#define USB_MODE_TEST		0x04
+#define USB_MODE_SFTE		0x08
+#define USB_MODE_RESUME		0x40
+#define USB_MODE_LSS		0x80
+
+/* USB Slave Address Register Mask */
+#define USB_SLVADDR_MASK	0x7F
+
+/* USB Endpoint register define */
+#define USB_EPNUM_MASK		0xF000
+#define USB_EPNUM_SHIFT		12
+
+#define USB_TRANS_MODE_SHIFT	8
+#define USB_TRANS_CTR		0x0000
+#define USB_TRANS_INT		0x0100
+#define USB_TRANS_BULK		0x0200
+#define USB_TRANS_ISO		0x0300
+
+#define USB_EP_MF		0x0020
+#define USB_EP_RTE		0x0010
+
+#define USB_THS_SHIFT		2
+#define USB_THS_MASK		0x000c
+#define USB_THS_NORMAL		0x0
+#define USB_THS_IGNORE_IN	0x0004
+#define USB_THS_NACK		0x0008
+#define USB_THS_STALL		0x000c
+
+#define USB_RHS_SHIFT   	0
+#define USB_RHS_MASK		0x0003
+#define USB_RHS_NORMAL  	0x0
+#define USB_RHS_IGNORE_OUT	0x0001
+#define USB_RHS_NACK		0x0002
+#define USB_RHS_STALL		0x0003
+
+#define USB_RTHS_MASK		0x000f
+
+/* USB Command Register define */
+#define USB_CMD_STR_FIFO	0x80
+#define USB_CMD_FLUSH_FIFO	0x40
+#define USB_CMD_ISFT		0x20
+#define USB_CMD_DSFT		0x10
+#define USB_CMD_EP_MASK		0x03
+
+/* USB Event and Mask Register define */
+#define USB_E_MSF_MASK		0x0800
+#define USB_E_SFT_MASK		0x0400
+#define USB_E_RESET_MASK	0x0200
+#define USB_E_IDLE_MASK		0x0100
+#define USB_E_TXE4_MASK		0x0080
+#define USB_E_TXE3_MASK		0x0040
+#define USB_E_TXE2_MASK		0x0020
+#define USB_E_TXE1_MASK		0x0010
+#define USB_E_SOF_MASK		0x0008
+#define USB_E_BSY_MASK		0x0004
+#define USB_E_TXB_MASK		0x0002
+#define USB_E_RXB_MASK		0x0001
+#define USBER_ALL_CLEAR 	0x0fff
+
+#define USB_E_DEFAULT_DEVICE   (USB_E_RESET_MASK | USB_E_TXE4_MASK | \
+				USB_E_TXE3_MASK | USB_E_TXE2_MASK | \
+				USB_E_TXE1_MASK | USB_E_BSY_MASK | \
+				USB_E_TXB_MASK | USB_E_RXB_MASK)
+
+#define USB_E_TXE_MASK         (USB_E_TXE4_MASK | USB_E_TXE3_MASK|\
+				 USB_E_TXE2_MASK | USB_E_TXE1_MASK)
+/* USB Status Register define */
+#define USB_IDLE_STATUS_MASK	0x01
+
+/* USB Start of Frame Timer */
+#define USB_USSFT_MASK		0x3FFF
+
+/* USB Frame Number Register */
+#define USB_USFRN_MASK		0xFFFF
+
+struct usb_device_para {
+	u16	epptr[4];
+	u32	rstate;
+	u32	rptr;
+	u16	frame_n;
+	u16	rbcnt;
+	u32	rtemp;
+	u32	rxusb_data;
+	u16	rxuptr;
+	u8	reso[2];
+	u32	softbl;
+	u8	sofucrctemp;
+};
+
+struct usb_ep_para {
+	u16	rbase;
+	u16	tbase;
+	u8	rbmr;
+	u8	tbmr;
+	u16	mrblr;
+	u16	rbptr;
+	u16	tbptr;
+	u32	tstate;
+	u32	tptr;
+	u16	tcrc;
+	u16	tbcnt;
+	u32	ttemp;
+	u16	txusbu_ptr;
+	u8	reserve[2];
+};
+
+#define USB_BUSMODE_GBL		0x20
+#define USB_BUSMODE_BO_MASK	0x18
+#define USB_BUSMODE_BO_SHIFT	0x3
+#define USB_BUSMODE_BE		0x2
+#define USB_BUSMODE_CETM	0x04
+#define USB_BUSMODE_DTB		0x02
+
+/* Endpoint basic handle */
+#define ep_index(EP)		((EP)->desc->bEndpointAddress & 0xF)
+#define ep_maxpacket(EP)	((EP)->ep.maxpacket)
+#define ep_is_in(EP)	((ep_index(EP) == 0) ? ((EP)->udc->ep0_dir == \
+			USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+			& USB_DIR_IN) == USB_DIR_IN)
+
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP          0
+#define DATA_STATE_XMIT         1
+#define DATA_STATE_NEED_ZLP     2
+#define WAIT_FOR_OUT_STATUS     3
+#define DATA_STATE_RECV         4
+
+/* ep transfer mode */
+#define USBP_TM_CTL	0
+#define USBP_TM_ISO	1
+#define USBP_TM_BULK	2
+#define USBP_TM_INT	3
+
+/*-----------------------------------------------------------------------------
+	USB RX And TX DATA Frame
+ -----------------------------------------------------------------------------*/
+struct qe_frame {
+	u8 *data;
+	u32 len;
+	u32 status;
+	u32 info;
+
+	void *privdata;
+	struct list_head node;
+};
+
+/* Frame structure, info field. */
+#define PID_DATA0              0x80000000 /* Data toggle zero */
+#define PID_DATA1              0x40000000 /* Data toggle one  */
+#define PID_SETUP              0x20000000 /* setup bit */
+#define SETUP_STATUS           0x10000000 /* setup status bit */
+#define SETADDR_STATUS         0x08000000 /* setup address status bit */
+#define NO_REQ                 0x04000000 /* Frame without request */
+#define HOST_DATA              0x02000000 /* Host data frame */
+#define FIRST_PACKET_IN_FRAME  0x01000000 /* first packet in the frame */
+#define TOKEN_FRAME            0x00800000 /* Host token frame */
+#define ZLP                    0x00400000 /* Zero length packet */
+#define IN_TOKEN_FRAME         0x00200000 /* In token package */
+#define OUT_TOKEN_FRAME        0x00100000 /* Out token package */
+#define SETUP_TOKEN_FRAME      0x00080000 /* Setup token package */
+#define STALL_FRAME            0x00040000 /* Stall handshake */
+#define NACK_FRAME             0x00020000 /* Nack handshake */
+#define NO_PID                 0x00010000 /* No send PID */
+#define NO_CRC                 0x00008000 /* No send CRC */
+#define HOST_COMMAND           0x00004000 /* Host command frame   */
+
+/* Frame status field */
+/* Receive side */
+#define FRAME_OK               0x00000000 /* Frame transmitted or received OK */
+#define FRAME_ERROR            0x80000000 /* Error occurred on frame */
+#define START_FRAME_LOST       0x40000000 /* START_FRAME_LOST */
+#define END_FRAME_LOST         0x20000000 /* END_FRAME_LOST */
+#define RX_ER_NONOCT           0x10000000 /* Rx Non Octet Aligned Packet */
+#define RX_ER_BITSTUFF         0x08000000 /* Frame Aborted --Received packet
+					     with bit stuff error */
+#define RX_ER_CRC              0x04000000 /* Received packet with CRC error */
+#define RX_ER_OVERUN           0x02000000 /* Over-run occurred on reception */
+#define RX_ER_PID              0x01000000 /* Wrong PID received */
+/* Transmit side */
+#define TX_ER_NAK              0x00800000 /* Received NAK handshake */
+#define TX_ER_STALL            0x00400000 /* Received STALL handshake */
+#define TX_ER_TIMEOUT          0x00200000 /* Transmit time out */
+#define TX_ER_UNDERUN          0x00100000 /* Transmit underrun */
+#define FRAME_INPROGRESS       0x00080000 /* Frame is being transmitted */
+#define ER_DATA_UNDERUN        0x00040000 /* Frame is shorter than expected */
+#define ER_DATA_OVERUN         0x00020000 /* Frame is longer than expected */
+
+/* QE USB frame operation functions */
+#define frame_get_length(frm) ((frm)->len)
+#define frame_set_length(frm, leng) ((frm)->len = (leng))
+#define frame_get_data(frm) ((frm)->data)
+#define frame_set_data(frm, dat) ((frm)->data = (dat))
+#define frame_get_info(frm) ((frm)->info)
+#define frame_set_info(frm, inf) ((frm)->info = (inf))
+#define frame_get_status(frm) ((frm)->status)
+#define frame_set_status(frm, stat) ((frm)->status = (stat))
+#define frame_get_privdata(frm) ((frm)->privdata)
+#define frame_set_privdata(frm, dat) ((frm)->privdata = (dat))
+
+static inline void qe_frame_clean(struct qe_frame *frm)
+{
+	frame_set_data(frm, NULL);
+	frame_set_length(frm, 0);
+	frame_set_status(frm, FRAME_OK);
+	frame_set_info(frm, 0);
+	frame_set_privdata(frm, NULL);
+}
+
+static inline void qe_frame_init(struct qe_frame *frm)
+{
+	qe_frame_clean(frm);
+	INIT_LIST_HEAD(&(frm->node));
+}
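Taken together, the accessor macros and the two inline helpers above define the qe_frame lifecycle. A small usage sketch; the buffer and flag values are made up for illustration:

/* Sketch only: preparing a frame that carries a received DATA0 packet. */
static void qe_frame_usage_example(struct qe_frame *frm, u8 *buf, u32 len)
{
	qe_frame_init(frm);			/* reset fields, init list node */

	frame_set_data(frm, buf);		/* attach the receive buffer */
	frame_set_length(frm, len);
	frame_set_info(frm, PID_DATA0 | FIRST_PACKET_IN_FRAME);

	if (frame_get_status(frm) != FRAME_OK)	/* drop errored frames */
		qe_frame_clean(frm);
}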
+
+struct qe_req {
+	struct usb_request req;
+	struct list_head queue;
+	/* ep_queue() adds request->queue to the tail of the
+	 * owning endpoint's queue list */
+	struct qe_ep *ep;
+	unsigned mapped:1;
+};
+
+struct qe_ep {
+	struct usb_ep ep;
+	struct list_head queue;
+	struct qe_udc *udc;
+	const struct usb_endpoint_descriptor *desc;
+	struct usb_gadget *gadget;
+
+	u8 state;
+
+	struct qe_bd __iomem *rxbase;
+	struct qe_bd __iomem *n_rxbd;
+	struct qe_bd __iomem *e_rxbd;
+
+	struct qe_bd __iomem *txbase;
+	struct qe_bd __iomem *n_txbd;
+	struct qe_bd __iomem *c_txbd;
+
+	struct qe_frame *rxframe;
+	u8 *rxbuffer;
+	dma_addr_t rxbuf_d;
+	u8 rxbufmap;
+	unsigned char localnack;
+	int has_data;
+
+	struct qe_frame *txframe;
+	struct qe_req *tx_req;
+	int sent;  /* data already sent */
+	int last;  /* data sent last time */
+
+	u8 dir;
+	u8 epnum;
+	u8 tm; /* transfer mode */
+	u8 data01;
+	u8 init;
+
+	u8 already_seen;
+	u8 enable_tasklet;
+	u8 setup_stage;
+	u32 last_io;            /* timestamp */
+
+	char name[14];
+
+	unsigned double_buf:1;
+	unsigned stopped:1;
+	unsigned fnf:1;
+	unsigned has_dma:1;
+
+	u8 ackwait;
+	u8 dma_channel;
+	u16 dma_counter;
+	int lch;
+
+	struct timer_list timer;
+};
+
+struct qe_udc {
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *driver;
+	struct device *dev;
+	struct qe_ep eps[USB_MAX_ENDPOINTS];
+	struct usb_ctrlrequest local_setup_buff;
+	spinlock_t lock;	/* lock for set/config qe_udc */
+	unsigned long soc_type;		/* QE or CPM soc */
+
+	struct qe_req *status_req;	/* ep0 status request */
+
+	/* USB and EP Parameter Block pointer */
+	struct usb_device_para __iomem *usb_param;
+	struct usb_ep_para __iomem *ep_param[4];
+
+	u32 max_pipes;          /* Device max pipes */
+	u32 max_use_endpts;     /* Max endpoints to be used */
+	u32 bus_reset;          /* Device is bus resetting */
+	u32 resume_state;       /* USB state to resume*/
+	u32 usb_state;          /* USB current state */
+	u32 usb_next_state;     /* USB next state */
+	u32 ep0_state;          /* Endpoint zero state */
+	u32 ep0_dir;            /* Endpoint zero direction: can be
+				USB_DIR_IN or USB_DIR_OUT*/
+	u32 usb_sof_count;      /* SOF count */
+	u32 errors;             /* USB ERRORs count */
+
+	u8 *tmpbuf;
+	u32 c_start;
+	u32 c_end;
+
+	u8 *nullbuf;
+	u8 *statusbuf;
+	dma_addr_t nullp;
+	u8 nullmap;
+	u8 device_address;	/* Device USB address */
+
+	unsigned int usb_clock;
+	unsigned int usb_irq;
+	struct usb_ctlr __iomem *usb_regs;
+
+	struct tasklet_struct rx_tasklet;
+
+	struct completion *done;	/* to make sure release() is done */
+};
+
+#define EP_STATE_IDLE	0
+#define EP_STATE_NACK	1
+#define EP_STATE_STALL	2
+
+/*
+ * transmit BD's status
+ */
+#define T_R           0x80000000         /* ready bit */
+#define T_W           0x20000000         /* wrap bit */
+#define T_I           0x10000000         /* interrupt on completion */
+#define T_L           0x08000000         /* last */
+#define T_TC          0x04000000         /* transmit CRC */
+#define T_CNF         0x02000000         /* wait for transmit confirm */
+#define T_LSP         0x01000000         /* Low-speed transaction */
+#define T_PID         0x00c00000         /* packet id */
+#define T_NAK         0x00100000         /* No ack. */
+#define T_STAL        0x00080000         /* Stall received */
+#define T_TO          0x00040000         /* time out */
+#define T_UN          0x00020000         /* underrun */
+
+#define DEVICE_T_ERROR    (T_UN | T_TO)
+#define HOST_T_ERROR      (T_UN | T_TO | T_NAK | T_STAL)
+#define DEVICE_T_BD_MASK  DEVICE_T_ERROR
+#define HOST_T_BD_MASK    HOST_T_ERROR
+
+#define T_PID_SHIFT   6
+#define T_PID_DATA0   0x00800000         /* Data 0 toggle */
+#define T_PID_DATA1   0x00c00000         /* Data 1 toggle */
+
+/*
+ * receive BD's status
+ */
+#define R_E           0x80000000         /* buffer empty */
+#define R_W           0x20000000         /* wrap bit */
+#define R_I           0x10000000         /* interrupt on reception */
+#define R_L           0x08000000         /* last */
+#define R_F           0x04000000         /* first */
+#define R_PID         0x00c00000         /* packet id */
+#define R_NO          0x00100000         /* Rx Non Octet Aligned Packet */
+#define R_AB          0x00080000         /* Frame Aborted */
+#define R_CR          0x00040000         /* CRC Error */
+#define R_OV          0x00020000         /* Overrun */
+
+#define R_ERROR       (R_NO | R_AB | R_CR | R_OV)
+#define R_BD_MASK     R_ERROR
+
+#define R_PID_DATA0   0x00000000
+#define R_PID_DATA1   0x00400000
+#define R_PID_SETUP   0x00800000
+
+#define CPM_USB_STOP_TX 0x2e600000
+#define CPM_USB_RESTART_TX 0x2e600000
+#define CPM_USB_STOP_TX_OPCODE 0x0a
+#define CPM_USB_RESTART_TX_OPCODE 0x0b
+#define CPM_USB_EP_SHIFT 5
+
+#ifndef CONFIG_CPM
+static inline int cpm_command(u32 command, u8 opcode)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#ifndef CONFIG_QUICC_ENGINE
+static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
+	u32 cmd_input)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#endif  /* __FSL_QE_UDC_H */
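The buffer-descriptor status bits above are what the receive path tests before handing data up. A hedged sketch, assuming the driver reads the BD's first 32-bit word (status plus length) with the usual big-endian accessor; the real descriptor accessors may differ:

/* Sketch only: deciding whether a receive BD holds usable data. */
static inline int qe_rx_bd_usable(u32 __iomem *bdp)
{
	u32 bdstatus = in_be32(bdp);	/* status lives in the upper half */

	if (bdstatus & R_E)		/* still owned by the controller */
		return 0;
	if (bdstatus & R_ERROR)		/* alignment, abort, CRC or overrun */
		return 0;
	return 1;
}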
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 45ad556..091bb55 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -23,11 +23,8 @@
 #include <linux/ioport.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/timer.h>
 #include <linux/list.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
@@ -44,11 +41,9 @@
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
-#include <asm/irq.h>
 #include <asm/system.h>
 #include <asm/unaligned.h>
 #include <asm/dma.h>
-#include <asm/cacheflush.h>
 
 #include "fsl_usb2_udc.h"
 
@@ -61,8 +56,8 @@
 static const char driver_name[] = "fsl-usb2-udc";
 static const char driver_desc[] = DRIVER_DESC;
 
-volatile static struct usb_dr_device *dr_regs = NULL;
-volatile static struct usb_sys_interface *usb_sys_regs = NULL;
+static struct usb_dr_device *dr_regs;
+static struct usb_sys_interface *usb_sys_regs;
 
 /* it is initialized in probe()  */
 static struct fsl_udc *udc_controller = NULL;
@@ -76,16 +71,14 @@
 	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
 };
 
-static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state);
-static int fsl_udc_resume(struct platform_device *pdev);
 static void fsl_ep_fifo_flush(struct usb_ep *_ep);
 
 #ifdef CONFIG_PPC32
 #define fsl_readl(addr)		in_le32(addr)
-#define fsl_writel(addr, val32) out_le32(val32, addr)
+#define fsl_writel(val32, addr) out_le32(addr, val32)
 #else
 #define fsl_readl(addr)		readl(addr)
-#define fsl_writel(addr, val32) writel(addr, val32)
+#define fsl_writel(val32, addr) writel(val32, addr)
 #endif
 
 /********************************************************************
@@ -185,10 +178,6 @@
 	unsigned long timeout;
 #define FSL_UDC_RESET_TIMEOUT 1000
 
-	/* before here, make sure dr_regs has been initialized */
-	if (!udc)
-		return -EINVAL;
-
 	/* Stop and reset the usb controller */
 	tmp = fsl_readl(&dr_regs->usbcmd);
 	tmp &= ~USB_CMD_RUN_STOP;
@@ -202,7 +191,7 @@
 	timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
 	while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
 		if (time_after(jiffies, timeout)) {
-			ERR("udc reset timeout! \n");
+			ERR("udc reset timeout!\n");
 			return -ETIMEDOUT;
 		}
 		cpu_relax();
@@ -315,7 +304,8 @@
 	return;
 }
 
-void dr_ep_setup(unsigned char ep_num, unsigned char dir, unsigned char ep_type)
+static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
+			unsigned char ep_type)
 {
 	unsigned int tmp_epctrl = 0;
 
@@ -563,7 +553,7 @@
 	/* nuke all pending requests (does flush) */
 	nuke(ep, -ESHUTDOWN);
 
-	ep->desc = 0;
+	ep->desc = NULL;
 	ep->stopped = 1;
 	spin_unlock_irqrestore(&udc->lock, flags);
 
@@ -602,7 +592,7 @@
 }
 
 /*-------------------------------------------------------------------------*/
-static int fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
+static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
 {
 	int i = ep_index(ep) * 2 + ep_is_in(ep);
 	u32 temp, bitmask, tmp_stat;
@@ -653,13 +643,16 @@
 			| EP_QUEUE_HEAD_STATUS_HALT));
 	dQH->size_ioc_int_sts &= temp;
 
+	/* Ensure that updates to the QH will occur before priming. */
+	wmb();
+
 	/* Prime endpoint by writing 1 to ENDPTPRIME */
 	temp = ep_is_in(ep)
 		? (1 << (ep_index(ep) + 16))
 		: (1 << (ep_index(ep)));
 	fsl_writel(temp, &dr_regs->endpointprime);
 out:
-	return 0;
+	return;
 }
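The wmb() added above closes a real ordering hole: the queue-head update in system memory must be visible before the MMIO write that primes the endpoint, or the controller can fetch stale descriptor fields. A generic sketch of the pattern; the queue-head field name is assumed from the driver's dQH layout, and masking of low address bits is omitted:

/* Sketch only: descriptor writes, then a write barrier, then the doorbell. */
static void prime_endpoint_sketch(struct ep_queue_head *dQH, u32 dtd_dma,
				  u32 __iomem *prime_reg, u32 prime_bit)
{
	/* 1. Update the queue head in coherent memory. */
	dQH->next_dtd_ptr = cpu_to_le32(dtd_dma);

	/* 2. Make those stores globally visible before touching MMIO. */
	wmb();

	/* 3. Prime the endpoint; the controller now re-fetches the dTD. */
	fsl_writel(prime_bit, prime_reg);
}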
 
 /* Fill in the dTD structure
@@ -710,7 +703,7 @@
 		*is_last = 0;
 
 	if ((*is_last) == 0)
-		VDBG("multi-dtd request!\n");
+		VDBG("multi-dtd request!");
 	/* Fill in the transfer size; set active bit */
 	swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
 
@@ -773,11 +766,11 @@
 	/* catch various bogus parameters */
 	if (!_req || !req->req.complete || !req->req.buf
 			|| !list_empty(&req->queue)) {
-		VDBG("%s, bad params\n", __func__);
+		VDBG("%s, bad params", __func__);
 		return -EINVAL;
 	}
 	if (unlikely(!_ep || !ep->desc)) {
-		VDBG("%s, bad ep\n", __func__);
+		VDBG("%s, bad ep", __func__);
 		return -EINVAL;
 	}
 	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
@@ -1069,7 +1062,7 @@
 
 	udc = container_of(gadget, struct fsl_udc, gadget);
 	spin_lock_irqsave(&udc->lock, flags);
-	VDBG("VBUS %s\n", is_active ? "on" : "off");
+	VDBG("VBUS %s", is_active ? "on" : "off");
 	udc->vbus_active = (is_active != 0);
 	if (can_pullup(udc))
 		fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -1146,7 +1139,6 @@
 {
 	struct fsl_req *req = udc->status_req;
 	struct fsl_ep *ep;
-	int status = 0;
 
 	if (direction == EP_DIR_IN)
 		udc->ep0_dir = USB_DIR_IN;
@@ -1164,27 +1156,21 @@
 	req->dtd_count = 0;
 
 	if (fsl_req_to_dtd(req) == 0)
-		status = fsl_queue_td(ep, req);
+		fsl_queue_td(ep, req);
 	else
 		return -ENOMEM;
 
-	if (status)
-		ERR("Can't queue ep0 status request \n");
 	list_add_tail(&req->queue, &ep->queue);
 
-	return status;
+	return 0;
 }
 
-static inline int udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
+static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
 {
 	struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
 
-	if (!ep->name)
-		return 0;
-
-	nuke(ep, -ESHUTDOWN);
-
-	return 0;
+	if (ep->name)
+		nuke(ep, -ESHUTDOWN);
 }
 
 /*
@@ -1208,10 +1194,8 @@
 		u16 index, u16 length)
 {
 	u16 tmp = 0;		/* Status, cpu endian */
-
 	struct fsl_req *req;
 	struct fsl_ep *ep;
-	int status = 0;
 
 	ep = &udc->eps[0];
 
@@ -1250,14 +1234,10 @@
 
 	/* prime the data phase */
 	if ((fsl_req_to_dtd(req) == 0))
-		status = fsl_queue_td(ep, req);
+		fsl_queue_td(ep, req);
 	else			/* no mem */
 		goto stall;
 
-	if (status) {
-		ERR("Can't respond to getstatus request \n");
-		goto stall;
-	}
 	list_add_tail(&req->queue, &ep->queue);
 	udc->ep0_state = DATA_STATE_XMIT;
 	return;
@@ -1397,7 +1377,7 @@
 		udc->ep0_state = WAIT_FOR_SETUP;
 		break;
 	case WAIT_FOR_SETUP:
-		ERR("Unexpect ep0 packets \n");
+		ERR("Unexpected ep0 packets\n");
 		break;
 	default:
 		ep0stall(udc);
@@ -1476,7 +1456,7 @@
 				status = -EILSEQ;
 				break;
 			} else
-				ERR("Unknown error has occured (0x%x)!\r\n",
+				ERR("Unknown error has occurred (0x%x)!\n",
 					errors);
 
 		} else if (le32_to_cpu(curr_td->size_ioc_sts)
@@ -1495,7 +1475,7 @@
 			}
 		} else {
 			td_complete++;
-			VDBG("dTD transmitted successful ");
+			VDBG("dTD transmitted successful");
 		}
 
 		if (j != curr_req->dtd_count - 1)
@@ -1568,9 +1548,6 @@
 {
 	u32 speed;
 
-	if (udc->bus_reset)
-		udc->bus_reset = 0;
-
 	/* Bus resetting is finished */
 	if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET)) {
 		/* Get the speed */
@@ -1678,8 +1655,6 @@
 
 	if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
 		VDBG("Bus reset");
-		/* Bus is reseting */
-		udc->bus_reset = 1;
 		/* Reset all the queues, include XD, dTD, EP queue
 		 * head and TR Queue */
 		reset_queues(udc);
@@ -1768,7 +1743,7 @@
 	}
 
 	if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
-		VDBG("Error IRQ %x ", irq_src);
+		VDBG("Error IRQ %x", irq_src);
 	}
 
 	spin_unlock_irqrestore(&udc->lock, flags);
@@ -1799,7 +1774,7 @@
 	/* lock is needed but whether should use this lock or another */
 	spin_lock_irqsave(&udc_controller->lock, flags);
 
-	driver->driver.bus = 0;
+	driver->driver.bus = NULL;
 	/* hook up the driver */
 	udc_controller->driver = driver;
 	udc_controller->gadget.dev.driver = &driver->driver;
@@ -1809,8 +1784,8 @@
 	retval = driver->bind(&udc_controller->gadget);
 	if (retval) {
 		VDBG("bind to %s --> %d", driver->driver.name, retval);
-		udc_controller->gadget.dev.driver = 0;
-		udc_controller->driver = 0;
+		udc_controller->gadget.dev.driver = NULL;
+		udc_controller->driver = NULL;
 		goto out;
 	}
 
@@ -1819,12 +1794,12 @@
 	udc_controller->usb_state = USB_STATE_ATTACHED;
 	udc_controller->ep0_state = WAIT_FOR_SETUP;
 	udc_controller->ep0_dir = 0;
-	printk(KERN_INFO "%s: bind to driver %s \n",
+	printk(KERN_INFO "%s: bind to driver %s\n",
 			udc_controller->gadget.name, driver->driver.name);
 
 out:
 	if (retval)
-		printk("retval %d \n", retval);
+		printk("gadget driver register failed %d\n", retval);
 	return retval;
 }
 EXPORT_SYMBOL(usb_gadget_register_driver);
@@ -1842,7 +1817,7 @@
 		return -EINVAL;
 
 	if (udc_controller->transceiver)
-		(void)otg_set_peripheral(udc_controller->transceiver, 0);
+		otg_set_peripheral(udc_controller->transceiver, NULL);
 
 	/* stop DR, disable intr */
 	dr_controller_stop(udc_controller);
@@ -1863,10 +1838,10 @@
 
 	/* unbind gadget and unhook driver. */
 	driver->unbind(&udc_controller->gadget);
-	udc_controller->gadget.dev.driver = 0;
-	udc_controller->driver = 0;
+	udc_controller->gadget.dev.driver = NULL;
+	udc_controller->driver = NULL;
 
-	printk("unregistered gadget driver '%s'\r\n", driver->driver.name);
+	printk("unregistered gadget driver '%s'\n", driver->driver.name);
 	return 0;
 }
 EXPORT_SYMBOL(usb_gadget_unregister_driver);
@@ -1922,7 +1897,7 @@
 	tmp_reg = fsl_readl(&dr_regs->usbsts);
 	t = scnprintf(next, size,
 			"USB Status Reg:\n"
-			"Dr Suspend: %d" "Reset Received: %d" "System Error: %s"
+			"Dr Suspend: %d Reset Received: %d System Error: %s "
 			"USB Error Interrupt: %s\n\n",
 			(tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
 			(tmp_reg & USB_STS_RESET) ? 1 : 0,
@@ -1934,11 +1909,11 @@
 	tmp_reg = fsl_readl(&dr_regs->usbintr);
 	t = scnprintf(next, size,
 			"USB Intrrupt Enable Reg:\n"
-			"Sleep Enable: %d" "SOF Received Enable: %d"
+			"Sleep Enable: %d SOF Received Enable: %d "
 			"Reset Enable: %d\n"
-			"System Error Enable: %d"
+			"System Error Enable: %d "
 			"Port Change Dectected Enable: %d\n"
-			"USB Error Intr Enable: %d" "USB Intr Enable: %d\n\n",
+			"USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
 			(tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
 			(tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
 			(tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
@@ -1951,21 +1926,21 @@
 
 	tmp_reg = fsl_readl(&dr_regs->frindex);
 	t = scnprintf(next, size,
-			"USB Frame Index Reg:" "Frame Number is 0x%x\n\n",
+			"USB Frame Index Reg: Frame Number is 0x%x\n\n",
 			(tmp_reg & USB_FRINDEX_MASKS));
 	size -= t;
 	next += t;
 
 	tmp_reg = fsl_readl(&dr_regs->deviceaddr);
 	t = scnprintf(next, size,
-			"USB Device Address Reg:" "Device Addr is 0x%x\n\n",
+			"USB Device Address Reg: Device Addr is 0x%x\n\n",
 			(tmp_reg & USB_DEVICE_ADDRESS_MASK));
 	size -= t;
 	next += t;
 
 	tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
 	t = scnprintf(next, size,
-			"USB Endpoint List Address Reg:"
+			"USB Endpoint List Address Reg: "
 			"Device Addr is 0x%x\n\n",
 			(tmp_reg & USB_EP_LIST_ADDRESS_MASK));
 	size -= t;
@@ -1974,11 +1949,12 @@
 	tmp_reg = fsl_readl(&dr_regs->portsc1);
 	t = scnprintf(next, size,
 		"USB Port Status&Control Reg:\n"
-		"Port Transceiver Type : %s" "Port Speed: %s \n"
-		"PHY Low Power Suspend: %s" "Port Reset: %s"
-		"Port Suspend Mode: %s \n" "Over-current Change: %s"
+		"Port Transceiver Type: %s Port Speed: %s\n"
+		"PHY Low Power Suspend: %s Port Reset: %s "
+		"Port Suspend Mode: %s\n"
+		"Over-current Change: %s "
 		"Port Enable/Disable Change: %s\n"
-		"Port Enabled/Disabled: %s"
+		"Port Enabled/Disabled: %s "
 		"Current Connect Status: %s\n\n", ( {
 			char *s;
 			switch (tmp_reg & PORTSCX_PTS_FSLS) {
@@ -2023,7 +1999,7 @@
 
 	tmp_reg = fsl_readl(&dr_regs->usbmode);
 	t = scnprintf(next, size,
-			"USB Mode Reg:" "Controller Mode is : %s\n\n", ( {
+			"USB Mode Reg: Controller Mode is: %s\n\n", ( {
 				char *s;
 				switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
 				case USB_MODE_CTRL_MODE_IDLE:
@@ -2042,7 +2018,7 @@
 
 	tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
 	t = scnprintf(next, size,
-			"Endpoint Setup Status Reg:" "SETUP on ep 0x%x\n\n",
+			"Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
 			(tmp_reg & EP_SETUP_STATUS_MASK));
 	size -= t;
 	next += t;
@@ -2055,12 +2031,12 @@
 		next += t;
 	}
 	tmp_reg = fsl_readl(&dr_regs->endpointprime);
-	t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n", tmp_reg);
+	t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
 	size -= t;
 	next += t;
 
 	tmp_reg = usb_sys_regs->snoop1;
-	t = scnprintf(next, size, "\nSnoop1 Reg : = [0x%x]\n\n", tmp_reg);
+	t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
 	size -= t;
 	next += t;
 
@@ -2084,7 +2060,7 @@
 	} else {
 		list_for_each_entry(req, &ep->queue, queue) {
 			t = scnprintf(next, size,
-				"req %p actual 0x%x length 0x%x  buf %p\n",
+				"req %p actual 0x%x length 0x%x buf %p\n",
 				&req->req, req->req.actual,
 				req->req.length, req->req.buf);
 			size -= t;
@@ -2110,7 +2086,7 @@
 			} else {
 				list_for_each_entry(req, &ep->queue, queue) {
 					t = scnprintf(next, size,
-						"req %p actual 0x%x length"
+						"req %p actual 0x%x length "
 						"0x%x  buf %p\n",
 						&req->req, req->req.actual,
 						req->req.length, req->req.buf);
@@ -2202,7 +2178,6 @@
 	udc->usb_state = USB_STATE_POWERED;
 	udc->ep0_dir = 0;
 	udc->remote_wakeup = 0;	/* default to 0 on reset */
-	spin_lock_init(&udc->lock);
 
 	return 0;
 }
@@ -2254,7 +2229,7 @@
 	u32 dccparams;
 
 	if (strcmp(pdev->name, driver_name)) {
-		VDBG("Wrong device\n");
+		VDBG("Wrong device");
 		return -ENODEV;
 	}
 
@@ -2264,23 +2239,26 @@
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&udc_controller->lock);
+	udc_controller->stopped = 1;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
-		kfree(udc_controller);
-		return -ENXIO;
+		ret = -ENXIO;
+		goto err_kfree;
 	}
 
 	if (!request_mem_region(res->start, res->end - res->start + 1,
 				driver_name)) {
-		ERR("request mem region for %s failed \n", pdev->name);
-		kfree(udc_controller);
-		return -EBUSY;
+		ERR("request mem region for %s failed\n", pdev->name);
+		ret = -EBUSY;
+		goto err_kfree;
 	}
 
 	dr_regs = ioremap(res->start, res->end - res->start + 1);
 	if (!dr_regs) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err_release_mem_region;
 	}
 
 	usb_sys_regs = (struct usb_sys_interface *)
@@ -2291,7 +2269,7 @@
 	if (!(dccparams & DCCPARAMS_DC)) {
 		ERR("This SOC doesn't support device role\n");
 		ret = -ENODEV;
-		goto err2;
+		goto err_iounmap;
 	}
 	/* Get max device endpoints */
 	/* DEN is bidirectional ep number, max_ep doubles the number */
@@ -2300,22 +2278,22 @@
 	udc_controller->irq = platform_get_irq(pdev, 0);
 	if (!udc_controller->irq) {
 		ret = -ENODEV;
-		goto err2;
+		goto err_iounmap;
 	}
 
 	ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
 			driver_name, udc_controller);
 	if (ret != 0) {
-		ERR("cannot request irq %d err %d \n",
+		ERR("cannot request irq %d err %d\n",
 				udc_controller->irq, ret);
-		goto err2;
+		goto err_iounmap;
 	}
 
 	/* Initialize the udc structure including QH member and other member */
 	if (struct_udc_setup(udc_controller, pdev)) {
 		ERR("Can't initialize udc data structure\n");
 		ret = -ENOMEM;
-		goto err3;
+		goto err_free_irq;
 	}
 
 	/* initialize usb hw reg except for regs for EP,
@@ -2336,7 +2314,7 @@
 	udc_controller->gadget.dev.parent = &pdev->dev;
 	ret = device_register(&udc_controller->gadget.dev);
 	if (ret < 0)
-		goto err3;
+		goto err_free_irq;
 
 	/* setup QH and epctrl for ep0 */
 	ep0_setup(udc_controller);
@@ -2366,20 +2344,22 @@
 			DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
 	if (udc_controller->td_pool == NULL) {
 		ret = -ENOMEM;
-		goto err4;
+		goto err_unregister;
 	}
 	create_proc_file();
 	return 0;
 
-err4:
+err_unregister:
 	device_unregister(&udc_controller->gadget.dev);
-err3:
+err_free_irq:
 	free_irq(udc_controller->irq, udc_controller);
-err2:
+err_iounmap:
 	iounmap(dr_regs);
-err1:
+err_release_mem_region:
 	release_mem_region(res->start, res->end - res->start + 1);
+err_kfree:
 	kfree(udc_controller);
+	udc_controller = NULL;
 	return ret;
 }
 
@@ -2469,7 +2449,7 @@
 static void __exit udc_exit(void)
 {
 	platform_driver_unregister(&udc_driver);
-	printk("%s unregistered \n", driver_desc);
+	printk("%s unregistered\n", driver_desc);
 }
 
 module_exit(udc_exit);
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 6131752..e63ef12 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -424,16 +424,6 @@
 /* Controller dma boundary */
 #define UDC_DMA_BOUNDARY			0x1000
 
-/* -----------------------------------------------------------------------*/
-/* ##### enum data
-*/
-typedef enum {
-	e_ULPI,
-	e_UTMI_8BIT,
-	e_UTMI_16BIT,
-	e_SERIAL
-} e_PhyInterface;
-
 /*-------------------------------------------------------------------------*/
 
 /* ### driver private data
@@ -469,9 +459,9 @@
 #define EP_DIR_OUT	0
 
 struct fsl_udc {
-
 	struct usb_gadget gadget;
 	struct usb_gadget_driver *driver;
+	struct completion *done;	/* to make sure release() is done */
 	struct fsl_ep *eps;
 	unsigned int max_ep;
 	unsigned int irq;
@@ -492,20 +482,13 @@
 	size_t ep_qh_size;		/* size after alignment adjustment*/
 	dma_addr_t ep_qh_dma;		/* dma address of QH */
 
-	u32 max_pipes;		/* Device max pipes */
-	u32 max_use_endpts;	/* Max endpointes to be used */
-	u32 bus_reset;		/* Device is bus reseting */
+	u32 max_pipes;          /* Device max pipes */
 	u32 resume_state;	/* USB state to resume */
 	u32 usb_state;		/* USB current state */
-	u32 usb_next_state;	/* USB next state */
 	u32 ep0_state;		/* Endpoint zero state */
 	u32 ep0_dir;		/* Endpoint zero direction: can be
 				   USB_DIR_IN or USB_DIR_OUT */
-	u32 usb_sof_count;	/* SOF count */
-	u32 errors;		/* USB ERRORs count */
 	u8 device_address;	/* Device USB address */
-
-	struct completion *done;	/* to make sure release() is done */
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 17d9905..4e3107d 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -151,6 +151,13 @@
 #define	gadget_is_m66592(g)	0
 #endif
 
+/* Freescale CPM/QE UDC SUPPORT */
+#ifdef CONFIG_USB_GADGET_FSL_QE
+#define gadget_is_fsl_qe(g)	!strcmp("fsl_qe_udc", (g)->name)
+#else
+#define gadget_is_fsl_qe(g)	0
+#endif
+
 
 // CONFIG_USB_GADGET_SX2
 // CONFIG_USB_GADGET_AU1X00
@@ -216,6 +223,8 @@
 		return 0x20;
 	else if (gadget_is_m66592(gadget))
 		return 0x21;
+	else if (gadget_is_fsl_qe(gadget))
+		return 0x22;
 	return -ENOENT;
 }
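usb_gadget_controller_number() exists so gadget drivers can stamp a controller-specific code into bcdDevice, letting host-side tools and quirk tables tell UDCs apart. A hedged sketch of that common pattern; the helper name below is made up:

/* Sketch only: fold the controller code into the device descriptor. */
static void example_set_bcd_device(struct usb_gadget *gadget,
				   struct usb_device_descriptor *desc)
{
	int code = usb_gadget_controller_number(gadget);

	if (code >= 0)
		desc->bcdDevice = cpu_to_le16(0x0200 | code);
	else
		pr_warning("controller '%s' not recognized\n", gadget->name);
}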
 
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 6eee760..60d3f9e 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -222,7 +222,7 @@
 	 * power properties of the device. Is it selfpowered?
 	 */
 	.bmAttributes =		USB_CONFIG_ATT_ONE,
-	.bMaxPower =		1,
+	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
 };
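The bMaxPower changes in this and the following gadget drivers rely on the descriptor field being encoded in units of 2 mA, so the configured VBUS draw (in mA) is simply halved. A worked example under that assumption:

/* Sketch only: 500 mA requested -> bMaxPower byte of 250 (0xFA). */
#define EXAMPLE_VBUS_DRAW_MA	500	/* hypothetical CONFIG_USB_GADGET_VBUS_DRAW */
#define EXAMPLE_BMAXPOWER	(EXAMPLE_VBUS_DRAW_MA / 2)	/* units of 2 mA */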
 
 /* B.3.1  Standard AC Interface Descriptor */
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 5cfb5eb..8ae70de 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -178,6 +178,7 @@
 
 	/* ep_reset() has already been called */
 	ep->stopped = 0;
+	ep->wedged = 0;
 	ep->out_overflow = 0;
 
 	/* set speed-dependent max packet; may kick in high bandwidth */
@@ -1218,7 +1219,7 @@
 static int net2280_fifo_status (struct usb_ep *_ep);
 
 static int
-net2280_set_halt (struct usb_ep *_ep, int value)
+net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 {
 	struct net2280_ep	*ep;
 	unsigned long		flags;
@@ -1239,16 +1240,21 @@
 	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
 		retval = -EAGAIN;
 	else {
-		VDEBUG (ep->dev, "%s %s halt\n", _ep->name,
-				value ? "set" : "clear");
+		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
+				value ? "set" : "clear",
+				wedged ? "wedge" : "halt");
 		/* set/clear, then synch memory views with the device */
 		if (value) {
 			if (ep->num == 0)
 				ep->dev->protocol_stall = 1;
 			else
 				set_halt (ep);
-		} else
+			if (wedged)
+				ep->wedged = 1;
+		} else {
 			clear_halt (ep);
+			ep->wedged = 0;
+		}
 		(void) readl (&ep->regs->ep_rsp);
 	}
 	spin_unlock_irqrestore (&ep->dev->lock, flags);
@@ -1257,6 +1263,20 @@
 }
 
 static int
+net2280_set_halt(struct usb_ep *_ep, int value)
+{
+	return net2280_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int
+net2280_set_wedge(struct usb_ep *_ep)
+{
+	if (!_ep || _ep->name == ep0name)
+		return -EINVAL;
+	return net2280_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int
 net2280_fifo_status (struct usb_ep *_ep)
 {
 	struct net2280_ep	*ep;
@@ -1302,6 +1322,7 @@
 	.dequeue	= net2280_dequeue,
 
 	.set_halt	= net2280_set_halt,
+	.set_wedge	= net2280_set_wedge,
 	.fifo_status	= net2280_fifo_status,
 	.fifo_flush	= net2280_fifo_flush,
 };
@@ -2410,9 +2431,14 @@
 				goto do_stall;
 			if ((e = get_ep_by_addr (dev, w_index)) == 0)
 				goto do_stall;
-			clear_halt (e);
+			if (e->wedged) {
+				VDEBUG(dev, "%s wedged, halt not cleared\n",
+						ep->ep.name);
+			} else {
+				VDEBUG(dev, "%s clear halt\n", ep->ep.name);
+				clear_halt(e);
+			}
 			allow_status (ep);
-			VDEBUG (dev, "%s clear halt\n", ep->ep.name);
 			goto next_endpoints;
 			}
 			break;
@@ -2427,6 +2453,8 @@
 				goto do_stall;
 			if ((e = get_ep_by_addr (dev, w_index)) == 0)
 				goto do_stall;
+			if (e->ep.name == ep0name)
+				goto do_stall;
 			set_halt (e);
 			allow_status (ep);
 			VDEBUG (dev, "%s set halt\n", ep->ep.name);
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
index 81a71db..c368522 100644
--- a/drivers/usb/gadget/net2280.h
+++ b/drivers/usb/gadget/net2280.h
@@ -109,6 +109,7 @@
 						in_fifo_validate : 1,
 						out_overflow : 1,
 						stopped : 1,
+						wedged : 1,
 						is_in : 1,
 						is_iso : 1,
 						responded : 1;
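Wedging is stronger than a plain halt: a wedged endpoint keeps returning STALL even after the host's ClearFeature(ENDPOINT_HALT), which is what some function drivers (mass storage, for instance) need in order to force a reset. A hedged sketch of how a function driver would reach the new net2280 code through the gadget API, assuming the usb_ep_set_wedge()/usb_ep_set_halt() wrappers:

/* Sketch only: choose between an ordinary halt and a sticky wedge. */
static int example_stall_endpoint(struct usb_ep *ep, int fatal_protocol_error)
{
	if (fatal_protocol_error)
		return usb_ep_set_wedge(ep);	/* STALL until reset/reconfig */

	return usb_ep_set_halt(ep);		/* host may clear this one */
}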
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index bb54cca..34e9e39 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2313,6 +2313,13 @@
 
 	tmp = omap_readl(OTG_REV);
 	if (cpu_is_omap24xx()) {
+		/*
+		 * REVISIT: Not clear how this works on OMAP2.  trans
+		 * is ANDed to produce bits 7 and 8, which might make
+		 * sense for USB_TRANSCEIVER_CTRL on OMAP1,
+		 * but with CONTROL_DEVCONF, these bits have something to
+		 * do with the frame adjustment counter and McBSP2.
+		 */
 		ctrl_name = "control_devconf";
 		trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
 	} else {
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2b3b9e1..5a3034f 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -252,7 +252,7 @@
 	.bConfigurationValue =	DEV_CONFIG_VALUE,
 	.iConfiguration =	0,
 	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower =		1	/* Self-Powered */
+	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
 };
 
 static struct usb_interface_descriptor intf_desc = {
@@ -1278,8 +1278,7 @@
 	/* respond with data transfer before status phase? */
 	if (value >= 0) {
 		req->length = value;
-		req->zero = value < wLength
-				&& (value % gadget->ep0->maxpacket) == 0;
+		req->zero = value < wLength;
 		value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
 		if (value < 0) {
 			DBG(dev, "ep_queue --> %d\n", value);
@@ -1477,7 +1476,6 @@
 	if (gadget->is_otg) {
 		otg_desc.bmAttributes |= USB_OTG_HNP,
 		config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
-		config_desc.bMaxPower = 4;
 	}
 
 	spin_lock_init(&dev->lock);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 7cbc78a..caa37c9 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/version.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
@@ -651,7 +650,7 @@
 	struct pxa27x_request *req;
 
 	req = kzalloc(sizeof *req, gfp_flags);
-	if (!req || !_ep)
+	if (!req)
 		return NULL;
 
 	INIT_LIST_HEAD(&req->queue);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 29d13eb..00ba06b 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1651,7 +1651,7 @@
 		return -EBUSY;
 
 	if (!driver->bind || !driver->setup
-			|| driver->speed != USB_SPEED_FULL) {
+			|| driver->speed < USB_SPEED_FULL) {
 		printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
 			driver->bind, driver->setup, driver->speed);
 		return -EINVAL;
@@ -1894,11 +1894,8 @@
 		udc->regs_info = debugfs_create_file("registers", S_IRUGO,
 				s3c2410_udc_debugfs_root,
 				udc, &s3c2410_udc_debugfs_fops);
-		if (IS_ERR(udc->regs_info)) {
-			dev_warn(dev, "debugfs file creation failed %ld\n",
-				 PTR_ERR(udc->regs_info));
-			udc->regs_info = NULL;
-		}
+		if (!udc->regs_info)
+			dev_warn(dev, "debugfs file creation failed\n");
 	}
 
 	dev_dbg(dev, "probe ok\n");
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 3faa7a7..37879af 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -43,6 +43,7 @@
 #include "epautoconf.c"
 
 #include "f_acm.c"
+#include "f_obex.c"
 #include "f_serial.c"
 #include "u_serial.c"
 
@@ -56,6 +57,7 @@
 #define GS_VENDOR_ID			0x0525	/* NetChip */
 #define GS_PRODUCT_ID			0xa4a6	/* Linux-USB Serial Gadget */
 #define GS_CDC_PRODUCT_ID		0xa4a7	/* ... as CDC-ACM */
+#define GS_CDC_OBEX_PRODUCT_ID		0xa4a9	/* ... as CDC-OBEX */
 
 /* string IDs are assigned dynamically */
 
@@ -125,6 +127,10 @@
 module_param(use_acm, bool, 0);
 MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes");
 
+static int use_obex = false;
+module_param(use_obex, bool, 0);
+MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no");
+
 static unsigned n_ports = 1;
 module_param(n_ports, uint, 0);
 MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
@@ -139,6 +145,8 @@
 	for (i = 0; i < n_ports && status == 0; i++) {
 		if (use_acm)
 			status = acm_bind_config(c, i);
+		else if (use_obex)
+			status = obex_bind_config(c, i);
 		else
 			status = gser_bind_config(c, i);
 	}
@@ -151,7 +159,6 @@
 	/* .bConfigurationValue = f(use_acm) */
 	/* .iConfiguration = DYNAMIC */
 	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
-	.bMaxPower	= 1,	/* 2 mA, minimal */
 };
 
 static int __init gs_bind(struct usb_composite_dev *cdev)
@@ -249,6 +256,12 @@
 		device_desc.bDeviceClass = USB_CLASS_COMM;
 		device_desc.idProduct =
 				__constant_cpu_to_le16(GS_CDC_PRODUCT_ID);
+	} else if (use_obex) {
+		serial_config_driver.label = "CDC OBEX config";
+		serial_config_driver.bConfigurationValue = 3;
+		device_desc.bDeviceClass = USB_CLASS_COMM;
+		device_desc.idProduct =
+			__constant_cpu_to_le16(GS_CDC_OBEX_PRODUCT_ID);
 	} else {
 		serial_config_driver.label = "Generic Serial config";
 		serial_config_driver.bConfigurationValue = 1;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index dbd575a..66948b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -873,6 +873,13 @@
 		spin_lock(&dev->lock);
 		dev->port_usb = link;
 		link->ioport = dev;
+		if (netif_running(dev->net)) {
+			if (link->open)
+				link->open(link);
+		} else {
+			if (link->close)
+				link->close(link);
+		}
 		spin_unlock(&dev->lock);
 
 		netif_carrier_on(dev->net);
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index af3910d..300f0ed 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -62,5 +62,6 @@
 /* functions are bound to configurations by a config or gadget driver */
 int acm_bind_config(struct usb_configuration *c, u8 port_num);
 int gser_bind_config(struct usb_configuration *c, u8 port_num);
+int obex_bind_config(struct usb_configuration *c, u8 port_num);
 
 #endif /* __U_SERIAL_H */
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 228797e..56f592d 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -138,7 +138,6 @@
 	tristate "OHCI HCD support"
 	depends on USB && USB_ARCH_HAS_OHCI
 	select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
-	select I2C if ARCH_PNX4008
 	---help---
 	  The Open Host Controller Interface (OHCI) is a standard for accessing
 	  USB 1.1 host controller hardware.  It does more in hardware than Intel's
@@ -305,3 +304,31 @@
 	help
 	   This driver enables support for the on-chip R8A66597 in the
 	   SH7366 and SH7723 processors.
+
+config USB_WHCI_HCD
+	tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on PCI && USB
+	select USB_WUSB
+	select UWB_WHCI
+	help
+	  A driver for PCI-based Wireless USB Host Controllers that are
+	  compliant with the WHCI specification.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called "whci-hcd".
+
+config USB_HWA_HCD
+	tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on USB
+	select USB_WUSB
+	select UWB_HWA
+	help
+	  This driver enables you to connect Wireless USB devices to
+	  your system using a Host Wire Adapter USB dongle. This is a
+	  UWB Radio Controller and WUSB Host Controller connected to
+	  your machine via USB (specified in WUSB1.0).
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called "hwa-hc".
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f1edda2..23be222 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -8,6 +8,8 @@
 
 isp1760-objs := isp1760-hcd.o isp1760-if.o
 
+obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
+
 obj-$(CONFIG_PCI)		+= pci-quirks.o
 
 obj-$(CONFIG_USB_EHCI_HCD)	+= ehci-hcd.o
@@ -19,3 +21,4 @@
 obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
 obj-$(CONFIG_USB_R8A66597_HCD)	+= r8a66597-hcd.o
 obj-$(CONFIG_USB_ISP1760_HCD)	+= isp1760.o
+obj-$(CONFIG_USB_HWA_HCD)	+= hwa-hc.o
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index b0f8ed5..0cb53ca 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -358,7 +358,8 @@
 	struct usb_bus *bus;
 	struct mutex mutex;	/* protect filling of buffer */
 	size_t count;		/* number of characters filled into buffer */
-	char *page;
+	char *output_buf;
+	size_t alloc_size;
 };
 
 #define speed_char(info1) ({ char tmp; \
@@ -488,8 +489,8 @@
 
 	hcd = bus_to_hcd(buf->bus);
 	ehci = hcd_to_ehci (hcd);
-	next = buf->page;
-	size = PAGE_SIZE;
+	next = buf->output_buf;
+	size = buf->alloc_size;
 
 	*next = 0;
 
@@ -510,7 +511,7 @@
 	}
 	spin_unlock_irqrestore (&ehci->lock, flags);
 
-	return strlen(buf->page);
+	return strlen(buf->output_buf);
 }
 
 #define DBG_SCHED_LIMIT 64
@@ -531,8 +532,8 @@
 
 	hcd = bus_to_hcd(buf->bus);
 	ehci = hcd_to_ehci (hcd);
-	next = buf->page;
-	size = PAGE_SIZE;
+	next = buf->output_buf;
+	size = buf->alloc_size;
 
 	temp = scnprintf (next, size, "size = %d\n", ehci->periodic_size);
 	size -= temp;
@@ -568,14 +569,16 @@
 				for (temp = 0; temp < seen_count; temp++) {
 					if (seen [temp].ptr != p.ptr)
 						continue;
-					if (p.qh->qh_next.ptr)
+					if (p.qh->qh_next.ptr) {
 						temp = scnprintf (next, size,
 							" ...");
-					p.ptr = NULL;
+						size -= temp;
+						next += temp;
+					}
 					break;
 				}
 				/* show more info the first time around */
-				if (temp == seen_count && p.ptr) {
+				if (temp == seen_count) {
 					u32	scratch = hc32_to_cpup(ehci,
 							&p.qh->hw_info1);
 					struct ehci_qtd	*qtd;
@@ -649,7 +652,7 @@
 	spin_unlock_irqrestore (&ehci->lock, flags);
 	kfree (seen);
 
-	return PAGE_SIZE - size;
+	return buf->alloc_size - size;
 }
 #undef DBG_SCHED_LIMIT
 
@@ -665,14 +668,14 @@
 
 	hcd = bus_to_hcd(buf->bus);
 	ehci = hcd_to_ehci (hcd);
-	next = buf->page;
-	size = PAGE_SIZE;
+	next = buf->output_buf;
+	size = buf->alloc_size;
 
 	spin_lock_irqsave (&ehci->lock, flags);
 
 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
 		size = scnprintf (next, size,
-			"bus %s, device %s (driver " DRIVER_VERSION ")\n"
+			"bus %s, device %s\n"
 			"%s\n"
 			"SUSPENDED (no register access)\n",
 			hcd->self.controller->bus->name,
@@ -684,7 +687,7 @@
 	/* Capability Registers */
 	i = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
 	temp = scnprintf (next, size,
-		"bus %s, device %s (driver " DRIVER_VERSION ")\n"
+		"bus %s, device %s\n"
 		"%s\n"
 		"EHCI %x.%02x, hcd state %d\n",
 		hcd->self.controller->bus->name,
@@ -808,7 +811,7 @@
 done:
 	spin_unlock_irqrestore (&ehci->lock, flags);
 
-	return PAGE_SIZE - size;
+	return buf->alloc_size - size;
 }
 
 static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
@@ -822,6 +825,7 @@
 		buf->bus = bus;
 		buf->fill_func = fill_func;
 		mutex_init(&buf->mutex);
+		buf->alloc_size = PAGE_SIZE;
 	}
 
 	return buf;
@@ -831,10 +835,10 @@
 {
 	int ret = 0;
 
-	if (!buf->page)
-		buf->page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!buf->output_buf)
+		buf->output_buf = (char *)vmalloc(buf->alloc_size);
 
-	if (!buf->page) {
+	if (!buf->output_buf) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -867,7 +871,7 @@
 	mutex_unlock(&buf->mutex);
 
 	ret = simple_read_from_buffer(user_buf, len, offset,
-				      buf->page, buf->count);
+				      buf->output_buf, buf->count);
 
 out:
 	return ret;
@@ -879,8 +883,8 @@
 	struct debug_buffer *buf = file->private_data;
 
 	if (buf) {
-		if (buf->page)
-			free_page((unsigned long)buf->page);
+		if (buf->output_buf)
+			vfree(buf->output_buf);
 		kfree(buf);
 	}
 
@@ -895,10 +899,14 @@
 
 static int debug_periodic_open(struct inode *inode, struct file *file)
 {
-	file->private_data = alloc_buffer(inode->i_private,
-					  fill_periodic_buffer);
+	struct debug_buffer *buf;
+	buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+	if (!buf)
+		return -ENOMEM;
 
-	return file->private_data ? 0 : -ENOMEM;
+	buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8) * PAGE_SIZE;
+	file->private_data = buf;
+	return 0;
 }
 
 static int debug_registers_open(struct inode *inode, struct file *file)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 8409e07..15a803b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -24,6 +24,7 @@
 #include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/timer.h>
@@ -59,7 +60,6 @@
  * providing early devices for those host controllers to talk to!
  */
 
-#define DRIVER_VERSION "10 Dec 2004"
 #define DRIVER_AUTHOR "David Brownell"
 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
 
@@ -620,9 +620,9 @@
 
 	temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
 	ehci_info (ehci,
-		"USB %x.%x started, EHCI %x.%02x, driver %s%s\n",
+		"USB %x.%x started, EHCI %x.%02x%s\n",
 		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
-		temp >> 8, temp & 0xff, DRIVER_VERSION,
+		temp >> 8, temp & 0xff,
 		ignore_oc ? ", overcurrent ignored" : "");
 
 	ehci_writel(ehci, INTR_MASK,
@@ -706,7 +706,7 @@
 		pcd_status = status;
 
 		/* resume root hub? */
-		if (!(ehci_readl(ehci, &ehci->regs->command) & CMD_RUN))
+		if (!(cmd & CMD_RUN))
 			usb_hcd_resume_root_hub(hcd);
 
 		while (i--) {
@@ -715,8 +715,11 @@
 
 			if (pstatus & PORT_OWNER)
 				continue;
-			if (!(pstatus & PORT_RESUME)
-					|| ehci->reset_done [i] != 0)
+			if (!(test_bit(i, &ehci->suspended_ports) &&
+					((pstatus & PORT_RESUME) ||
+						!(pstatus & PORT_SUSPEND)) &&
+					(pstatus & PORT_PE) &&
+					ehci->reset_done[i] == 0))
 				continue;
 
 			/* start 20 msec resume signaling from this port,
@@ -731,9 +734,8 @@
 
 	/* PCI errors [4.15.2.4] */
 	if (unlikely ((status & STS_FATAL) != 0)) {
-		dbg_cmd (ehci, "fatal", ehci_readl(ehci,
-						   &ehci->regs->command));
-		dbg_status (ehci, "fatal", status);
+		dbg_cmd(ehci, "fatal", cmd);
+		dbg_status(ehci, "fatal", status);
 		if (status & STS_HALT) {
 			ehci_err (ehci, "fatal error\n");
 dead:
@@ -994,9 +996,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC
-
-MODULE_DESCRIPTION (DRIVER_INFO);
+MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR (DRIVER_AUTHOR);
 MODULE_LICENSE ("GPL");
 
@@ -1020,11 +1020,6 @@
 #define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
 #endif
 
-#if defined(CONFIG_440EPX) && !defined(CONFIG_PPC_MERGE)
-#include "ehci-ppc-soc.c"
-#define	PLATFORM_DRIVER		ehci_ppc_soc_driver
-#endif
-
 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF
 #include "ehci-ppc-of.c"
 #define OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
@@ -1049,6 +1044,16 @@
 {
 	int retval = 0;
 
+	if (usb_disabled())
+		return -ENODEV;
+
+	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
+	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
+				" before uhci_hcd and ohci_hcd, not after\n");
+
 	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
 		 hcd_name,
 		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
@@ -1056,8 +1061,10 @@
 
 #ifdef DEBUG
 	ehci_debug_root = debugfs_create_dir("ehci", NULL);
-	if (!ehci_debug_root)
-		return -ENOENT;
+	if (!ehci_debug_root) {
+		retval = -ENOENT;
+		goto err_debug;
+	}
 #endif
 
 #ifdef PLATFORM_DRIVER
@@ -1104,7 +1111,9 @@
 #ifdef DEBUG
 	debugfs_remove(ehci_debug_root);
 	ehci_debug_root = NULL;
+err_debug:
 #endif
+	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
 	return retval;
 }
 module_init(ehci_hcd_init);
@@ -1126,6 +1135,7 @@
 #ifdef DEBUG
 	debugfs_remove(ehci_debug_root);
 #endif
+	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
 }
 module_exit(ehci_hcd_cleanup);
 
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 740835b..218f966 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -236,10 +236,8 @@
 		temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
 		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
 		if (test_bit(i, &ehci->bus_suspended) &&
-				(temp & PORT_SUSPEND)) {
-			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
+				(temp & PORT_SUSPEND))
 			temp |= PORT_RESUME;
-		}
 		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
 	}
 	i = HCS_N_PORTS (ehci->hcs_params);
@@ -482,10 +480,9 @@
 		 * controller by the user.
 		 */
 
-		if ((temp & mask) != 0
-				|| ((temp & PORT_RESUME) != 0
-					&& time_after_eq(jiffies,
-						ehci->reset_done[i]))) {
+		if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
+				|| (ehci->reset_done[i] && time_after_eq(
+					jiffies, ehci->reset_done[i]))) {
 			if (i < 7)
 			    buf [0] |= 1 << (i + 1);
 			else
@@ -688,6 +685,7 @@
 			/* resume completed? */
 			else if (time_after_eq(jiffies,
 					ehci->reset_done[wIndex])) {
+				clear_bit(wIndex, &ehci->suspended_ports);
 				set_bit(wIndex, &ehci->port_c_suspend);
 				ehci->reset_done[wIndex] = 0;
 
@@ -734,6 +732,9 @@
 					ehci_readl(ehci, status_reg));
 		}
 
+		if (!(temp & (PORT_RESUME|PORT_RESET)))
+			ehci->reset_done[wIndex] = 0;
+
 		/* transfer dedicated ports to the companion hc */
 		if ((temp & PORT_CONNECT) &&
 				test_bit(wIndex, &ehci->companion_ports)) {
@@ -757,8 +758,17 @@
 		}
 		if (temp & PORT_PE)
 			status |= 1 << USB_PORT_FEAT_ENABLE;
-		if (temp & (PORT_SUSPEND|PORT_RESUME))
+
+		/* maybe the port was unsuspended without our knowledge */
+		if (temp & (PORT_SUSPEND|PORT_RESUME)) {
 			status |= 1 << USB_PORT_FEAT_SUSPEND;
+		} else if (test_bit(wIndex, &ehci->suspended_ports)) {
+			clear_bit(wIndex, &ehci->suspended_ports);
+			ehci->reset_done[wIndex] = 0;
+			if (temp & PORT_PE)
+				set_bit(wIndex, &ehci->port_c_suspend);
+		}
+
 		if (temp & PORT_OC)
 			status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
 		if (temp & PORT_RESET)
@@ -803,6 +813,7 @@
 					|| (temp & PORT_RESET) != 0)
 				goto error;
 			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+			set_bit(wIndex, &ehci->suspended_ports);
 			break;
 		case USB_PORT_FEAT_POWER:
 			if (HCS_PPC (ehci->hcs_params))
diff --git a/drivers/usb/host/ehci-ppc-soc.c b/drivers/usb/host/ehci-ppc-soc.c
deleted file mode 100644
index 529590e..0000000
--- a/drivers/usb/host/ehci-ppc-soc.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * EHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 2006-2007 Stefan Roese <sr@denx.de>, DENX Software Engineering
- *
- * Bus Glue for PPC On-Chip EHCI driver
- * Tested on AMCC 440EPx
- *
- * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-
-extern int usb_disabled(void);
-
-/* called during probe() after chip reset completes */
-static int ehci_ppc_soc_setup(struct usb_hcd *hcd)
-{
-	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
-	int		retval;
-
-	retval = ehci_halt(ehci);
-	if (retval)
-		return retval;
-
-	retval = ehci_init(hcd);
-	if (retval)
-		return retval;
-
-	ehci->sbrn = 0x20;
-	return ehci_reset(ehci);
-}
-
-/**
- * usb_ehci_ppc_soc_probe - initialize PPC-SoC-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- *
- */
-int usb_ehci_ppc_soc_probe(const struct hc_driver *driver,
-			   struct usb_hcd **hcd_out,
-			   struct platform_device *dev)
-{
-	int retval;
-	struct usb_hcd *hcd;
-	struct ehci_hcd *ehci;
-
-	if (dev->resource[1].flags != IORESOURCE_IRQ) {
-		pr_debug("resource[1] is not IORESOURCE_IRQ");
-		retval = -ENOMEM;
-	}
-	hcd = usb_create_hcd(driver, &dev->dev, "PPC-SOC EHCI");
-	if (!hcd)
-		return -ENOMEM;
-	hcd->rsrc_start = dev->resource[0].start;
-	hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
-
-	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
-		pr_debug("request_mem_region failed");
-		retval = -EBUSY;
-		goto err1;
-	}
-
-	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
-	if (!hcd->regs) {
-		pr_debug("ioremap failed");
-		retval = -ENOMEM;
-		goto err2;
-	}
-
-	ehci = hcd_to_ehci(hcd);
-	ehci->big_endian_mmio = 1;
-	ehci->big_endian_desc = 1;
-	ehci->caps = hcd->regs;
-	ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
-
-	/* cache this readonly data; minimize chip reads */
-	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
-#if defined(CONFIG_440EPX)
-	/*
-	 * 440EPx Errata USBH_3
-	 * Fix: Enable Break Memory Transfer (BMT) in INSNREG3
-	 */
-	out_be32((void *)((ulong)(&ehci->regs->command) + 0x8c), (1 << 0));
-	ehci_dbg(ehci, "Break Memory Transfer (BMT) has beed enabled!\n");
-#endif
-
-	retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
-	if (retval == 0)
-		return retval;
-
-	iounmap(hcd->regs);
-err2:
-	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
-	usb_put_hcd(hcd);
-	return retval;
-}
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
-/**
- * usb_ehci_hcd_ppc_soc_remove - shutdown processing for PPC-SoC-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_ehci_hcd_ppc_soc_probe(), first invoking
- * the HCD's stop() method.  It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- *
- */
-void usb_ehci_ppc_soc_remove(struct usb_hcd *hcd, struct platform_device *dev)
-{
-	usb_remove_hcd(hcd);
-	iounmap(hcd->regs);
-	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-	usb_put_hcd(hcd);
-}
-
-static const struct hc_driver ehci_ppc_soc_hc_driver = {
-	.description = hcd_name,
-	.product_desc = "PPC-SOC EHCI",
-	.hcd_priv_size = sizeof(struct ehci_hcd),
-
-	/*
-	 * generic hardware linkage
-	 */
-	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2,
-
-	/*
-	 * basic lifecycle operations
-	 */
-	.reset = ehci_ppc_soc_setup,
-	.start = ehci_run,
-	.stop = ehci_stop,
-	.shutdown = ehci_shutdown,
-
-	/*
-	 * managing i/o requests and associated device resources
-	 */
-	.urb_enqueue = ehci_urb_enqueue,
-	.urb_dequeue = ehci_urb_dequeue,
-	.endpoint_disable = ehci_endpoint_disable,
-
-	/*
-	 * scheduling support
-	 */
-	.get_frame_number = ehci_get_frame,
-
-	/*
-	 * root hub support
-	 */
-	.hub_status_data = ehci_hub_status_data,
-	.hub_control = ehci_hub_control,
-	.bus_suspend = ehci_bus_suspend,
-	.bus_resume = ehci_bus_resume,
-	.relinquish_port = ehci_relinquish_port,
-	.port_handed_over = ehci_port_handed_over,
-};
-
-static int ehci_hcd_ppc_soc_drv_probe(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd = NULL;
-	int ret;
-
-	pr_debug("In ehci_hcd_ppc_soc_drv_probe\n");
-
-	if (usb_disabled())
-		return -ENODEV;
-
-	/* FIXME we only want one one probe() not two */
-	ret = usb_ehci_ppc_soc_probe(&ehci_ppc_soc_hc_driver, &hcd, pdev);
-	return ret;
-}
-
-static int ehci_hcd_ppc_soc_drv_remove(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
-	/* FIXME we only want one one remove() not two */
-	usb_ehci_ppc_soc_remove(hcd, pdev);
-	return 0;
-}
-
-MODULE_ALIAS("platform:ppc-soc-ehci");
-static struct platform_driver ehci_ppc_soc_driver = {
-	.probe = ehci_hcd_ppc_soc_drv_probe,
-	.remove = ehci_hcd_ppc_soc_drv_remove,
-	.shutdown = usb_hcd_platform_shutdown,
-	.driver = {
-		.name = "ppc-soc-ehci",
-	}
-};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b697a13..b11798d 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -99,6 +99,8 @@
 			owned by the companion during a bus suspend */
 	unsigned long		port_c_suspend;		/* which ports have
 			the change-suspend feature turned on */
+	unsigned long		suspended_ports;	/* which ports are
+			suspended */
 
 	/* per-HC memory pools (could be per-bus, but ...) */
 	struct dma_pool		*qh_pool;	/* qh per active urb */
@@ -181,14 +183,16 @@
 	 * the async ring; just the I/O watchdog.  Note that if a
 	 * SHRINK were pending, OFF would never be requested.
 	 */
-	if (timer_pending(&ehci->watchdog)
-			&& ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
-				& ehci->actions))
-		return;
+	enum ehci_timer_action oldactions = ehci->actions;
 
 	if (!test_and_set_bit (action, &ehci->actions)) {
 		unsigned long t;
 
+		if (timer_pending(&ehci->watchdog)
+			&& ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
+				& oldactions))
+			return;
+
 		switch (action) {
 		case TIMER_IO_WATCHDOG:
 			t = EHCI_IO_JIFFIES;
@@ -204,7 +208,7 @@
 			t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
 			break;
 		}
-		mod_timer(&ehci->watchdog, t + jiffies);
+		mod_timer(&ehci->watchdog, round_jiffies(t + jiffies));
 	}
 }
 
@@ -604,16 +608,7 @@
 /*
  * Big-endian read/write functions are arch-specific.
  * Other arches can be added if/when they're needed.
- *
- * REVISIT: arch/powerpc now has readl/writel_be, so the
- * definition below can die once the 4xx support is
- * finally ported over.
  */
-#if defined(CONFIG_PPC) && !defined(CONFIG_PPC_MERGE)
-#define readl_be(addr)		in_be32((__force unsigned *)addr)
-#define writel_be(val, addr)	out_be32((__force unsigned *)addr, val)
-#endif
-
 #if defined(CONFIG_ARM) && defined(CONFIG_ARCH_IXP4XX)
 #define readl_be(addr)		__raw_readl((__force unsigned *)addr)
 #define writel_be(val, addr)	__raw_writel(val, (__force unsigned *)addr)
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
new file mode 100644
index 0000000..64be4d8
--- /dev/null
+++ b/drivers/usb/host/hwa-hc.c
@@ -0,0 +1,925 @@
+/*
+ * Host Wire Adapter:
+ * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The HWA driver is a simple layer that forwards requests to the WAHC
+ * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
+ * Controller) layers.
+ *
+ * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
+ * Host Controller that is connected to your system via USB (a USB
+ * dongle that implements a USB host...). There is also a Device Wire
+ * Adapter, DWA (a Wireless USB hub), that uses the same mechanism for
+ * transferring data (it is after all a USB host connected via
+ * Wireless USB), so both share a common layer called Wire Adapter Host
+ * Controller that does all the hard work. The WUSBHC (Wireless USB
+ * Host Controller) is the part common to WUSB Host Controllers, the
+ * HWA and the PCI-based one, that is implemented following the WHCI
+ * spec. All these layers are implemented in ../wusbcore.
+ *
+ * The main functions are hwahc_op_urb_{en,de}queue(), which hand a URB
+ * over to the Wire Adapter layer for conversion into WA transfer
+ * requests.
+ *
+ * Entry points:
+ *
+ *   hwahc_driver_*()   Driver initialization, registration and
+ *                      teardown.
+ *
+ *   hwahc_probe()	New device came up, create an instance for
+ *                      it [from device enumeration].
+ *
+ *   hwahc_disconnect()	Remove device instance [from device
+ *                      enumeration].
+ *
+ *   [__]hwahc_op_*()   Host Wire Adapter-specific functions for
+ *                      starting/stopping/etc (some might be made also
+ *                      DWA).
+ */
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include "../wusbcore/wa-hc.h"
+#include "../wusbcore/wusbhc.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+struct hwahc {
+	struct wusbhc wusbhc;	/* has to be 1st */
+	struct wahc wa;
+	u8 buffer[16];		/* for misc usb transactions */
+};
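+
+/*
+ * Orientation only: the USB core hands the hc_driver callbacks a bare
+ * struct usb_hcd, and the driver state is recovered with the usual
+ * container_of() chain used throughout this file, roughly:
+ *
+ *	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ *	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ *
+ * which is why the wusbhc member above has to stay first.
+ */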
+
+/**
+ * FIXME should be wusbhc
+ *
+ * NOTE: we need to cache the Cluster ID because later...there is no
+ *       way to get it :)
+ */
+static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
+{
+	int result;
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+	struct wahc *wa = &hwahc->wa;
+	struct device *dev = &wa->usb_iface->dev;
+
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_CLUSTER_ID,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			cluster_id,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 1000 /* FIXME: arbitrary */);
+	if (result < 0)
+		dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
+			cluster_id, result);
+	else {
+		wusbhc->cluster_id = cluster_id;
+		dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n",
+			 cluster_id);
+	}
+	return result;
+}
+
+static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_NUM_DNTS,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			interval << 8 | slots,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Reset a WUSB host controller and wait for the reset to complete.
+ *
+ * @usb_hcd:	Pointer to WUSB Host Controller instance.
+ *
+ */
+static int hwahc_op_reset(struct usb_hcd *usb_hcd)
+{
+	int result;
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct device *dev = &hwahc->wa.usb_iface->dev;
+
+	d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+	mutex_lock(&wusbhc->mutex);
+	wa_nep_disarm(&hwahc->wa);
+	result = __wa_set_feature(&hwahc->wa, WA_RESET);
+	if (result < 0) {
+		dev_err(dev, "error commanding HC to reset: %d\n", result);
+		goto error_unlock;
+	}
+	d_printf(3, dev, "reset: waiting for device to change state\n");
+	result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
+	if (result < 0) {
+		dev_err(dev, "error waiting for HC to reset: %d\n", result);
+		goto error_unlock;
+	}
+error_unlock:
+	mutex_unlock(&wusbhc->mutex);
+	d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+	return result;
+}
+
+/*
+ * FIXME: break this function up
+ */
+static int hwahc_op_start(struct usb_hcd *usb_hcd)
+{
+	u8 addr;
+	int result;
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct device *dev = &hwahc->wa.usb_iface->dev;
+
+	/* Set up a Host Info WUSB Information Element */
+	d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+	result = -ENOSPC;
+	mutex_lock(&wusbhc->mutex);
+	/* Start the numbering from the top so that the bottom
+	 * range of the unauth addr space is used for devices,
+	 * the top for HCs; use 0xfe - RC# */
+	addr = wusb_cluster_id_get();
+	if (addr == 0)
+		goto error_cluster_id_get;
+	result = __hwahc_set_cluster_id(hwahc, addr);
+	if (result < 0)
+		goto error_set_cluster_id;
+
+	result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "cannot listen to notifications: %d\n", result);
+		goto error_stop;
+	}
+	usb_hcd->uses_new_polling = 1;
+	usb_hcd->poll_rh = 1;
+	usb_hcd->state = HC_STATE_RUNNING;
+	result = 0;
+out:
+	mutex_unlock(&wusbhc->mutex);
+	d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+	return result;
+
+error_stop:
+	__wa_stop(&hwahc->wa);
+error_set_cluster_id:
+	wusb_cluster_id_put(wusbhc->cluster_id);
+error_cluster_id_get:
+	goto out;
+
+}
+
+/*
+ * FIXME: break this function up
+ */
+static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
+{
+	int result;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct device *dev = &hwahc->wa.usb_iface->dev;
+
+	/* Set up a Host Info WUSB Information Element */
+	d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+	result = -ENOSPC;
+
+	result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
+	if (result < 0) {
+		dev_err(dev, "error commanding HC to start: %d\n", result);
+		goto error_stop;
+	}
+	result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
+	if (result < 0) {
+		dev_err(dev, "error waiting for HC to start: %d\n", result);
+		goto error_stop;
+	}
+	result = 0;
+out:
+	d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+	return result;
+
+error_stop:
+	result = __wa_clear_feature(&hwahc->wa, WA_ENABLE);
+	goto out;
+}
+
+static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
+		usb_hcd, hwahc, *(unsigned long *) &msg);
+	return -ENOSYS;
+}
+
+static int hwahc_op_resume(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+		usb_hcd, hwahc);
+	return -ENOSYS;
+}
+
+static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct device *dev = &hwahc->wa.usb_iface->dev;
+
+	d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+	/* Nothing for now */
+	d_fnend(4, dev, "(hwahc %p) = void\n", hwahc);
+	return;
+}
+
+/*
+ * No need to abort pipes: by the time this is called, all the children
+ * have been disconnected and that has already done it [through
+ * usb_disable_interface() -> usb_disable_endpoint() ->
+ * hwahc_op_endpoint_disable() -> rpipe_ep_disable()].
+ */
+static void hwahc_op_stop(struct usb_hcd *usb_hcd)
+{
+	int result;
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	struct device *dev = &wa->usb_iface->dev;
+
+	d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+	mutex_lock(&wusbhc->mutex);
+	wusbhc_stop(wusbhc);
+	wa_nep_disarm(&hwahc->wa);
+	result = __wa_stop(&hwahc->wa);
+	wusb_cluster_id_put(wusbhc->cluster_id);
+	mutex_unlock(&wusbhc->mutex);
+	d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+	return;
+}
+
+static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+		usb_hcd, hwahc);
+	return -ENOSYS;
+}
+
+static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+				gfp_t gfp)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
+}
+
+static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
+				int status)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	return wa_urb_dequeue(&hwahc->wa, urb);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If there is an associated rpipe to this endpoint, go ahead and put it.
+ */
+static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
+				      struct usb_host_endpoint *ep)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	rpipe_ep_disable(&hwahc->wa, ep);
+}
+
+/*
+ * Set the UWB MAS allocation for the WUSB cluster
+ *
+ * @stream_index: stream to use (-1 for cancelling the allocation)
+ * @mas: mas bitmap to use
+ */
+static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
+			      const struct uwb_mas_bm *mas)
+{
+	int result;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	struct device *dev = &wa->usb_iface->dev;
+	u8 mas_le[UWB_NUM_MAS/8];
+
+	/* Set the stream index */
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_STREAM_IDX,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			stream_index,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
+		goto out;
+	}
+	uwb_mas_bm_copy_le(mas_le, mas);
+	/* Set the MAS allocation */
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_WUSB_MAS,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			mas_le, 32, 1000 /* FIXME: arbitrary */);
+	if (result < 0)
+		dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
+out:
+	return result;
+}
+
+/*
+ * Add an IE to the host's MMC
+ *
+ * @interval:    See WUSB1.0[8.5.3.1]
+ * @repeat_cnt:  See WUSB1.0[8.5.3.1]
+ * @handle:      See WUSB1.0[8.5.3.1]
+ * @wuie:        Pointer to the header of the WUSB IE data to add.
+ *               MUST BE allocated in a kmalloc buffer (no stack or
+ *               vmalloc).
+ *
+ * NOTE: the format of the WUSB IEs for MMCs is different from the
+ *       normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
+ *       Id in WUSB IEs). Standards...you gotta love'em.
+ */
+static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
+				u8 repeat_cnt, u8 handle,
+				struct wuie_hdr *wuie)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_ADD_MMC_IE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			interval << 8 | repeat_cnt,
+			handle << 8 | iface_no,
+			wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
+}
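+
+/*
+ * Illustration only (field order assumed from the WUSB 1.0 IE
+ * definitions, not something added by this patch): a WUSB IE as carried
+ * in an MMC leads with its length and only then its identifier,
+ *
+ *	struct wuie_hdr {
+ *		u8 bLength;		(length first...)
+ *		u8 bIEIdentifier;	(...then the IE Id)
+ *	} __packed;
+ *
+ * while a plain MBOA MAC IE carries the Id before the length.
+ */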
+
+/*
+ * Remove an IE from the host's MMC
+ *
+ * @handle:      See WUSB1.0[8.5.3.1]
+ */
+static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_REMOVE_MMC_IE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0, handle << 8 | iface_no,
+			NULL, 0, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Update device information for a given fake port
+ *
+ * @port_idx: Fake port to which device is connected (wusbhc index, not
+ *            USB port number).
+ */
+static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
+				   struct wusb_dev *wusb_dev)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	struct hwa_dev_info *dev_info;
+	int ret;
+
+	/* fill out the Device Info buffer and send it */
+	dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
+	if (!dev_info)
+		return -ENOMEM;
+	uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
+			   &wusb_dev->availability);
+	dev_info->bDeviceAddress = wusb_dev->addr;
+
+	/*
+	 * If the descriptors haven't been read yet, use a default PHY
+	 * rate of 53.3 Mbit/s only.  The correct value will be used
+	 * when this will be called again as part of the
+	 * authentication process (which occurs after the descriptors
+	 * have been read).
+	 */
+	if (wusb_dev->wusb_cap_descr)
+		dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
+	else
+		dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
+
+	ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_DEV_INFO,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0, wusb_dev->port_idx << 8 | iface_no,
+			dev_info, sizeof(struct hwa_dev_info),
+			1000 /* FIXME: arbitrary */);
+	kfree(dev_info);
+	return ret;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+			       const void *key, size_t key_size,
+			       u8 key_idx)
+{
+	int result = -ENOMEM;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	struct usb_key_descriptor *keyd;
+	size_t keyd_len;
+
+	keyd_len = sizeof(*keyd) + key_size;
+	keyd = kzalloc(keyd_len, GFP_KERNEL);
+	if (keyd == NULL)
+		return -ENOMEM;
+
+	keyd->bLength = keyd_len;
+	keyd->bDescriptorType = USB_DT_KEY;
+	keyd->tTKID[0] = (tkid >>  0) & 0xff;
+	keyd->tTKID[1] = (tkid >>  8) & 0xff;
+	keyd->tTKID[2] = (tkid >> 16) & 0xff;
+	memcpy(keyd->bKeyData, key, key_size);
+
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			USB_REQ_SET_DESCRIPTOR,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			USB_DT_KEY << 8 | key_idx,
+			port_idx << 8 | iface_no,
+			keyd, keyd_len, 1000 /* FIXME: arbitrary */);
+
+	memset(keyd, 0, keyd_len);	/* wipe the key material before freeing */
+	kfree(keyd);
+	return result;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+			      const void *key, size_t key_size)
+{
+	int result = -ENOMEM;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	u8 encryption_value;
+
+	/* Tell the host which key to use to talk to the device */
+	if (key) {
+		u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
+					    WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+		result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
+					     key, key_size, key_idx);
+		if (result < 0)
+			goto error_set_key;
+		encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
+	} else {
+		/* FIXME: this should come from wusbhc->etd[UNSECURE].value */
+		encryption_value = 0;
+	}
+
+	/* Set the encryption type for communicating with the device */
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			USB_REQ_SET_ENCRYPTION,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			encryption_value, port_idx << 8 | iface_no,
+			NULL, 0, 1000 /* FIXME: arbitrary */);
+	if (result < 0)
+		dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
+			"port index %u to %s (value %d): %d\n", port_idx,
+			wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
+			wusbhc->ccm1_etd->bEncryptionValue, result);
+error_set_key:
+	return result;
+}
+
+/*
+ * Set host's GTK key
+ */
+static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+			      const void *key, size_t key_size)
+{
+	u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+				    WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+	return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
+}
+
+/*
+ * Get the Wire Adapter class-specific descriptor
+ *
+ * NOTE: this descriptor comes with the big bundled configuration
+ *       descriptor that includes the interfaces' and endpoints', so
+ *       we just look for it in the cached copy kept by the USB stack.
+ *
+ * NOTE2: We convert LE fields to CPU order.
+ */
+static int wa_fill_descr(struct wahc *wa)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+	char *itr;
+	struct usb_device *usb_dev = wa->usb_dev;
+	struct usb_descriptor_header *hdr;
+	struct usb_wa_descriptor *wa_descr;
+	size_t itr_size, actconfig_idx;
+
+	actconfig_idx = usb_dev->actconfig - usb_dev->config;
+	itr = usb_dev->rawdescriptors[actconfig_idx];
+	itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+	while (itr_size >= sizeof(*hdr)) {
+		hdr = (struct usb_descriptor_header *) itr;
+		d_printf(3, dev, "Extra device descriptor: "
+			 "type %02x/%u bytes @ %zu (%zu left)\n",
+			 hdr->bDescriptorType, hdr->bLength,
+			 (itr - usb_dev->rawdescriptors[actconfig_idx]),
+			 itr_size);
+		if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
+			goto found;
+		itr += hdr->bLength;
+		itr_size -= hdr->bLength;
+	}
+	dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
+	return -ENODEV;
+
+found:
+	result = -EINVAL;
+	if (hdr->bLength > itr_size) {	/* is it available? */
+		dev_err(dev, "incomplete Wire Adapter Class descriptor "
+			"(%zu bytes left, %u needed)\n",
+			itr_size, hdr->bLength);
+		goto error;
+	}
+	if (hdr->bLength < sizeof(*wa->wa_descr)) {
+		dev_err(dev, "short Wire Adapter Class descriptor\n");
+		goto error;
+	}
+	wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
+	/* Make LE fields CPU order */
+	wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
+	wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
+	wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
+	if (wa_descr->bcdWAVersion > 0x0100)
+		dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
+			 (wa_descr->bcdWAVersion & 0xff00) >> 8,
+			 wa_descr->bcdWAVersion & 0x00ff);
+	result = 0;
+error:
+	return result;
+}
+
+static struct hc_driver hwahc_hc_driver = {
+	.description = "hwa-hcd",
+	.product_desc = "Wireless USB HWA host controller",
+	.hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
+	.irq = NULL,			/* FIXME */
+	.flags = HCD_USB2,		/* FIXME */
+	.reset = hwahc_op_reset,
+	.start = hwahc_op_start,
+	.pci_suspend = hwahc_op_suspend,
+	.pci_resume = hwahc_op_resume,
+	.stop = hwahc_op_stop,
+	.get_frame_number = hwahc_op_get_frame_number,
+	.urb_enqueue = hwahc_op_urb_enqueue,
+	.urb_dequeue = hwahc_op_urb_dequeue,
+	.endpoint_disable = hwahc_op_endpoint_disable,
+
+	.hub_status_data = wusbhc_rh_status_data,
+	.hub_control = wusbhc_rh_control,
+	.bus_suspend = wusbhc_rh_suspend,
+	.bus_resume = wusbhc_rh_resume,
+	.start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int hwahc_security_create(struct hwahc *hwahc)
+{
+	int result;
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+	struct usb_device *usb_dev = hwahc->wa.usb_dev;
+	struct device *dev = &usb_dev->dev;
+	struct usb_security_descriptor *secd;
+	struct usb_encryption_descriptor *etd;
+	void *itr, *top;
+	size_t itr_size, needed, bytes;
+	u8 index;
+	char buf[64];
+
+	/* Find the host's security descriptors in the config descr bundle */
+	index = usb_dev->actconfig - usb_dev->config;
+	itr = usb_dev->rawdescriptors[index];
+	itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+	top = itr + itr_size;
+	result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
+			le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
+			USB_DT_SECURITY, (void **) &secd);
+	if (result == -1) {
+		dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
+		return 0;
+	}
+	needed = sizeof(*secd);
+	if (top - (void *)secd < needed) {
+		dev_err(dev, "BUG? Not enough data to process security "
+			"descriptor header (%zu bytes left vs %zu needed)\n",
+			top - (void *) secd, needed);
+		return 0;
+	}
+	needed = le16_to_cpu(secd->wTotalLength);
+	if (top - (void *)secd < needed) {
+		dev_err(dev, "BUG? Not enough data to process security "
+			"descriptors (%zu bytes left vs %zu needed)\n",
+			top - (void *) secd, needed);
+		return 0;
+	}
+	/* Walk over the sec descriptors and store CCM1's on wusbhc */
+	itr = (void *) secd + sizeof(*secd);
+	top = (void *) secd + le16_to_cpu(secd->wTotalLength);
+	index = 0;
+	bytes = 0;
+	while (itr < top) {
+		etd = itr;
+		if (top - itr < sizeof(*etd)) {
+			dev_err(dev, "BUG: bad host security descriptor; "
+				"not enough data (%zu vs %zu left)\n",
+				top - itr, sizeof(*etd));
+			break;
+		}
+		if (etd->bLength < sizeof(*etd)) {
+			dev_err(dev, "BUG: bad host encryption descriptor; "
+				"descriptor is too short "
+				"(%zu vs %zu needed)\n",
+				(size_t)etd->bLength, sizeof(*etd));
+			break;
+		}
+		itr += etd->bLength;
+		bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
+				  "%s (0x%02x) ",
+				  wusb_et_name(etd->bEncryptionType),
+				  etd->bEncryptionValue);
+		wusbhc->ccm1_etd = etd;
+	}
+	dev_info(dev, "supported encryption types: %s\n", buf);
+	if (wusbhc->ccm1_etd == NULL) {
+		dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
+		return 0;
+	}
+	/* Pretty print what we support */
+	return 0;
+}
+
+static void hwahc_security_release(struct hwahc *hwahc)
+{
+	/* nothing to do here so far... */
+}
+
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
+{
+	int result;
+	struct device *dev = &iface->dev;
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+	struct wahc *wa = &hwahc->wa;
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+	wa->usb_dev = usb_get_dev(usb_dev);	/* bind the USB device */
+	wa->usb_iface = usb_get_intf(iface);
+	wusbhc->dev = dev;
+	wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
+	if (wusbhc->uwb_rc == NULL) {
+		result = -ENODEV;
+		dev_err(dev, "Cannot get associated UWB Host Controller\n");
+		goto error_rc_get;
+	}
+	result = wa_fill_descr(wa);	/* Get the device descriptor */
+	if (result < 0)
+		goto error_fill_descriptor;
+	if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
+		dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
+			"adapter (%u ports)\n", wa->wa_descr->bNumPorts);
+		wusbhc->ports_max = USB_MAXCHILDREN;
+	} else {
+		wusbhc->ports_max = wa->wa_descr->bNumPorts;
+	}
+	wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
+	wusbhc->start = __hwahc_op_wusbhc_start;
+	wusbhc->stop = __hwahc_op_wusbhc_stop;
+	wusbhc->mmcie_add = __hwahc_op_mmcie_add;
+	wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
+	wusbhc->dev_info_set = __hwahc_op_dev_info_set;
+	wusbhc->bwa_set = __hwahc_op_bwa_set;
+	wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
+	wusbhc->set_ptk = __hwahc_op_set_ptk;
+	wusbhc->set_gtk = __hwahc_op_set_gtk;
+	result = hwahc_security_create(hwahc);
+	if (result < 0) {
+		dev_err(dev, "Can't initialize security: %d\n", result);
+		goto error_security_create;
+	}
+	wa->wusb = wusbhc;	/* FIXME: ugly, need to fix */
+	result = wusbhc_create(&hwahc->wusbhc);
+	if (result < 0) {
+		dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
+		goto error_wusbhc_create;
+	}
+	result = wa_create(&hwahc->wa, iface);
+	if (result < 0)
+		goto error_wa_create;
+	return 0;
+
+error_wa_create:
+	wusbhc_destroy(&hwahc->wusbhc);
+error_wusbhc_create:
+	/* WA Descr fill allocs no resources */
+error_security_create:
+error_fill_descriptor:
+	uwb_rc_put(wusbhc->uwb_rc);
+error_rc_get:
+	usb_put_intf(iface);
+	usb_put_dev(usb_dev);
+	return result;
+}
+
+static void hwahc_destroy(struct hwahc *hwahc)
+{
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+
+	d_fnstart(1, NULL, "(hwahc %p)\n", hwahc);
+	mutex_lock(&wusbhc->mutex);
+	__wa_destroy(&hwahc->wa);
+	wusbhc_destroy(&hwahc->wusbhc);
+	hwahc_security_release(hwahc);
+	hwahc->wusbhc.dev = NULL;
+	uwb_rc_put(wusbhc->uwb_rc);
+	usb_put_intf(hwahc->wa.usb_iface);
+	usb_put_dev(hwahc->wa.usb_dev);
+	mutex_unlock(&wusbhc->mutex);
+	d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc);
+}
+
+static void hwahc_init(struct hwahc *hwahc)
+{
+	wa_init(&hwahc->wa);
+}
+
+static int hwahc_probe(struct usb_interface *usb_iface,
+		       const struct usb_device_id *id)
+{
+	int result;
+	struct usb_hcd *usb_hcd;
+	struct wusbhc *wusbhc;
+	struct hwahc *hwahc;
+	struct device *dev = &usb_iface->dev;
+
+	d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id);
+	result = -ENOMEM;
+	usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
+	if (usb_hcd == NULL) {
+		dev_err(dev, "unable to allocate instance\n");
+		goto error_alloc;
+	}
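+	/*
+	 * There is no hardware interrupt line here (completions come back
+	 * over USB), so mark the HCD as wireless and pretend an IRQ has
+	 * already been seen, presumably to keep usbcore's IRQ sanity
+	 * checks quiet.
+	 */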
+	usb_hcd->wireless = 1;
+	usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
+	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	hwahc_init(hwahc);
+	result = hwahc_create(hwahc, usb_iface);
+	if (result < 0) {
+		dev_err(dev, "Cannot initialize internals: %d\n", result);
+		goto error_hwahc_create;
+	}
+	result = usb_add_hcd(usb_hcd, 0, 0);
+	if (result < 0) {
+		dev_err(dev, "Cannot add HCD: %d\n", result);
+		goto error_add_hcd;
+	}
+	result = wusbhc_b_create(&hwahc->wusbhc);
+	if (result < 0) {
+		dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
+		goto error_wusbhc_b_create;
+	}
+	d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id);
+	return 0;
+
+error_wusbhc_b_create:
+	usb_remove_hcd(usb_hcd);
+error_add_hcd:
+	hwahc_destroy(hwahc);
+error_hwahc_create:
+	usb_put_hcd(usb_hcd);
+error_alloc:
+	d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result);
+	return result;
+}
+
+static void hwahc_disconnect(struct usb_interface *usb_iface)
+{
+	struct usb_hcd *usb_hcd;
+	struct wusbhc *wusbhc;
+	struct hwahc *hwahc;
+
+	usb_hcd = usb_get_intfdata(usb_iface);
+	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface);
+	wusbhc_b_destroy(&hwahc->wusbhc);
+	usb_remove_hcd(usb_hcd);
+	hwahc_destroy(hwahc);
+	usb_put_hcd(usb_hcd);
+	d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc,
+		usb_iface);
+}
+
+/* USB device IDs that we handle */
+static struct usb_device_id hwahc_id_table[] = {
+	/* FIXME: use class labels for this */
+	{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
+	{},
+};
+MODULE_DEVICE_TABLE(usb, hwahc_id_table);
+
+static struct usb_driver hwahc_driver = {
+	.name =		"hwa-hc",
+	.probe =	hwahc_probe,
+	.disconnect =	hwahc_disconnect,
+	.id_table =	hwahc_id_table,
+};
+
+static int __init hwahc_driver_init(void)
+{
+	int result;
+	result = usb_register(&hwahc_driver);
+	if (result < 0) {
+		printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n",
+		       result);
+		goto error_usb_register;
+	}
+	return 0;
+
+error_usb_register:
+	return result;
+
+}
+module_init(hwahc_driver_init);
+
+static void __exit hwahc_driver_exit(void)
+{
+	usb_deregister(&hwahc_driver);
+}
+module_exit(hwahc_driver_exit);
+
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Host Wire Adapter USB Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index ce1ca0b..4dda31b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1562,11 +1562,12 @@
 {
 	struct usb_hcd *hcd;
 	struct isp116x *isp116x;
-	struct resource *addr, *data;
+	struct resource *addr, *data, *ires;
 	void __iomem *addr_reg;
 	void __iomem *data_reg;
 	int irq;
 	int ret = 0;
+	unsigned long irqflags;
 
 	if (pdev->num_resources < 3) {
 		ret = -ENODEV;
@@ -1575,12 +1576,16 @@
 
 	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	irq = platform_get_irq(pdev, 0);
-	if (!addr || !data || irq < 0) {
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	if (!addr || !data || !ires) {
 		ret = -ENODEV;
 		goto err1;
 	}
 
+	irq = ires->start;
+	irqflags = ires->flags & IRQF_TRIGGER_MASK;
+
 	if (pdev->dev.dma_mask) {
 		DBG("DMA not supported\n");
 		ret = -EINVAL;
@@ -1634,7 +1639,7 @@
 		goto err6;
 	}
 
-	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+	ret = usb_add_hcd(hcd, irq, irqflags | IRQF_DISABLED);
 	if (ret)
 		goto err6;
 
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 051ef7b..af849f5 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -218,7 +218,7 @@
 	 * and reading back and checking the contents are same or not
 	 */
 	if (reg_data != 0xFACE) {
-		err("scratch register mismatch %x", reg_data);
+		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
 		goto clean;
 	}
 
@@ -232,9 +232,10 @@
 	hcd = isp1760_register(pci_mem_phy0, length, dev->irq,
 		IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
 		devflags);
-	pci_set_drvdata(dev, hcd);
-	if (!hcd)
+	if (!IS_ERR(hcd)) {
+		pci_set_drvdata(dev, hcd);
 		return 0;
+	}
 clean:
 	status = -ENODEV;
 	iounmap(iobase);
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 7cef1d2..d3269656 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -649,7 +649,7 @@
 	ohci_dbg_sw (ohci, &next, &size,
 		"bus %s, device %s\n"
 		"%s\n"
-		"%s version " DRIVER_VERSION "\n",
+		"%s\n",
 		hcd->self.controller->bus->name,
 		dev_name(hcd->self.controller),
 		hcd->product_desc,
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8990196..8aa3f45 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -46,7 +46,6 @@
 
 #include "../core/hcd.h"
 
-#define DRIVER_VERSION "2006 August 04"
 #define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
 #define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
 
@@ -984,10 +983,8 @@
 
 /*-------------------------------------------------------------------------*/
 
-#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC
-
 MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION (DRIVER_INFO);
+MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE ("GPL");
 
 #ifdef CONFIG_PCI
@@ -1078,12 +1075,18 @@
 #define SM501_OHCI_DRIVER	ohci_hcd_sm501_driver
 #endif
 
+#ifdef CONFIG_MFD_TC6393XB
+#include "ohci-tmio.c"
+#define TMIO_OHCI_DRIVER	ohci_hcd_tmio_driver
+#endif
+
 #if	!defined(PCI_DRIVER) &&		\
 	!defined(PLATFORM_DRIVER) &&	\
 	!defined(OF_PLATFORM_DRIVER) &&	\
 	!defined(SA1111_DRIVER) &&	\
 	!defined(PS3_SYSTEM_BUS_DRIVER) && \
 	!defined(SM501_OHCI_DRIVER) && \
+	!defined(TMIO_OHCI_DRIVER) && \
 	!defined(SSB_OHCI_DRIVER)
 #error "missing bus glue for ohci-hcd"
 #endif
@@ -1095,9 +1098,10 @@
 	if (usb_disabled())
 		return -ENODEV;
 
-	printk (KERN_DEBUG "%s: " DRIVER_INFO "\n", hcd_name);
+	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
 	pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
 		sizeof (struct ed), sizeof (struct td));
+	set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
 
 #ifdef DEBUG
 	ohci_debug_root = debugfs_create_dir("ohci", NULL);
@@ -1149,13 +1153,25 @@
 		goto error_sm501;
 #endif
 
+#ifdef TMIO_OHCI_DRIVER
+	retval = platform_driver_register(&TMIO_OHCI_DRIVER);
+	if (retval < 0)
+		goto error_tmio;
+#endif
+
 	return retval;
 
 	/* Error path */
+#ifdef TMIO_OHCI_DRIVER
+	platform_driver_unregister(&TMIO_OHCI_DRIVER);
+ error_tmio:
+#endif
 #ifdef SM501_OHCI_DRIVER
+	platform_driver_unregister(&SM501_OHCI_DRIVER);
  error_sm501:
 #endif
 #ifdef SSB_OHCI_DRIVER
+	ssb_driver_unregister(&SSB_OHCI_DRIVER);
  error_ssb:
 #endif
 #ifdef PCI_DRIVER
@@ -1184,12 +1200,16 @@
  error_debug:
 #endif
 
+	clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
 	return retval;
 }
 module_init(ohci_hcd_mod_init);
 
 static void __exit ohci_hcd_mod_exit(void)
 {
+#ifdef TMIO_OHCI_DRIVER
+	platform_driver_unregister(&TMIO_OHCI_DRIVER);
+#endif
 #ifdef SM501_OHCI_DRIVER
 	platform_driver_unregister(&SM501_OHCI_DRIVER);
 #endif
@@ -1214,6 +1234,7 @@
 #ifdef DEBUG
 	debugfs_remove(ohci_debug_root);
 #endif
+	clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
 }
 module_exit(ohci_hcd_mod_exit);
 
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 7ea9a7b..32bbce9 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -359,21 +359,24 @@
 
 /* Carry out polling-, autostop-, and autoresume-related state changes */
 static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
-		int any_connected)
+		int any_connected, int rhsc_status)
 {
 	int	poll_rh = 1;
-	int	rhsc;
+	int	rhsc_enable;
 
-	rhsc = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC;
+	/* Some broken controllers never turn off RHSC in the interrupt
+	 * status register.  For their sake we won't re-enable RHSC
+	 * interrupts if the interrupt bit is already active.
+	 */
+	rhsc_enable = ohci_readl(ohci, &ohci->regs->intrenable) &
+			OHCI_INTR_RHSC;
+
 	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
-
 	case OHCI_USB_OPER:
-		/* If no status changes are pending, enable status-change
-		 * interrupts.
-		 */
-		if (!rhsc && !changed) {
-			rhsc = OHCI_INTR_RHSC;
-			ohci_writel(ohci, rhsc, &ohci->regs->intrenable);
+		/* If no status changes are pending, enable RHSC interrupts. */
+		if (!rhsc_enable && !rhsc_status && !changed) {
+			rhsc_enable = OHCI_INTR_RHSC;
+			ohci_writel(ohci, rhsc_enable, &ohci->regs->intrenable);
 		}
 
 		/* Keep on polling until we know a device is connected
@@ -383,7 +386,7 @@
 			if (any_connected ||
 					!device_may_wakeup(&ohci_to_hcd(ohci)
 						->self.root_hub->dev)) {
-				if (rhsc)
+				if (rhsc_enable)
 					poll_rh = 0;
 			} else {
 				ohci->autostop = 1;
@@ -396,34 +399,45 @@
 				ohci->autostop = 0;
 				ohci->next_statechange = jiffies +
 						STATECHANGE_DELAY;
-			} else if (rhsc && time_after_eq(jiffies,
+			} else if (time_after_eq(jiffies,
 						ohci->next_statechange)
 					&& !ohci->ed_rm_list
 					&& !(ohci->hc_control &
 						OHCI_SCHED_ENABLES)) {
 				ohci_rh_suspend(ohci, 1);
-				poll_rh = 0;
+				if (rhsc_enable)
+					poll_rh = 0;
 			}
 		}
 		break;
 
-	/* if there is a port change, autostart or ask to be resumed */
 	case OHCI_USB_SUSPEND:
 	case OHCI_USB_RESUME:
+		/* if there is a port change, autostart or ask to be resumed */
 		if (changed) {
 			if (ohci->autostop)
 				ohci_rh_resume(ohci);
 			else
 				usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
-		} else {
-			if (!rhsc && (ohci->autostop ||
-					ohci_to_hcd(ohci)->self.root_hub->
-						do_remote_wakeup))
-				ohci_writel(ohci, OHCI_INTR_RHSC,
-						&ohci->regs->intrenable);
 
-			/* everything is idle, no need for polling */
+		/* If remote wakeup is disabled, stop polling */
+		} else if (!ohci->autostop &&
+				!ohci_to_hcd(ohci)->self.root_hub->
+					do_remote_wakeup) {
 			poll_rh = 0;
+
+		} else {
+			/* If no status changes are pending,
+			 * enable RHSC interrupts
+			 */
+			if (!rhsc_enable && !rhsc_status) {
+				rhsc_enable = OHCI_INTR_RHSC;
+				ohci_writel(ohci, rhsc_enable,
+						&ohci->regs->intrenable);
+			}
+			/* Keep polling until RHSC is enabled */
+			if (rhsc_enable)
+				poll_rh = 0;
 		}
 		break;
 	}
@@ -441,18 +455,22 @@
  * autostop isn't used when CONFIG_PM is turned off.
  */
 static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
-		int any_connected)
+		int any_connected, int rhsc_status)
 {
 	/* If RHSC is enabled, don't poll */
 	if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
 		return 0;
 
-	/* If no status changes are pending, enable status-change interrupts */
-	if (!changed) {
-		ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
-		return 0;
-	}
-	return 1;
+	/* If status changes are pending, continue polling.
+	 * Conversely, if no status changes are pending but the RHSC
+	 * status bit was set, then RHSC may be broken so continue polling.
+	 */
+	if (changed || rhsc_status)
+		return 1;
+
+	/* It's safe to re-enable RHSC interrupts */
+	ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
+	return 0;
 }
 
 #endif	/* CONFIG_PM */
@@ -467,6 +485,7 @@
 	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
 	int		i, changed = 0, length = 1;
 	int		any_connected = 0;
+	int		rhsc_status;
 	unsigned long	flags;
 
 	spin_lock_irqsave (&ohci->lock, flags);
@@ -492,12 +511,10 @@
 		length++;
 	}
 
-	/* Some broken controllers never turn off RHCS in the interrupt
-	 * status register.  For their sake we won't re-enable RHSC
-	 * interrupts if the flag is already set.
-	 */
-	if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
-		changed = 1;
+	/* Clear the RHSC status flag before reading the port statuses */
+	ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrstatus);
+	rhsc_status = ohci_readl(ohci, &ohci->regs->intrstatus) &
+			OHCI_INTR_RHSC;
 
 	/* look at each port */
 	for (i = 0; i < ohci->num_ports; i++) {
@@ -517,7 +534,7 @@
 	}
 
 	hcd->poll_rh = ohci_root_hub_state_changes(ohci, changed,
-			any_connected);
+			any_connected, rhsc_status);
 
 done:
 	spin_unlock_irqrestore (&ohci->lock, flags);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 5221856..91697bd 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -231,7 +231,7 @@
 
 	omap_ohci_clock_power(1);
 
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		omap_1510_local_bus_power(1);
 		omap_1510_local_bus_init();
 	}
@@ -319,7 +319,7 @@
 	if (IS_ERR(usb_host_ck))
 		return PTR_ERR(usb_host_ck);
 
-	if (!cpu_is_omap1510())
+	if (!cpu_is_omap15xx())
 		usb_dc_ck = clk_get(0, "usb_dc_ck");
 	else
 		usb_dc_ck = clk_get(0, "lb_ck");
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 658a2a9..e306ca6 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -331,7 +331,7 @@
 
 	int ret = 0, irq;
 
-	dev_dbg(&pdev->dev, "%s: " DRIVER_INFO " (pnx4008)\n", hcd_name);
+	dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (pnx4008)\n", hcd_name);
 	if (usb_disabled()) {
 		err("USB is disabled");
 		ret = -ENODEV;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
new file mode 100644
index 0000000..f9f134a
--- /dev/null
+++ b/drivers/usb/host/ohci-tmio.c
@@ -0,0 +1,376 @@
+/*
+ * OHCI HCD(Host Controller Driver) for USB.
+ *
+ *(C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ *(C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *(C) Copyright 2002 Hewlett-Packard Company
+ *
+ * Bus glue for Toshiba Mobile IO(TMIO) Controller's OHCI core
+ * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com>
+ * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com>
+ *
+ * This is known to work with the following variants:
+ *	TC6393XB revision 3	(32kB SRAM)
+ *
+ * The TMIO's OHCI core DMAs through a small internal buffer that
+ * is directly addressable by the CPU.
+ *
+ * Written from sparse documentation from Toshiba and Sharp's driver
+ * for the 2.4 kernel,
+ *	usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/sched.h>*/
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/dma-mapping.h>
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * USB Host Controller Configuration Register
+ */
+#define CCR_REVID	0x08	/* b Revision ID				*/
+#define CCR_BASE	0x10	/* l USB Control Register Base Address Low	*/
+#define CCR_ILME	0x40	/* b Internal Local Memory Enable		*/
+#define CCR_PM		0x4c	/* w Power Management			*/
+#define CCR_INTC	0x50	/* b INT Control				*/
+#define CCR_LMW1L	0x54	/* w Local Memory Window 1 LMADRS Low	*/
+#define CCR_LMW1H	0x56	/* w Local Memory Window 1 LMADRS High	*/
+#define CCR_LMW1BL	0x58	/* w Local Memory Window 1 Base Address Low	*/
+#define CCR_LMW1BH	0x5A	/* w Local Memory Window 1 Base Address High	*/
+#define CCR_LMW2L	0x5C	/* w Local Memory Window 2 LMADRS Low	*/
+#define CCR_LMW2H	0x5E	/* w Local Memory Window 2 LMADRS High	*/
+#define CCR_LMW2BL	0x60	/* w Local Memory Window 2 Base Address Low	*/
+#define CCR_LMW2BH	0x62	/* w Local Memory Window 2 Base Address High	*/
+#define CCR_MISC	0xFC	/* b MISC					*/
+
+#define CCR_PM_GKEN      0x0001
+#define CCR_PM_CKRNEN    0x0002
+#define CCR_PM_USBPW1    0x0004
+#define CCR_PM_USBPW2    0x0008
+#define CCR_PM_USBPW3    0x0008
+#define CCR_PM_PMEE      0x0100
+#define CCR_PM_PMES      0x8000
+
+/*-------------------------------------------------------------------------*/
+
+struct tmio_hcd {
+	void __iomem		*ccr;
+	spinlock_t		lock; /* protects RMW cycles */
+};
+
+#define hcd_to_tmio(hcd)	((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1))
+
+/*-------------------------------------------------------------------------*/
+
+static void tmio_write_pm(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	u16 pm;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tmio->lock, flags);
+
+	pm = CCR_PM_GKEN | CCR_PM_CKRNEN |
+	     CCR_PM_PMEE | CCR_PM_PMES;
+
+	tmio_iowrite16(pm, tmio->ccr + CCR_PM);
+	spin_unlock_irqrestore(&tmio->lock, flags);
+}
+
+static void tmio_stop_hc(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	u16 pm;
+
+	pm = CCR_PM_GKEN | CCR_PM_CKRNEN;
+	switch (ohci->num_ports) {
+		default:
+			dev_err(&dev->dev, "Unsupported number of ports: %d\n", ohci->num_ports);
+			/* fall through */
+		case 3:
+			pm |= CCR_PM_USBPW3;
+			/* fall through */
+		case 2:
+			pm |= CCR_PM_USBPW2;
+			/* fall through */
+		case 1:
+			pm |= CCR_PM_USBPW1;
+	}
+	tmio_iowrite8(0, tmio->ccr + CCR_INTC);
+	tmio_iowrite8(0, tmio->ccr + CCR_ILME);
+	tmio_iowrite16(0, tmio->ccr + CCR_BASE);
+	tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2);
+	tmio_iowrite16(pm, tmio->ccr + CCR_PM);
+}
+
+static void tmio_start_hc(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	unsigned long base = hcd->rsrc_start;
+
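+	/*
+	 * Bring up power and clocks, program the USB control register
+	 * base address (written as two 16-bit halves), enable the
+	 * internal local memory window, then program the INT control
+	 * register.
+	 */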
+	tmio_write_pm(dev);
+	tmio_iowrite16(base, tmio->ccr + CCR_BASE);
+	tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 2);
+	tmio_iowrite8(1, tmio->ccr + CCR_ILME);
+	tmio_iowrite8(2, tmio->ccr + CCR_INTC);
+
+	dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
+			tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+}
+
+static int ohci_tmio_start(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	int ret;
+
+	if ((ret = ohci_init(ohci)) < 0)
+		return ret;
+
+	if ((ret = ohci_run(ohci)) < 0) {
+		err("can't start %s", hcd->self.bus_name);
+		ohci_stop(hcd);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct hc_driver ohci_tmio_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"TMIO OHCI USB Host Controller",
+	.hcd_priv_size =	sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd),
+
+	/* generic hardware linkage */
+	.irq =			ohci_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM,
+
+	/* basic lifecycle operations */
+	.start =		ohci_tmio_start,
+	.stop =			ohci_stop,
+	.shutdown =		ohci_shutdown,
+
+	/* managing i/o requests and associated device resources */
+	.urb_enqueue =		ohci_urb_enqueue,
+	.urb_dequeue =		ohci_urb_dequeue,
+	.endpoint_disable =	ohci_endpoint_disable,
+
+	/* scheduling support */
+	.get_frame_number =	ohci_get_frame,
+
+	/* root hub support */
+	.hub_status_data =	ohci_hub_status_data,
+	.hub_control =		ohci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend =		ohci_bus_suspend,
+	.bus_resume =		ohci_bus_resume,
+#endif
+	.start_port_reset =	ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver ohci_hcd_tmio_driver;
+
+static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
+{
+	struct mfd_cell *cell = dev->dev.platform_data;
+	struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
+	int irq = platform_get_irq(dev, 0);
+	struct tmio_hcd *tmio;
+	struct ohci_hcd *ohci;
+	struct usb_hcd *hcd;
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (!cell)
+		return -EINVAL;
+
+	hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev->dev.bus_id);
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto err_usb_create_hcd;
+	}
+
+	hcd->rsrc_start = regs->start;
+	hcd->rsrc_len = regs->end - regs->start + 1;
+
+	tmio = hcd_to_tmio(hcd);
+
+	spin_lock_init(&tmio->lock);
+
+	tmio->ccr = ioremap(config->start, config->end - config->start + 1);
+	if (!tmio->ccr) {
+		ret = -ENOMEM;
+		goto err_ioremap_ccr;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		ret = -ENOMEM;
+		goto err_ioremap_regs;
+	}
+
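+	/*
+	 * Publish the on-chip SRAM (the third MEM resource) through the
+	 * DMA API so that the HCD's coherent allocations land in the
+	 * small internal buffer the OHCI core can actually address (see
+	 * the note at the top of this file).
+	 */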
+	if (!dma_declare_coherent_memory(&dev->dev, sram->start,
+				sram->start,
+				sram->end - sram->start + 1,
+				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) {
+		ret = -EBUSY;
+		goto err_dma_declare;
+	}
+
+	if (cell->enable) {
+		ret = cell->enable(dev);
+		if (ret)
+			goto err_enable;
+	}
+
+	tmio_start_hc(dev);
+	ohci = hcd_to_ohci(hcd);
+	ohci_hcd_init(ohci);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+	if (ret)
+		goto err_add_hcd;
+
+	return 0;
+
+err_add_hcd:
+	tmio_stop_hc(dev);
+	if (cell->disable)
+		cell->disable(dev);
+err_enable:
+	dma_release_declared_memory(&dev->dev);
+err_dma_declare:
+	iounmap(hcd->regs);
+err_ioremap_regs:
+	iounmap(tmio->ccr);
+err_ioremap_ccr:
+	usb_put_hcd(hcd);
+err_usb_create_hcd:
+
+	return ret;
+}
+
+static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	struct mfd_cell *cell = dev->dev.platform_data;
+
+	usb_remove_hcd(hcd);
+	tmio_stop_hc(dev);
+	if (cell->disable)
+		cell->disable(dev);
+	dma_release_declared_memory(&dev->dev);
+	iounmap(hcd->regs);
+	iounmap(tmio->ccr);
+	usb_put_hcd(hcd);
+
+	platform_set_drvdata(dev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct mfd_cell *cell = dev->dev.platform_data;
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	unsigned long flags;
+	u8 misc;
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	spin_lock_irqsave(&tmio->lock, flags);
+
+	misc = tmio_ioread8(tmio->ccr + CCR_MISC);
+	misc |= 1 << 3; /* USSUSP */
+	tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
+
+	spin_unlock_irqrestore(&tmio->lock, flags);
+
+	if (cell->suspend) {
+		ret = cell->suspend(dev);
+		if (ret)
+			return ret;
+	}
+
+	hcd->state = HC_STATE_SUSPENDED;
+
+	return 0;
+}
+
+static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
+{
+	struct mfd_cell *cell = dev->dev.platform_data;
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	unsigned long flags;
+	u8 misc;
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	if (cell->resume) {
+		ret = cell->resume(dev);
+		if (ret)
+			return ret;
+	}
+
+	tmio_start_hc(dev);
+
+	spin_lock_irqsave(&tmio->lock, flags);
+
+	misc = tmio_ioread8(tmio->ccr + CCR_MISC);
+	misc &= ~(1 << 3); /* USSUSP */
+	tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
+
+	spin_unlock_irqrestore(&tmio->lock, flags);
+
+	ohci_finish_controller_resume(hcd);
+
+	return 0;
+}
+#else
+#define ohci_hcd_tmio_drv_suspend NULL
+#define ohci_hcd_tmio_drv_resume NULL
+#endif
+
+static struct platform_driver ohci_hcd_tmio_driver = {
+	.probe		= ohci_hcd_tmio_drv_probe,
+	.remove		= __devexit_p(ohci_hcd_tmio_drv_remove),
+	.shutdown	= usb_hcd_platform_shutdown,
+	.suspend	= ohci_hcd_tmio_drv_suspend,
+	.resume		= ohci_hcd_tmio_drv_resume,
+	.driver		= {
+		.name	= "tmio-ohci",
+		.owner	= THIS_MODULE,
+	},
+};
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index faf622e..222011f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -540,15 +540,7 @@
  * Big-endian read/write functions are arch-specific.
  * Other arches can be added if/when they're needed.
  *
- * REVISIT: arch/powerpc now has readl/writel_be, so the
- * definition below can die once the STB04xxx support is
- * finally ported over.
  */
-#if defined(CONFIG_PPC) && !defined(CONFIG_PPC_MERGE)
-#define readl_be(addr)		in_be32((__force unsigned *)addr)
-#define writel_be(val, addr)	out_be32((__force unsigned *)addr, val)
-#endif
-
 static inline unsigned int _ohci_readl (const struct ohci_hcd *ohci,
 					__hc32 __iomem * regs)
 {
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index ea7126f..c18d879 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -66,7 +66,7 @@
 module_param(endian, ushort, 0644);
 MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
 
-static unsigned short irq_sense = INTL;
+static unsigned short irq_sense = 0xff;
 module_param(irq_sense, ushort, 0644);
 MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
 		"(default=32)");
@@ -118,7 +118,7 @@
 		r8a66597_write(r8a66597, SCKE, SYSCFG0);
 		tmp = r8a66597_read(r8a66597, SYSCFG0);
 		if (i++ > 1000) {
-			err("register access fail.");
+			printk(KERN_ERR "r8a66597: register access fail.\n");
 			return -ENXIO;
 		}
 	} while ((tmp & SCKE) != SCKE);
@@ -128,7 +128,7 @@
 		r8a66597_write(r8a66597, USBE, SYSCFG0);
 		tmp = r8a66597_read(r8a66597, SYSCFG0);
 		if (i++ > 1000) {
-			err("register access fail.");
+			printk(KERN_ERR "r8a66597: register access fail.\n");
 			return -ENXIO;
 		}
 	} while ((tmp & USBE) != USBE);
@@ -141,7 +141,7 @@
 		msleep(1);
 		tmp = r8a66597_read(r8a66597, SYSCFG0);
 		if (i++ > 500) {
-			err("register access fail.");
+			printk(KERN_ERR "r8a66597: register access fail.\n");
 			return -ENXIO;
 		}
 	} while ((tmp & SCKE) != SCKE);
@@ -265,7 +265,7 @@
 	if (root_port) {
 		*root_port = (devpath[0] & 0x0F) - 1;
 		if (*root_port >= R8A66597_MAX_ROOT_HUB)
-			err("illegal root port number");
+			printk(KERN_ERR "r8a66597: Illegal root port number.\n");
 	}
 	if (hub_port)
 		*hub_port = devpath[2] & 0x0F;
@@ -286,7 +286,7 @@
 		usbspd = HSMODE;
 		break;
 	default:
-		err("unknown speed");
+		printk(KERN_ERR "r8a66597: unknown speed\n");
 		break;
 	}
 
@@ -385,7 +385,7 @@
 	struct r8a66597_device *dev;
 
 	if (is_hub_limit(urb->dev->devpath)) {
-		err("Externel hub limit reached.");
+		dev_err(&urb->dev->dev, "External hub limit reached.\n");
 		return 0;
 	}
 
@@ -406,8 +406,9 @@
 		return addr;
 	}
 
-	err("cannot communicate with a USB device more than 10.(%x)",
-	    r8a66597->address_map);
+	dev_err(&urb->dev->dev,
+		"cannot communicate with more than 10 USB devices (%x)\n",
+		r8a66597->address_map);
 
 	return 0;
 }
@@ -447,7 +448,8 @@
 	do {
 		tmp = r8a66597_read(r8a66597, reg);
 		if (i++ > 1000000) {
-			err("register%lx, loop %x is timeout", reg, loop);
+			printk(KERN_ERR "r8a66597: register%lx, loop %x "
+			       "is timeout\n", reg, loop);
 			break;
 		}
 		ndelay(1);
@@ -675,7 +677,7 @@
 			array[i++] = 1;
 		break;
 	default:
-		err("Illegal type");
+		printk(KERN_ERR "r8a66597: Illegal type\n");
 		return 0;
 	}
 
@@ -705,7 +707,7 @@
 		r8a66597_type = R8A66597_ISO;
 		break;
 	default:
-		err("Illegal type");
+		printk(KERN_ERR "r8a66597: Illegal type\n");
 		r8a66597_type = 0x0000;
 		break;
 	}
@@ -724,7 +726,7 @@
 	else if (check_interrupt(pipenum))
 		bufnum = 4 + (pipenum - 6);
 	else
-		err("Illegal pipenum (%d)", pipenum);
+		printk(KERN_ERR "r8a66597: Illegal pipenum (%d)\n", pipenum);
 
 	return bufnum;
 }
@@ -740,7 +742,7 @@
 	else if (check_interrupt(pipenum))
 		buf_bsize = 0;
 	else
-		err("Illegal pipenum (%d)", pipenum);
+		printk(KERN_ERR "r8a66597: Illegal pipenum (%d)\n", pipenum);
 
 	return buf_bsize;
 }
@@ -760,10 +762,12 @@
 			if ((r8a66597->dma_map & (1 << i)) != 0)
 				continue;
 
-			info("address %d, EndpointAddress 0x%02x use DMA FIFO",
-			     usb_pipedevice(urb->pipe),
-			     info->dir_in ? USB_ENDPOINT_DIR_MASK + info->epnum
-					    : info->epnum);
+			dev_info(&dev->udev->dev,
+				 "address %d, EndpointAddress 0x%02x use "
+				 "DMA FIFO\n", usb_pipedevice(urb->pipe),
+				 info->dir_in ?
+				 	USB_ENDPOINT_DIR_MASK + info->epnum
+					: info->epnum);
 
 			r8a66597->dma_map |= 1 << i;
 			dev->dma_map |= 1 << i;
@@ -1187,7 +1191,7 @@
 		prepare_status_packet(r8a66597, td);
 		break;
 	default:
-		err("invalid type.");
+		printk(KERN_ERR "r8a66597: invalid type.\n");
 		break;
 	}
 
@@ -1295,7 +1299,7 @@
 	if (unlikely((tmp & FRDY) == 0)) {
 		pipe_stop(r8a66597, td->pipe);
 		pipe_irq_disable(r8a66597, pipenum);
-		err("in fifo not ready (%d)", pipenum);
+		printk(KERN_ERR "r8a66597: in fifo not ready (%d)\n", pipenum);
 		finish_request(r8a66597, td, pipenum, td->urb, -EPIPE);
 		return;
 	}
@@ -1370,7 +1374,7 @@
 	if (unlikely((tmp & FRDY) == 0)) {
 		pipe_stop(r8a66597, td->pipe);
 		pipe_irq_disable(r8a66597, pipenum);
-		err("out write fifo not ready. (%d)", pipenum);
+		printk(KERN_ERR "r8a66597: out fifo not ready (%d)\n", pipenum);
 		finish_request(r8a66597, td, pipenum, urb, -EPIPE);
 		return;
 	}
@@ -2005,7 +2009,7 @@
 		return dev;
 	}
 
-	err("get_r8a66597_device fail.(%d)\n", addr);
+	printk(KERN_ERR "r8a66597: get_r8a66597_device fail.(%d)\n", addr);
 	return NULL;
 }
 
@@ -2263,7 +2267,7 @@
 #define resource_len(r) (((r)->end - (r)->start) + 1)
 static int __init r8a66597_probe(struct platform_device *pdev)
 {
-	struct resource *res = NULL;
+	struct resource *res = NULL, *ires;
 	int irq = -1;
 	void __iomem *reg = NULL;
 	struct usb_hcd *hcd = NULL;
@@ -2274,7 +2278,7 @@
 
 	if (pdev->dev.dma_mask) {
 		ret = -EINVAL;
-		err("dma not support");
+		dev_err(&pdev->dev, "dma not supported\n");
 		goto clean_up;
 	}
 
@@ -2282,21 +2286,25 @@
 					   (char *)hcd_name);
 	if (!res) {
 		ret = -ENODEV;
-		err("platform_get_resource_byname error.");
+		dev_err(&pdev->dev, "platform_get_resource_byname error.\n");
 		goto clean_up;
 	}
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
 		ret = -ENODEV;
-		err("platform_get_irq error.");
+		dev_err(&pdev->dev,
+			"platform_get_resource IORESOURCE_IRQ error.\n");
 		goto clean_up;
 	}
 
+	irq = ires->start;
+	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+
 	reg = ioremap(res->start, resource_len(res));
 	if (reg == NULL) {
 		ret = -ENOMEM;
-		err("ioremap error.");
+		dev_err(&pdev->dev, "ioremap error.\n");
 		goto clean_up;
 	}
 
@@ -2304,7 +2312,7 @@
 	hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
 	if (!hcd) {
 		ret = -ENOMEM;
-		err("Failed to create hcd");
+		dev_err(&pdev->dev, "Failed to create hcd\n");
 		goto clean_up;
 	}
 	r8a66597 = hcd_to_r8a66597(hcd);
@@ -2329,13 +2337,33 @@
 	INIT_LIST_HEAD(&r8a66597->child_device);
 
 	hcd->rsrc_start = res->start;
-	if (irq_sense == INTL)
-		irq_trigger = IRQF_TRIGGER_LOW;
-	else
-		irq_trigger = IRQF_TRIGGER_FALLING;
+
+	/* irq_sense setting on cmdline takes precedence over resource
+	 * settings, so the introduction of irqflags in the IRQ resource
+	 * won't disturb existing setups. */
+	switch (irq_sense) {
+		case INTL:
+			irq_trigger = IRQF_TRIGGER_LOW;
+			break;
+		case 0:
+			irq_trigger = IRQF_TRIGGER_FALLING;
+			break;
+		case 0xff:
+			if (irq_trigger)
+				irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
+					    INTL : 0;
+			else {
+				irq_sense = INTL;
+				irq_trigger = IRQF_TRIGGER_LOW;
+			}
+			break;
+		default:
+			dev_err(&pdev->dev, "Unknown irq_sense value.\n");
+	}
+
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
 	if (ret != 0) {
-		err("Failed to add hcd");
+		dev_err(&pdev->dev, "Failed to add hcd\n");
 		goto clean_up;
 	}
 
@@ -2364,7 +2392,8 @@
 	if (usb_disabled())
 		return -ENODEV;
 
-	info("driver %s, %s", hcd_name, DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": driver %s, %s\n", hcd_name,
+	       DRIVER_VERSION);
 	return platform_driver_register(&r8a66597_driver);
 }
 module_init(r8a66597_init);
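
The r8a66597 (and, below, sl811) probe changes share one technique: fetch the IORESOURCE_IRQ resource instead of just the IRQ number, so any IRQF_TRIGGER_* bits the board code encoded in the resource flags reach usb_add_hcd(), while an irq_sense value given on the command line still takes precedence. A minimal user-space sketch of that precedence, with resolve_irq_trigger() and the constant values invented for illustration (the real ones live in <linux/interrupt.h> and the r8a66597 headers) and with the driver's error message for unknown irq_sense values left out:

/*
 * Sketch only: the real IRQF_TRIGGER_* and INTL values come from
 * <linux/interrupt.h> and the r8a66597 headers.
 */
#include <stdio.h>

#define IRQF_TRIGGER_FALLING	0x02
#define IRQF_TRIGGER_LOW	0x08
#define INTL			32	/* "level low" module parameter value */

/*
 * irq_sense given on the command line (INTL or 0) wins; the 0xff
 * sentinel means "not set", in which case the trigger bits read from
 * the IRQ resource are used, falling back to level-low.
 */
static unsigned long resolve_irq_trigger(unsigned short irq_sense,
					 unsigned long resource_trigger)
{
	switch (irq_sense) {
	case INTL:
		return IRQF_TRIGGER_LOW;
	case 0:
		return IRQF_TRIGGER_FALLING;
	case 0xff:
	default:
		return resource_trigger ? resource_trigger : IRQF_TRIGGER_LOW;
	}
}

int main(void)
{
	/* Board encodes a falling-edge IRQ, no module parameter given. */
	printf("%#lx\n", resolve_irq_trigger(0xff, IRQF_TRIGGER_FALLING));
	/* User forces level-low; the resource flags are ignored. */
	printf("%#lx\n", resolve_irq_trigger(INTL, IRQF_TRIGGER_FALLING));
	return 0;
}
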
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 8a74bbb..e106e9d 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1620,22 +1620,26 @@
 {
 	struct usb_hcd		*hcd;
 	struct sl811		*sl811;
-	struct resource		*addr, *data;
+	struct resource		*addr, *data, *ires;
 	int			irq;
 	void __iomem		*addr_reg;
 	void __iomem		*data_reg;
 	int			retval;
 	u8			tmp, ioaddr = 0;
+	unsigned long		irqflags;
 
 	/* basic sanity checks first.  board-specific init logic should
 	 * have initialized these three resources and probably board
 	 * specific platform_data.  we don't probe for IRQs, and do only
 	 * minimal sanity checking.
 	 */
-	irq = platform_get_irq(dev, 0);
-	if (dev->num_resources < 3 || irq < 0)
+	ires = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+	if (dev->num_resources < 3 || !ires)
 		return -ENODEV;
 
+	irq = ires->start;
+	irqflags = ires->flags & IRQF_TRIGGER_MASK;
+
 	/* refuse to confuse usbcore */
 	if (dev->dev.dma_mask) {
 		DBG("no we won't dma\n");
@@ -1717,8 +1721,11 @@
 	 * triggers (e.g. most ARM CPUs).  Initial driver stress testing
 	 * was on a system with single edge triggering, so most sorts of
 	 * triggering arrangement should work.
+	 *
+	 * Use resource IRQ flags if set by platform device setup.
 	 */
-	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+	irqflags |= IRQF_SHARED;
+	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | irqflags);
 	if (retval != 0)
 		goto err6;
 
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 3a7bfe7..cf5e4cf 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -53,7 +53,6 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v3.0"
 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
 Alan Stern"
@@ -951,12 +950,13 @@
 {
 	int retval = -ENOMEM;
 
-	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "%s\n",
-			ignore_oc ? ", overcurrent ignored" : "");
-
 	if (usb_disabled())
 		return -ENODEV;
 
+	printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n",
+			ignore_oc ? ", overcurrent ignored" : "");
+	set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+
 	if (DEBUG_CONFIGURED) {
 		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
 		if (!errbuf)
@@ -988,6 +988,7 @@
 
 errbuf_failed:
 
+	clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
 	return retval;
 }
 
@@ -997,6 +998,7 @@
 	kmem_cache_destroy(uhci_up_cachep);
 	debugfs_remove(uhci_debugfs_root);
 	kfree(errbuf);
+	clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
 }
 
 module_init(uhci_hcd_init);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 1f0c2cf..5631d89 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1065,13 +1065,18 @@
 		}
 		if (exponent < 0)
 			return -EINVAL;
-		qh->period = 1 << exponent;
-		qh->skel = SKEL_INDEX(exponent);
 
-		/* For now, interrupt phase is fixed by the layout
-		 * of the QH lists. */
-		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
-		ret = uhci_check_bandwidth(uhci, qh);
+		/* If the slot is full, try a lower period */
+		do {
+			qh->period = 1 << exponent;
+			qh->skel = SKEL_INDEX(exponent);
+
+			/* For now, interrupt phase is fixed by the layout
+			 * of the QH lists.
+			 */
+			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+			ret = uhci_check_bandwidth(uhci, qh);
+		} while (ret != 0 && --exponent >= 0);
 		if (ret)
 			return ret;
 	} else if (qh->period > urb->interval)
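
The uhci-q.c hunk above turns a single bandwidth check into a back-off loop: if uhci_check_bandwidth() rejects the requested interrupt period, the exponent is decremented and the halved period is retried until a slot fits or the exponent underflows. The same loop in isolation, with check_bandwidth_stub() standing in for the real bandwidth accounting (purely illustrative):

#include <stdio.h>

/* Stub bandwidth check: pretend only periods of 32 frames or less fit. */
static int check_bandwidth_stub(unsigned int period)
{
	return period <= 32 ? 0 : -28;	/* -ENOSPC */
}

/* Try the largest allowed period first, then halve it until one fits. */
static int pick_period(int exponent, unsigned int *chosen)
{
	int ret;

	do {
		*chosen = 1U << exponent;
		ret = check_bandwidth_stub(*chosen);
	} while (ret != 0 && --exponent >= 0);

	return ret;
}

int main(void)
{
	unsigned int period;

	if (pick_period(7, &period) == 0)	/* asks for 128, settles for 32 */
		printf("period %u frames\n", period);
	return 0;
}
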
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild
new file mode 100644
index 0000000..26a3871
--- /dev/null
+++ b/drivers/usb/host/whci/Kbuild
@@ -0,0 +1,11 @@
+obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
+
+whci-hcd-y := \
+	asl.o	\
+	hcd.o 	\
+	hw.o	\
+	init.o	\
+	int.o	\
+	pzl.o	\
+	qset.o	\
+	wusb.o
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
new file mode 100644
index 0000000..4d7078e5
--- /dev/null
+++ b/drivers/usb/host/whci/asl.c
@@ -0,0 +1,367 @@
+/*
+ * Wireless Host Controller (WHC) asynchronous schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+#if D_LOCAL >= 4
+static void dump_asl(struct whc *whc, const char *tag)
+{
+	struct device *dev = &whc->umc->dev;
+	struct whc_qset *qset;
+
+	d_printf(4, dev, "ASL %s\n", tag);
+
+	list_for_each_entry(qset, &whc->async_list, list_node) {
+		dump_qset(qset, dev);
+	}
+}
+#else
+static inline void dump_asl(struct whc *whc, const char *tag)
+{
+}
+#endif
+
+
+static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
+			       struct whc_qset **next, struct whc_qset **prev)
+{
+	struct list_head *n, *p;
+
+	BUG_ON(list_empty(&whc->async_list));
+
+	n = qset->list_node.next;
+	if (n == &whc->async_list)
+		n = n->next;
+	p = qset->list_node.prev;
+	if (p == &whc->async_list)
+		p = p->prev;
+
+	*next = container_of(n, struct whc_qset, list_node);
+	*prev = container_of(p, struct whc_qset, list_node);
+
+}
+
+static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
+{
+	list_move(&qset->list_node, &whc->async_list);
+	qset->in_sw_list = true;
+}
+
+static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
+{
+	struct whc_qset *next, *prev;
+
+	qset_clear(whc, qset);
+
+	/* Link into ASL. */
+	qset_get_next_prev(whc, qset, &next, &prev);
+	whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
+	whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
+	qset->in_hw_list = true;
+}
+
+static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+	struct whc_qset *prev, *next;
+
+	qset_get_next_prev(whc, qset, &next, &prev);
+
+	list_move(&qset->list_node, &whc->async_removed_list);
+	qset->in_sw_list = false;
+
+	/*
+	 * No more qsets in the ASL?  The caller must stop the ASL as
+	 * it's no longer valid.
+	 */
+	if (list_empty(&whc->async_list))
+		return;
+
+	/* Remove from ASL. */
+	whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
+	qset->in_hw_list = false;
+}
+
+/**
+ * process_qset - process any recently inactivated or halted qTDs in a
+ * qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
+ * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
+ */
+static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
+{
+	enum whc_update update = 0;
+	uint32_t status = 0;
+
+	while (qset->ntds) {
+		struct whc_qtd *td;
+		int t;
+
+		t = qset->td_start;
+		td = &qset->qtd[qset->td_start];
+		status = le32_to_cpu(td->status);
+
+		/*
+		 * Nothing to do with a still active qTD.
+		 */
+		if (status & QTD_STS_ACTIVE)
+			break;
+
+		if (status & QTD_STS_HALTED) {
+			/* Ug, an error. */
+			process_halted_qtd(whc, qset, td);
+			goto done;
+		}
+
+		/* Mmm, a completed qTD. */
+		process_inactive_qtd(whc, qset, td);
+	}
+
+	update |= qset_add_qtds(whc, qset);
+
+done:
+	/*
+	 * Remove this qset from the ASL if requested, but only if it has
+	 * no qTDs.
+	 */
+	if (qset->remove && qset->ntds == 0) {
+		asl_qset_remove(whc, qset);
+		update |= WHC_UPDATE_REMOVED;
+	}
+	return update;
+}
+
+void asl_start(struct whc *whc)
+{
+	struct whc_qset *qset;
+
+	qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+
+	le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
+
+	whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
+		      1000, "start ASL");
+}
+
+void asl_stop(struct whc *whc)
+{
+	whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_ASYNC_SCHED, 0,
+		      1000, "stop ASL");
+}
+
+void asl_update(struct whc *whc, uint32_t wusbcmd)
+{
+	whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+	wait_event(whc->async_list_wq,
+		   (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
+}
+
+/**
+ * scan_async_work - scan the ASL for qsets to process.
+ *
+ * Process each qset in the ASL in turn and then signal the WHC that
+ * the ASL has been updated.
+ *
+ * Then start, stop or update the asynchronous schedule as required.
+ */
+void scan_async_work(struct work_struct *work)
+{
+	struct whc *whc = container_of(work, struct whc, async_work);
+	struct whc_qset *qset, *t;
+	enum whc_update update = 0;
+
+	spin_lock_irq(&whc->lock);
+
+	dump_asl(whc, "before processing");
+
+	/*
+	 * Traverse the software list backwards so new qsets can be
+	 * safely inserted into the ASL without making it non-circular.
+	 */
+	list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
+		if (!qset->in_hw_list) {
+			asl_qset_insert(whc, qset);
+			update |= WHC_UPDATE_ADDED;
+		}
+
+		update |= process_qset(whc, qset);
+	}
+
+	dump_asl(whc, "after processing");
+
+	spin_unlock_irq(&whc->lock);
+
+	if (update) {
+		uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
+		if (update & WHC_UPDATE_REMOVED)
+			wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
+		asl_update(whc, wusbcmd);
+	}
+
+	/*
+	 * Now that the ASL is updated, complete the removal of any
+	 * removed qsets.
+	 */
+	spin_lock(&whc->lock);
+
+	list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
+		qset_remove_complete(whc, qset);
+	}
+
+	spin_unlock(&whc->lock);
+}
+
+/**
+ * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the ASL.
+ */
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+	int err;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	qset = get_qset(whc, urb, GFP_ATOMIC);
+	if (qset == NULL)
+		err = -ENOMEM;
+	else
+		err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+	if (!err) {
+		usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+		if (!qset->in_sw_list)
+			asl_qset_insert_begin(whc, qset);
+	}
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	if (!err)
+		queue_work(whc->workqueue, &whc->async_work);
+
+	return 0;
+}
+
+/**
+ * asl_urb_dequeue - remove an URB (qset) from the async list.
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed from the ASL so the qTDs
+ * can be removed.
+ */
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+	struct whc_urb *wurb = urb->hcpriv;
+	struct whc_qset *qset = wurb->qset;
+	struct whc_std *std, *t;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+	if (ret < 0)
+		goto out;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb == urb)
+			qset_free_std(whc, std);
+		else
+			std->qtd = NULL; /* so this std is re-added when the qset is */
+	}
+
+	asl_qset_remove(whc, qset);
+	wurb->status = status;
+	wurb->is_async = true;
+	queue_work(whc->workqueue, &wurb->dequeue_work);
+
+out:
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	return ret;
+}
+
+/**
+ * asl_qset_delete - delete a qset from the ASL
+ */
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+	qset->remove = 1;
+	queue_work(whc->workqueue, &whc->async_work);
+	qset_delete(whc, qset);
+}
+
+/**
+ * asl_init - initialize the asynchronous schedule list
+ *
+ * A dummy qset with no qTDs is added to the ASL to simplify removing
+ * qsets (no need to stop the ASL when the last qset is removed).
+ */
+int asl_init(struct whc *whc)
+{
+	struct whc_qset *qset;
+
+	qset = qset_alloc(whc, GFP_KERNEL);
+	if (qset == NULL)
+		return -ENOMEM;
+
+	asl_qset_insert_begin(whc, qset);
+	asl_qset_insert(whc, qset);
+
+	return 0;
+}
+
+/**
+ * asl_clean_up - free ASL resources
+ *
+ * The ASL is stopped and empty except for the dummy qset.
+ */
+void asl_clean_up(struct whc *whc)
+{
+	struct whc_qset *qset;
+
+	if (!list_empty(&whc->async_list)) {
+		qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+		list_del(&qset->list_node);
+		qset_free(whc, qset);
+	}
+}
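
asl_init() above seeds the ASL with a dummy qset so the hardware list is never empty, and qset_get_next_prev() always skips the software list head when working out a qset's neighbours in the circular schedule. A stand-alone sketch of that skip-the-head lookup on a plain circular doubly-linked list (struct node is an illustrative stand-in, not the kernel's struct list_head):

#include <stdio.h>

struct node {
	struct node *next, *prev;
	int id;			/* -1 marks the list head, which holds no data */
};

/*
 * Neighbours of @n in the circular list, stepping over the head so the
 * result is always a real element (as the ASL code does when deciding
 * which qsets' link pointers to rewrite).
 */
static void neighbours(struct node *head, struct node *n,
		       struct node **next, struct node **prev)
{
	*next = (n->next == head) ? head->next : n->next;
	*prev = (n->prev == head) ? head->prev : n->prev;
}

int main(void)
{
	struct node head = { .id = -1 }, a = { .id = 1 }, b = { .id = 2 };
	struct node *nx, *pv;

	/* head <-> a <-> b, circular */
	head.next = &a; a.next = &b; b.next = &head;
	head.prev = &b; b.prev = &a; a.prev = &head;

	neighbours(&head, &b, &nx, &pv);
	printf("next of b: %d, prev of b: %d\n", nx->id, pv->id);
	return 0;
}
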
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
new file mode 100644
index 0000000..ef3ad4d
--- /dev/null
+++ b/drivers/usb/host/whci/hcd.c
@@ -0,0 +1,339 @@
+/*
+ * Wireless Host Controller (WHC) driver.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * One time initialization.
+ *
+ * Nothing to do here.
+ */
+static int whc_reset(struct usb_hcd *usb_hcd)
+{
+	return 0;
+}
+
+/*
+ * Start the wireless host controller.
+ *
+ * Start device notification.
+ *
+ * Put hc into run state, set DNTS parameters.
+ */
+static int whc_start(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u8 bcid;
+	int ret;
+
+	mutex_lock(&wusbhc->mutex);
+
+	le_writel(WUSBINTR_GEN_CMD_DONE
+		  | WUSBINTR_HOST_ERR
+		  | WUSBINTR_ASYNC_SCHED_SYNCED
+		  | WUSBINTR_DNTS_INT
+		  | WUSBINTR_ERR_INT
+		  | WUSBINTR_INT,
+		  whc->base + WUSBINTR);
+
+	/* set cluster ID */
+	bcid = wusb_cluster_id_get();
+	ret = whc_set_cluster_id(whc, bcid);
+	if (ret < 0)
+		goto out;
+	wusbhc->cluster_id = bcid;
+
+	/* start HC */
+	whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
+
+	usb_hcd->uses_new_polling = 1;
+	usb_hcd->poll_rh = 1;
+	usb_hcd->state = HC_STATE_RUNNING;
+
+out:
+	mutex_unlock(&wusbhc->mutex);
+	return ret;
+}
+
+
+/*
+ * Stop the wireless host controller.
+ *
+ * Stop device notification.
+ *
+ * Wait for pending transfer to stop? Put hc into stop state?
+ */
+static void whc_stop(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	mutex_lock(&wusbhc->mutex);
+
+	wusbhc_stop(wusbhc);
+
+	/* stop HC */
+	le_writel(0, whc->base + WUSBINTR);
+	whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
+		      100, "HC to halt");
+
+	wusb_cluster_id_put(wusbhc->cluster_id);
+
+	mutex_unlock(&wusbhc->mutex);
+}
+
+static int whc_get_frame_number(struct usb_hcd *usb_hcd)
+{
+	/* Frame numbers are not applicable to WUSB. */
+	return -ENOSYS;
+}
+
+
+/*
+ * Queue an URB to the ASL or PZL
+ */
+static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+			   gfp_t mem_flags)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int ret;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_INTERRUPT:
+		ret = pzl_urb_enqueue(whc, urb, mem_flags);
+		break;
+	case PIPE_ISOCHRONOUS:
+		dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
+		ret = -ENOTSUPP;
+		break;
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		ret = asl_urb_enqueue(whc, urb, mem_flags);
+		break;
+	};
+
+	return ret;
+}
+
+/*
+ * Remove a queued URB from the ASL or PZL.
+ */
+static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int ret;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_INTERRUPT:
+		ret = pzl_urb_dequeue(whc, urb, status);
+		break;
+	case PIPE_ISOCHRONOUS:
+		ret = -ENOTSUPP;
+		break;
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		ret = asl_urb_dequeue(whc, urb, status);
+		break;
+	};
+
+	return ret;
+}
+
+/*
+ * Wait for all URBs to the endpoint to be completed, then delete the
+ * qset.
+ */
+static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
+				 struct usb_host_endpoint *ep)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	struct whc_qset *qset;
+
+	qset = ep->hcpriv;
+	if (qset) {
+		ep->hcpriv = NULL;
+		if (usb_endpoint_xfer_bulk(&ep->desc)
+		    || usb_endpoint_xfer_control(&ep->desc))
+			asl_qset_delete(whc, qset);
+		else
+			pzl_qset_delete(whc, qset);
+	}
+}
+
+static struct hc_driver whc_hc_driver = {
+	.description = "whci-hcd",
+	.product_desc = "Wireless host controller",
+	.hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
+	.irq = whc_int_handler,
+	.flags = HCD_USB2,
+
+	.reset = whc_reset,
+	.start = whc_start,
+	.stop = whc_stop,
+	.get_frame_number = whc_get_frame_number,
+	.urb_enqueue = whc_urb_enqueue,
+	.urb_dequeue = whc_urb_dequeue,
+	.endpoint_disable = whc_endpoint_disable,
+
+	.hub_status_data = wusbhc_rh_status_data,
+	.hub_control = wusbhc_rh_control,
+	.bus_suspend = wusbhc_rh_suspend,
+	.bus_resume = wusbhc_rh_resume,
+	.start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int whc_probe(struct umc_dev *umc)
+{
+	int ret = -ENOMEM;
+	struct usb_hcd *usb_hcd;
+	struct wusbhc *wusbhc = NULL;
+	struct whc *whc = NULL;
+	struct device *dev = &umc->dev;
+
+	usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
+	if (usb_hcd == NULL) {
+		dev_err(dev, "unable to create hcd\n");
+		goto error;
+	}
+
+	usb_hcd->wireless = 1;
+
+	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	whc = wusbhc_to_whc(wusbhc);
+	whc->umc = umc;
+
+	ret = whc_init(whc);
+	if (ret)
+		goto error;
+
+	wusbhc->dev = dev;
+	wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
+	if (!wusbhc->uwb_rc) {
+		ret = -ENODEV;
+		dev_err(dev, "cannot get radio controller\n");
+		goto error;
+	}
+
+	if (whc->n_devices > USB_MAXCHILDREN) {
+		dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
+			 whc->n_devices);
+		wusbhc->ports_max = USB_MAXCHILDREN;
+	} else
+		wusbhc->ports_max = whc->n_devices;
+	wusbhc->mmcies_max      = whc->n_mmc_ies;
+	wusbhc->start           = whc_wusbhc_start;
+	wusbhc->stop            = whc_wusbhc_stop;
+	wusbhc->mmcie_add       = whc_mmcie_add;
+	wusbhc->mmcie_rm        = whc_mmcie_rm;
+	wusbhc->dev_info_set    = whc_dev_info_set;
+	wusbhc->bwa_set         = whc_bwa_set;
+	wusbhc->set_num_dnts    = whc_set_num_dnts;
+	wusbhc->set_ptk         = whc_set_ptk;
+	wusbhc->set_gtk         = whc_set_gtk;
+
+	ret = wusbhc_create(wusbhc);
+	if (ret)
+		goto error_wusbhc_create;
+
+	ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
+	if (ret) {
+		dev_err(dev, "cannot add HCD: %d\n", ret);
+		goto error_usb_add_hcd;
+	}
+
+	ret = wusbhc_b_create(wusbhc);
+	if (ret) {
+		dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
+		goto error_wusbhc_b_create;
+	}
+
+	return 0;
+
+error_wusbhc_b_create:
+	usb_remove_hcd(usb_hcd);
+error_usb_add_hcd:
+	wusbhc_destroy(wusbhc);
+error_wusbhc_create:
+	uwb_rc_put(wusbhc->uwb_rc);
+error:
+	whc_clean_up(whc);
+	if (usb_hcd)
+		usb_put_hcd(usb_hcd);
+	return ret;
+}
+
+
+static void whc_remove(struct umc_dev *umc)
+{
+	struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	if (usb_hcd) {
+		wusbhc_b_destroy(wusbhc);
+		usb_remove_hcd(usb_hcd);
+		wusbhc_destroy(wusbhc);
+		uwb_rc_put(wusbhc->uwb_rc);
+		whc_clean_up(whc);
+		usb_put_hcd(usb_hcd);
+	}
+}
+
+static struct umc_driver whci_hc_driver = {
+	.name =		"whci-hcd",
+	.cap_id =       UMC_CAP_ID_WHCI_WUSB_HC,
+	.probe =	whc_probe,
+	.remove =	whc_remove,
+};
+
+static int __init whci_hc_driver_init(void)
+{
+	return umc_driver_register(&whci_hc_driver);
+}
+module_init(whci_hc_driver_init);
+
+static void __exit whci_hc_driver_exit(void)
+{
+	umc_driver_unregister(&whci_hc_driver);
+}
+module_exit(whci_hc_driver_exit);
+
+/* PCI device IDs that we handle (so the module gets loaded) */
+static struct pci_device_id whci_hcd_id_table[] = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+	{ /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
+
+MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
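
hcd.c relies on struct embedding: usb_create_hcd() allocates hcd_priv_size extra bytes (sizeof(struct whc) - sizeof(struct usb_hcd)), and the wusbhc/whc pointers are then recovered with container_of-style pointer arithmetic via usb_hcd_to_wusbhc() and wusbhc_to_whc(). A self-contained sketch of that idiom, with the *_sketch types standing in for the real layouts:

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

struct hcd_sketch { int state; };			/* stands in for usb_hcd */
struct wusbhc_sketch { struct hcd_sketch hcd; int cluster_id; };
struct whc_sketch { struct wusbhc_sketch wusbhc; int n_devices; };

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	/* Allocate the outermost object but hand out only the inner hcd. */
	struct whc_sketch *whc = calloc(1, sizeof(*whc));
	struct hcd_sketch *hcd = &whc->wusbhc.hcd;

	/* Recover the containers, as usb_hcd_to_wusbhc()/wusbhc_to_whc() do. */
	struct wusbhc_sketch *wusbhc =
		container_of_sketch(hcd, struct wusbhc_sketch, hcd);
	struct whc_sketch *back =
		container_of_sketch(wusbhc, struct whc_sketch, wusbhc);

	printf("round trip ok: %d\n", back == whc);
	free(whc);
	return 0;
}
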
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
new file mode 100644
index 0000000..ac86e59
--- /dev/null
+++ b/drivers/usb/host/whci/hw.c
@@ -0,0 +1,87 @@
+/*
+ * Wireless Host Controller (WHC) hardware access helpers.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
+{
+	unsigned long flags;
+	u32 cmd;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	cmd = le_readl(whc->base + WUSBCMD);
+	cmd = (cmd & ~mask) | val;
+	le_writel(cmd, whc->base + WUSBCMD);
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
+ * @whc:    the WHCI HC
+ * @cmd:    command to start.
+ * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
+ * @addr:   pointer to any data for the command (may be NULL).
+ * @len:    length of the data (if any).
+ */
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
+{
+	unsigned long flags;
+	dma_addr_t dma_addr;
+	int t;
+
+	mutex_lock(&whc->mutex);
+
+	/* Wait for previous command to complete. */
+	t = wait_event_timeout(whc->cmd_wq,
+			       (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
+			       WHC_GENCMD_TIMEOUT_MS);
+	if (t == 0) {
+		dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
+			le_readl(whc->base + WUSBGENCMDSTS),
+			le_readl(whc->base + WUSBGENCMDPARAMS));
+		return -ETIMEDOUT;
+	}
+
+	if (addr) {
+		memcpy(whc->gen_cmd_buf, addr, len);
+		dma_addr = whc->gen_cmd_buf_dma;
+	} else
+		dma_addr = 0;
+
+	/* Poke registers to start cmd. */
+	spin_lock_irqsave(&whc->lock, flags);
+
+	le_writel(params, whc->base + WUSBGENCMDPARAMS);
+	le_writeq(dma_addr, whc->base + WUSBGENADDR);
+
+	le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
+		  whc->base + WUSBGENCMDSTS);
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	mutex_unlock(&whc->mutex);
+
+	return 0;
+}
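
whc_write_wusbcmd() above is a masked read-modify-write of the WUSBCMD register under the controller spinlock: only the bits selected by @mask change. The bit manipulation on its own, stripped of the locking and MMIO accessors (masked_update() is an invented name; as in the driver, callers are expected to pass a val that lies within mask):

#include <stdio.h>
#include <stdint.h>

/* Change only the bits selected by @mask; @val must lie within @mask. */
static uint32_t masked_update(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | val;
}

int main(void)
{
	uint32_t wusbcmd = 0x4;				/* an unrelated bit already set */

	wusbcmd = masked_update(wusbcmd, 0x1, 0x1);	/* set a RUN-style bit */
	printf("%#x\n", (unsigned int)wusbcmd);		/* 0x5 */

	wusbcmd = masked_update(wusbcmd, 0x1, 0x0);	/* clear it again */
	printf("%#x\n", (unsigned int)wusbcmd);		/* 0x4 */
	return 0;
}
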
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
new file mode 100644
index 0000000..34a783c
--- /dev/null
+++ b/drivers/usb/host/whci/init.c
@@ -0,0 +1,188 @@
+/*
+ * Wireless Host Controller (WHC) initialization.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * Reset the host controller.
+ */
+static void whc_hw_reset(struct whc *whc)
+{
+	le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
+		      100, "reset");
+}
+
+static void whc_hw_init_di_buf(struct whc *whc)
+{
+	int d;
+
+	/* Disable all entries in the Device Information buffer. */
+	for (d = 0; d < whc->n_devices; d++)
+		whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
+
+	le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
+}
+
+static void whc_hw_init_dn_buf(struct whc *whc)
+{
+	/* Clear the Device Notification buffer to ensure the V (valid)
+	 * bits are clear.  */
+	memset(whc->dn_buf, 0, 4096);
+
+	le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
+}
+
+int whc_init(struct whc *whc)
+{
+	u32 whcsparams;
+	int ret, i;
+	resource_size_t start, len;
+
+	spin_lock_init(&whc->lock);
+	mutex_init(&whc->mutex);
+	init_waitqueue_head(&whc->cmd_wq);
+	init_waitqueue_head(&whc->async_list_wq);
+	init_waitqueue_head(&whc->periodic_list_wq);
+	whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
+	if (whc->workqueue == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	INIT_WORK(&whc->dn_work, whc_dn_work);
+
+	INIT_WORK(&whc->async_work, scan_async_work);
+	INIT_LIST_HEAD(&whc->async_list);
+	INIT_LIST_HEAD(&whc->async_removed_list);
+
+	INIT_WORK(&whc->periodic_work, scan_periodic_work);
+	for (i = 0; i < 5; i++)
+		INIT_LIST_HEAD(&whc->periodic_list[i]);
+	INIT_LIST_HEAD(&whc->periodic_removed_list);
+
+	/* Map HC registers. */
+	start = whc->umc->resource.start;
+	len   = whc->umc->resource.end - start + 1;
+	if (!request_mem_region(start, len, "whci-hc")) {
+		dev_err(&whc->umc->dev, "can't request HC region\n");
+		ret = -EBUSY;
+		goto error;
+	}
+	whc->base_phys = start;
+	whc->base = ioremap(start, len);
+	if (!whc->base) {
+		dev_err(&whc->umc->dev, "ioremap\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	whc_hw_reset(whc);
+
+	/* Read maximum number of devices, keys and MMC IEs. */
+	whcsparams = le_readl(whc->base + WHCSPARAMS);
+	whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
+	whc->n_keys    = WHCSPARAMS_TO_N_KEYS(whcsparams);
+	whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
+
+	dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
+		whc->n_devices, whc->n_keys, whc->n_mmc_ies);
+
+	whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
+					 sizeof(struct whc_qset), 64, 0);
+	if (whc->qset_pool == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = asl_init(whc);
+	if (ret < 0)
+		goto error;
+	ret = pzl_init(whc);
+	if (ret < 0)
+		goto error;
+
+	/* Allocate and initialize a buffer for generic commands, the
+	   Device Information buffer, and the Device Notification
+	   buffer. */
+
+	whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+					      &whc->gen_cmd_buf_dma, GFP_KERNEL);
+	if (whc->gen_cmd_buf == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
+					 sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+					 &whc->dn_buf_dma, GFP_KERNEL);
+	if (!whc->dn_buf) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	whc_hw_init_dn_buf(whc);
+
+	whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
+					 sizeof(struct di_buf_entry) * whc->n_devices,
+					 &whc->di_buf_dma, GFP_KERNEL);
+	if (!whc->di_buf) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	whc_hw_init_di_buf(whc);
+
+	return 0;
+
+error:
+	whc_clean_up(whc);
+	return ret;
+}
+
+void whc_clean_up(struct whc *whc)
+{
+	resource_size_t len;
+
+	if (whc->di_buf)
+		dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
+				  whc->di_buf, whc->di_buf_dma);
+	if (whc->dn_buf)
+		dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+				  whc->dn_buf, whc->dn_buf_dma);
+	if (whc->gen_cmd_buf)
+		dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+				  whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
+
+	pzl_clean_up(whc);
+	asl_clean_up(whc);
+
+	if (whc->qset_pool)
+		dma_pool_destroy(whc->qset_pool);
+
+	len   = whc->umc->resource.end - whc->umc->resource.start + 1;
+	if (whc->base)
+		iounmap(whc->base);
+	if (whc->base_phys)
+		release_mem_region(whc->base_phys, len);
+
+	if (whc->workqueue)
+		destroy_workqueue(whc->workqueue);
+}
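
whc_init() above sends every failure to a single error label and calls whc_clean_up(), which checks each resource before releasing it so the same routine works on a partially initialized controller (the DMA and MMIO release calls must not be handed resources that were never set up). A minimal sketch of that idempotent-teardown pattern with invented resource names:

#include <stdio.h>
#include <stdlib.h>

struct ctx { void *buf_a; void *buf_b; };

/* Release whatever was actually set up; safe to call at any stage. */
static void clean_up(struct ctx *c)
{
	if (c->buf_b)
		free(c->buf_b);
	if (c->buf_a)
		free(c->buf_a);
	c->buf_a = c->buf_b = NULL;
}

static int init(struct ctx *c)
{
	c->buf_a = malloc(64);
	if (!c->buf_a)
		goto error;
	c->buf_b = malloc(64);
	if (!c->buf_b)
		goto error;
	return 0;

error:
	clean_up(c);
	return -12;	/* -ENOMEM */
}

int main(void)
{
	struct ctx c = { 0 };

	printf("init: %d\n", init(&c));
	clean_up(&c);	/* harmless even after a successful teardown */
	return 0;
}
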
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
new file mode 100644
index 0000000..fce0117
--- /dev/null
+++ b/drivers/usb/host/whci/int.c
@@ -0,0 +1,95 @@
+/*
+ * Wireless Host Controller (WHC) interrupt handling.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static void transfer_done(struct whc *whc)
+{
+	queue_work(whc->workqueue, &whc->async_work);
+	queue_work(whc->workqueue, &whc->periodic_work);
+}
+
+irqreturn_t whc_int_handler(struct usb_hcd *hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 sts;
+
+	sts = le_readl(whc->base + WUSBSTS);
+	if (!(sts & WUSBSTS_INT_MASK))
+		return IRQ_NONE;
+	le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
+
+	if (sts & WUSBSTS_GEN_CMD_DONE)
+		wake_up(&whc->cmd_wq);
+
+	if (sts & WUSBSTS_HOST_ERR)
+		dev_err(&whc->umc->dev, "FIXME: host system error\n");
+
+	if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
+		wake_up(&whc->async_list_wq);
+
+	if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
+		wake_up(&whc->periodic_list_wq);
+
+	if (sts & WUSBSTS_DNTS_INT)
+		queue_work(whc->workqueue, &whc->dn_work);
+
+	/*
+	 * A transfer completed (see [WHCI] section 4.7.1.2 for when
+	 * this occurs).
+	 */
+	if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
+		transfer_done(whc);
+
+	return IRQ_HANDLED;
+}
+
+static int process_dn_buf(struct whc *whc)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+	struct dn_buf_entry *dn;
+	int processed = 0;
+
+	for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
+		if (dn->status & WHC_DN_STATUS_VALID) {
+			wusbhc_handle_dn(wusbhc, dn->src_addr,
+					 (struct wusb_dn_hdr *)dn->dn_data,
+					 dn->msg_size);
+			dn->status &= ~WHC_DN_STATUS_VALID;
+			processed++;
+		}
+	}
+	return processed;
+}
+
+void whc_dn_work(struct work_struct *work)
+{
+	struct whc *whc = container_of(work, struct whc, dn_work);
+	int processed;
+
+	do {
+		processed = process_dn_buf(whc);
+	} while (processed);
+}
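
whc_dn_work() above drains the Device Notification buffer by rescanning it until a whole pass finds nothing valid, which copes with notifications landing while a previous pass is still running. The same drain-until-empty loop over a fixed array of flagged slots, in isolation (slot layout and names invented):

#include <stdio.h>

#define N_ENTRIES	4
#define SLOT_VALID	0x1

struct slot { unsigned int status; int payload; };

/* One pass: handle and clear every valid slot, report how many. */
static int process_buf(struct slot *buf)
{
	int processed = 0;
	int i;

	for (i = 0; i < N_ENTRIES; i++) {
		if (buf[i].status & SLOT_VALID) {
			printf("handled payload %d\n", buf[i].payload);
			buf[i].status &= ~SLOT_VALID;
			processed++;
		}
	}
	return processed;
}

int main(void)
{
	struct slot buf[N_ENTRIES] = {
		{ SLOT_VALID, 10 }, { 0, 0 }, { SLOT_VALID, 30 }, { 0, 0 },
	};

	/* Keep rescanning until a full pass handles nothing. */
	while (process_buf(buf) > 0)
		;
	return 0;
}
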
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
new file mode 100644
index 0000000..8d62df0
--- /dev/null
+++ b/drivers/usb/host/whci/pzl.c
@@ -0,0 +1,398 @@
+/*
+ * Wireless Host Controller (WHC) periodic schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+#if D_LOCAL >= 4
+static void dump_pzl(struct whc *whc, const char *tag)
+{
+	struct device *dev = &whc->umc->dev;
+	struct whc_qset *qset;
+	int period = 0;
+
+	d_printf(4, dev, "PZL %s\n", tag);
+
+	for (period = 0; period < 5; period++) {
+		d_printf(4, dev, "Period %d\n", period);
+		list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
+			dump_qset(qset, dev);
+		}
+	}
+}
+#else
+static inline void dump_pzl(struct whc *whc, const char *tag)
+{
+}
+#endif
+
+static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
+{
+	switch (period) {
+	case 0:
+		whc_qset_set_link_ptr(&whc->pz_list[0], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[2], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[4], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[6], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[8], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[10], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[12], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[14], addr);
+		break;
+	case 1:
+		whc_qset_set_link_ptr(&whc->pz_list[1], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[5], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[9], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[13], addr);
+		break;
+	case 2:
+		whc_qset_set_link_ptr(&whc->pz_list[3], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[11], addr);
+		break;
+	case 3:
+		whc_qset_set_link_ptr(&whc->pz_list[7], addr);
+		break;
+	case 4:
+		whc_qset_set_link_ptr(&whc->pz_list[15], addr);
+		break;
+	}
+}
+
+/*
+ * Return the 'period' to use for this qset.  The minimum interval for
+ * the endpoint is used so that the device is polled often enough,
+ * whichever URBs are submitted.
+ */
+static int qset_get_period(struct whc *whc, struct whc_qset *qset)
+{
+	uint8_t bInterval = qset->ep->desc.bInterval;
+
+	if (bInterval < 6)
+		bInterval = 6;
+	if (bInterval > 10)
+		bInterval = 10;
+	return bInterval - 6;
+}
+
+static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
+{
+	int period;
+
+	period = qset_get_period(whc, qset);
+
+	qset_clear(whc, qset);
+	list_move(&qset->list_node, &whc->periodic_list[period]);
+	qset->in_sw_list = true;
+}
+
+static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+	list_move(&qset->list_node, &whc->periodic_removed_list);
+	qset->in_hw_list = false;
+	qset->in_sw_list = false;
+}
+
+/**
+ * pzl_process_qset - process any recently inactivated or halted qTDs
+ * in a qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule updates required.
+ */
+static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
+{
+	enum whc_update update = 0;
+	uint32_t status = 0;
+
+	while (qset->ntds) {
+		struct whc_qtd *td;
+		int t;
+
+		t = qset->td_start;
+		td = &qset->qtd[qset->td_start];
+		status = le32_to_cpu(td->status);
+
+		/*
+		 * Nothing to do with a still active qTD.
+		 */
+		if (status & QTD_STS_ACTIVE)
+			break;
+
+		if (status & QTD_STS_HALTED) {
+			/* Ug, an error. */
+			process_halted_qtd(whc, qset, td);
+			goto done;
+		}
+
+		/* Mmm, a completed qTD. */
+		process_inactive_qtd(whc, qset, td);
+	}
+
+	update |= qset_add_qtds(whc, qset);
+
+done:
+	/*
+	 * If there are no qTDs in this qset, remove it from the PZL.
+	 */
+	if (qset->remove && qset->ntds == 0) {
+		pzl_qset_remove(whc, qset);
+		update |= WHC_UPDATE_REMOVED;
+	}
+
+	return update;
+}
+
+/**
+ * pzl_start - start the periodic schedule
+ * @whc: the WHCI host controller
+ *
+ * The PZL must be valid (e.g., all entries in the list should have
+ * the T bit set).
+ */
+void pzl_start(struct whc *whc)
+{
+	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
+		      1000, "start PZL");
+}
+
+/**
+ * pzl_stop - stop the periodic schedule
+ * @whc: the WHCI host controller
+ */
+void pzl_stop(struct whc *whc)
+{
+	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_PERIODIC_SCHED, 0,
+		      1000, "stop PZL");
+}
+
+void pzl_update(struct whc *whc, uint32_t wusbcmd)
+{
+	whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+	wait_event(whc->periodic_list_wq,
+		   (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
+}
+
+static void update_pzl_hw_view(struct whc *whc)
+{
+	struct whc_qset *qset, *t;
+	int period;
+	u64 tmp_qh = 0;
+
+	for (period = 0; period < 5; period++) {
+		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+			whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
+			tmp_qh = qset->qset_dma;
+			qset->in_hw_list = true;
+		}
+		update_pzl_pointers(whc, period, tmp_qh);
+	}
+}
+
+/**
+ * scan_periodic_work - scan the PZL for qsets to process.
+ *
+ * Process each qset in the PZL in turn and then signal the WHC that
+ * the PZL has been updated.
+ *
+ * Then start, stop or update the periodic schedule as required.
+ */
+void scan_periodic_work(struct work_struct *work)
+{
+	struct whc *whc = container_of(work, struct whc, periodic_work);
+	struct whc_qset *qset, *t;
+	enum whc_update update = 0;
+	int period;
+
+	spin_lock_irq(&whc->lock);
+
+	dump_pzl(whc, "before processing");
+
+	for (period = 4; period >= 0; period--) {
+		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+			if (!qset->in_hw_list)
+				update |= WHC_UPDATE_ADDED;
+			update |= pzl_process_qset(whc, qset);
+		}
+	}
+
+	if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
+		update_pzl_hw_view(whc);
+
+	dump_pzl(whc, "after processing");
+
+	spin_unlock_irq(&whc->lock);
+
+	if (update) {
+		uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
+		if (update & WHC_UPDATE_REMOVED)
+			wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
+		pzl_update(whc, wusbcmd);
+	}
+
+	/*
+	 * Now that the PZL is updated, complete the removal of any
+	 * removed qsets.
+	 */
+	spin_lock(&whc->lock);
+
+	list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
+		qset_remove_complete(whc, qset);
+	}
+
+	spin_unlock(&whc->lock);
+}
+
+/**
+ * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the PZL.
+ */
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+	int err;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	qset = get_qset(whc, urb, GFP_ATOMIC);
+	if (qset == NULL)
+		err = -ENOMEM;
+	else
+		err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+	if (!err) {
+		usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+		if (!qset->in_sw_list)
+			qset_insert_in_sw_list(whc, qset);
+	}
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	if (!err)
+		queue_work(whc->workqueue, &whc->periodic_work);
+
+	return 0;
+}
+
+/**
+ * pzl_urb_dequeue - remove an URB (qset) from the periodic list
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed so the qTDs can be safely
+ * removed.
+ */
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+	struct whc_urb *wurb = urb->hcpriv;
+	struct whc_qset *qset = wurb->qset;
+	struct whc_std *std, *t;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+	if (ret < 0)
+		goto out;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb == urb)
+			qset_free_std(whc, std);
+		else
+			std->qtd = NULL; /* so this std is re-added when the qset is */
+	}
+
+	pzl_qset_remove(whc, qset);
+	wurb->status = status;
+	wurb->is_async = false;
+	queue_work(whc->workqueue, &wurb->dequeue_work);
+
+out:
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	return ret;
+}
+
+/**
+ * pzl_qset_delete - delete a qset from the PZL
+ */
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+	qset->remove = 1;
+	queue_work(whc->workqueue, &whc->periodic_work);
+	qset_delete(whc, qset);
+}
+
+
+/**
+ * pzl_init - initialize the periodic zone list
+ * @whc: the WHCI host controller
+ */
+int pzl_init(struct whc *whc)
+{
+	int i;
+
+	whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
+					  &whc->pz_list_dma, GFP_KERNEL);
+	if (whc->pz_list == NULL)
+		return -ENOMEM;
+
+	/* Set T bit on all elements in PZL. */
+	for (i = 0; i < 16; i++)
+		whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+
+	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+	return 0;
+}
+
+/**
+ * pzl_clean_up - free PZL resources
+ * @whc: the WHCI host controller
+ *
+ * The PZL is stopped and empty.
+ */
+void pzl_clean_up(struct whc *whc)
+{
+	if (whc->pz_list)
+		dma_free_coherent(&whc->umc->dev,  sizeof(u64) * 16, whc->pz_list,
+				  whc->pz_list_dma);
+}
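
update_pzl_pointers() above hard-codes which of the 16 periodic zone list slots each period owns (period 0 every even slot, period 4 only slot 15). That is the usual binary-tree frame-list layout: slot i belongs to the period equal to the number of trailing 1 bits in i. The sketch below reproduces the same slot assignments and adds the bInterval clamp used by qset_get_period() (helper names invented):

#include <stdio.h>

/* Period of slot i = number of trailing one bits in i (0..4 for 16 slots). */
static int slot_period(unsigned int i)
{
	int p = 0;

	while (i & 1) {
		p++;
		i >>= 1;
	}
	return p;
}

/* Clamp a WUSB bInterval of 6..10 onto periods 0..4, as qset_get_period(). */
static int interval_to_period(unsigned int bInterval)
{
	if (bInterval < 6)
		bInterval = 6;
	if (bInterval > 10)
		bInterval = 10;
	return bInterval - 6;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 16; i++)
		printf("pz_list[%2u] -> period %d\n", i, slot_period(i));
	printf("bInterval 8 -> period %d\n", interval_to_period(8));
	return 0;
}
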
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
new file mode 100644
index 0000000..0420037
--- /dev/null
+++ b/drivers/usb/host/whci/qset.c
@@ -0,0 +1,567 @@
+/*
+ * Wireless Host Controller (WHC) qset management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+void dump_qset(struct whc_qset *qset, struct device *dev)
+{
+	struct whc_std *std;
+	struct urb *urb = NULL;
+	int i;
+
+	dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
+	dev_dbg(dev, "  -> %08x\n", (u32)qset->qh.link);
+	dev_dbg(dev, "  info: %08x %08x %08x\n",
+		qset->qh.info1, qset->qh.info2,  qset->qh.info3);
+	dev_dbg(dev, "  sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
+	dev_dbg(dev, "  TD: sts: %08x opts: %08x\n",
+		qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+
+	for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
+		dev_dbg(dev, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
+			i == qset->td_start ? 'S' : ' ',
+			i == qset->td_end ? 'E' : ' ',
+			i, qset->qtd[i].status, qset->qtd[i].options,
+			(u32)qset->qtd[i].page_list_ptr);
+	}
+	dev_dbg(dev, "  ntds: %d\n", qset->ntds);
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (urb != std->urb) {
+			urb = std->urb;
+			dev_dbg(dev, "  urb %p transferred: %d bytes\n", urb,
+				urb->actual_length);
+		}
+		if (std->qtd)
+			dev_dbg(dev, "    sTD[%td]: %zu bytes @ %08x\n",
+				std->qtd - &qset->qtd[0],
+				std->len, std->num_pointers ?
+				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+		else
+			dev_dbg(dev, "    sTD[-]: %zd bytes @ %08x\n",
+				std->len, std->num_pointers ?
+				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+	}
+}
+
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+	dma_addr_t dma;
+
+	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
+	if (qset == NULL)
+		return NULL;
+	memset(qset, 0, sizeof(struct whc_qset));
+
+	qset->qset_dma = dma;
+	qset->whc = whc;
+
+	INIT_LIST_HEAD(&qset->list_node);
+	INIT_LIST_HEAD(&qset->stds);
+
+	return qset;
+}
+
+/**
+ * qset_fill_qh - fill the static endpoint state in a qset's QHead
+ * @qset: the qset whose QH needs initializing with static endpoint
+ *        state
+ * @urb:  an urb for a transfer to this endpoint
+ */
+static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
+{
+	struct usb_device *usb_dev = urb->dev;
+	struct usb_wireless_ep_comp_descriptor *epcd;
+	bool is_out;
+
+	is_out = usb_pipeout(urb->pipe);
+
+	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+
+	if (epcd) {
+		qset->max_seq = epcd->bMaxSequence;
+		qset->max_burst = epcd->bMaxBurst;
+	} else {
+		qset->max_seq = 2;
+		qset->max_burst = 1;
+	}
+
+	qset->qh.info1 = cpu_to_le32(
+		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
+		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
+		| usb_pipe_to_qh_type(urb->pipe)
+		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
+		| QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
+		);
+	qset->qh.info2 = cpu_to_le32(
+		QH_INFO2_BURST(qset->max_burst)
+		| QH_INFO2_DBP(0)
+		| QH_INFO2_MAX_COUNT(3)
+		| QH_INFO2_MAX_RETRY(3)
+		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
+		);
+	/* FIXME: where can we obtain these Tx parameters from?  Why
+	 * doesn't the chip know what Tx power to use? It knows the Rx
+	 * strength and can presumably guess the Tx power required
+	 * from that? */
+	qset->qh.info3 = cpu_to_le32(
+		QH_INFO3_TX_RATE_53_3
+		| QH_INFO3_TX_PWR(0) /* 0 == max power */
+		);
+}
+
+/**
+ * qset_clear - clear fields in a qset so it may be reinserted into a
+ * schedule
+ */
+void qset_clear(struct whc *whc, struct whc_qset *qset)
+{
+	qset->td_start = qset->td_end = qset->ntds = 0;
+	qset->remove = 0;
+
+	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+	qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
+	qset->qh.err_count = 0;
+	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+	qset->qh.scratch[0] = 0;
+	qset->qh.scratch[1] = 0;
+	qset->qh.scratch[2] = 0;
+
+	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
+
+	init_completion(&qset->remove_complete);
+}
+
+/**
+ * get_qset - get the qset for an async endpoint
+ *
+ * A new qset is created if one does not already exist.
+ */
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
+				 gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+
+	qset = urb->ep->hcpriv;
+	if (qset == NULL) {
+		qset = qset_alloc(whc, mem_flags);
+		if (qset == NULL)
+			return NULL;
+
+		qset->ep = urb->ep;
+		urb->ep->hcpriv = qset;
+		qset_fill_qh(qset, urb);
+	}
+	return qset;
+}
+
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
+{
+	list_del_init(&qset->list_node);
+	complete(&qset->remove_complete);
+}
+
+/**
+ * qset_add_qtds - add qTDs for an URB to a qset
+ *
+ * Returns true if the list (ASL/PZL) must be updated because (for a
+ * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
+ */
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
+{
+	struct whc_std *std;
+	enum whc_update update = 0;
+
+	list_for_each_entry(std, &qset->stds, list_node) {
+		struct whc_qtd *qtd;
+		uint32_t status;
+
+		if (qset->ntds >= WHCI_QSET_TD_MAX
+		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
+			break;
+
+		if (std->qtd)
+			continue; /* already has a qTD */
+
+		qtd = std->qtd = &qset->qtd[qset->td_end];
+
+		/* Fill in setup bytes for control transfers. */
+		if (usb_pipecontrol(std->urb->pipe))
+			memcpy(qtd->setup, std->urb->setup_packet, 8);
+
+		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
+
+		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
+			status |= QTD_STS_LAST_PKT;
+
+		/*
+		 * For an IN transfer the iAlt field should be set so
+		 * the h/w will automatically advance to the next
+		 * transfer. However, if there are 8 or more TDs
+		 * remaining in this transfer then iAlt cannot be set
+		 * as it could point to somewhere in this transfer.
+		 */
+		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
+			int ialt;
+			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
+			status |= QTD_STS_IALT(ialt);
+		} else if (usb_pipein(std->urb->pipe))
+			qset->pause_after_urb = std->urb;
+
+		if (std->num_pointers)
+			qtd->options = cpu_to_le32(QTD_OPT_IOC);
+		else
+			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
+		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
+
+		qtd->status = cpu_to_le32(status);
+
+		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
+			update = WHC_UPDATE_UPDATED;
+
+		if (++qset->td_end >= WHCI_QSET_TD_MAX)
+			qset->td_end = 0;
+		qset->ntds++;
+	}
+
+	return update;
+}
+
+/**
+ * qset_remove_qtd - remove the first qTD from a qset.
+ *
+ * The qTD might still be active (if it's part of an IN URB that
+ * resulted in a short read) so ensure it's deactivated.
+ */
+static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
+{
+	qset->qtd[qset->td_start].status = 0;
+
+	if (++qset->td_start >= WHCI_QSET_TD_MAX)
+		qset->td_start = 0;
+	qset->ntds--;
+}
+
+/**
+ * qset_free_std - remove an sTD and free it.
+ * @whc: the WHCI host controller
+ * @std: the sTD to remove and free.
+ */
+void qset_free_std(struct whc *whc, struct whc_std *std)
+{
+	list_del(&std->list_node);
+	if (std->num_pointers) {
+		dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+				 std->num_pointers * sizeof(struct whc_page_list_entry),
+				 DMA_TO_DEVICE);
+		kfree(std->pl_virt);
+	}
+
+	kfree(std);
+}
+
+/**
+ * qset_remove_qtds - remove an URB's qTDs (and sTDs).
+ */
+static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
+			     struct urb *urb)
+{
+	struct whc_std *std, *t;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb != urb)
+			break;
+		if (std->qtd != NULL)
+			qset_remove_qtd(whc, qset);
+		qset_free_std(whc, std);
+	}
+}
+
+/**
+ * qset_free_stds - free any remaining sTDs for an URB.
+ */
+static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
+{
+	struct whc_std *std, *t;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb == urb)
+			qset_free_std(qset->whc, std);
+	}
+}
+
+static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
+{
+	dma_addr_t dma_addr = std->dma_addr;
+	dma_addr_t sp, ep;
+	size_t std_len = std->len;
+	size_t pl_len;
+	int p;
+
+	sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
+	ep = dma_addr + std_len;
+	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+
+	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+	std->pl_virt = kmalloc(pl_len, mem_flags);
+	if (std->pl_virt == NULL)
+		return -ENOMEM;
+	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+
+	for (p = 0; p < std->num_pointers; p++) {
+		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+		dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
+	}
+
+	return 0;
+}
+
+/**
+ * urb_dequeue_work - executes the ASL/PZL update and gives the URB back to the system.
+ */
+static void urb_dequeue_work(struct work_struct *work)
+{
+	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
+	struct whc_qset *qset = wurb->qset;
+	struct whc *whc = qset->whc;
+	unsigned long flags;
+
+	if (wurb->is_async)
+		asl_update(whc, WUSBCMD_ASYNC_UPDATED
+			   | WUSBCMD_ASYNC_SYNCED_DB
+			   | WUSBCMD_ASYNC_QSET_RM);
+	else
+		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
+			   | WUSBCMD_PERIODIC_SYNCED_DB
+			   | WUSBCMD_PERIODIC_QSET_RM);
+
+	spin_lock_irqsave(&whc->lock, flags);
+	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
+	spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * qset_add_urb - add an urb to the qset's queue.
+ *
+ * The URB is chopped into sTDs, one for each qTD that will be required.
+ * At least one qTD (and sTD) is required even if the transfer has no
+ * data (e.g., for some control transfers).
+ */
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+	gfp_t mem_flags)
+{
+	struct whc_urb *wurb;
+	int remaining = urb->transfer_buffer_length;
+	u64 transfer_dma = urb->transfer_dma;
+	int ntds_remaining;
+
+	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+	if (ntds_remaining == 0)
+		ntds_remaining = 1;
+
+	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
+	if (wurb == NULL)
+		goto err_no_mem;
+	urb->hcpriv = wurb;
+	wurb->qset = qset;
+	wurb->urb = urb;
+	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
+
+	while (ntds_remaining) {
+		struct whc_std *std;
+		size_t std_len;
+
+		std = kmalloc(sizeof(struct whc_std), mem_flags);
+		if (std == NULL)
+			goto err_no_mem;
+
+		std_len = remaining;
+		if (std_len > QTD_MAX_XFER_SIZE)
+			std_len = QTD_MAX_XFER_SIZE;
+
+		std->urb = urb;
+		std->dma_addr = transfer_dma;
+		std->len = std_len;
+		std->ntds_remaining = ntds_remaining;
+		std->qtd = NULL;
+
+		INIT_LIST_HEAD(&std->list_node);
+		list_add_tail(&std->list_node, &qset->stds);
+
+		if (std_len > WHCI_PAGE_SIZE) {
+			if (qset_fill_page_list(whc, std, mem_flags) < 0)
+				goto err_no_mem;
+		} else
+			std->num_pointers = 0;
+
+		ntds_remaining--;
+		remaining -= std_len;
+		transfer_dma += std_len;
+	}
+
+	return 0;
+
+err_no_mem:
+	qset_free_stds(qset, urb);
+	return -ENOMEM;
+}
+
+/**
+ * qset_remove_urb - remove an URB from the urb queue.
+ *
+ * The URB is returned to the USB subsystem.
+ */
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+			    struct urb *urb, int status)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+	struct whc_urb *wurb = urb->hcpriv;
+
+	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
+	/* Drop the lock as urb->complete() may enqueue another urb. */
+	spin_unlock(&whc->lock);
+	wusbhc_giveback_urb(wusbhc, urb, status);
+	spin_lock(&whc->lock);
+
+	kfree(wurb);
+}
+
+/**
+ * get_urb_status_from_qtd - get the completed urb status from qTD status
+ * @urb:    completed urb
+ * @status: qTD status
+ */
+static int get_urb_status_from_qtd(struct urb *urb, u32 status)
+{
+	if (status & QTD_STS_HALTED) {
+		if (status & QTD_STS_DBE)
+			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
+		else if (status & QTD_STS_BABBLE)
+			return -EOVERFLOW;
+		else if (status & QTD_STS_RCE)
+			return -ETIME;
+		return -EPIPE;
+	}
+	if (usb_pipein(urb->pipe)
+	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
+	    && urb->actual_length < urb->transfer_buffer_length)
+		return -EREMOTEIO;
+	return 0;
+}
+
+/**
+ * process_inactive_qtd - process an inactive (but not halted) qTD.
+ *
+ * Update the URB with the number of bytes transferred by the qTD.  If
+ * the URB is completely transferred, or (for an IN transfer only) the
+ * Last Packet Flag (LPF) is set, then the transfer is complete and the
+ * URB is returned to the system.
+ */
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+				 struct whc_qtd *qtd)
+{
+	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+	struct urb *urb = std->urb;
+	uint32_t status;
+	bool complete;
+
+	status = le32_to_cpu(qtd->status);
+
+	urb->actual_length += std->len - QTD_STS_TO_LEN(status);
+
+	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
+		complete = true;
+	else
+		complete = whc_std_last(std);
+
+	qset_remove_qtd(whc, qset);
+	qset_free_std(whc, std);
+
+	/*
+	 * If all transfers for this URB are complete, return it to the
+	 * USB subsystem.
+	 */
+	if (complete) {
+		qset_remove_qtds(whc, qset, urb);
+		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
+
+		/*
+		 * If iAlt isn't valid then the hardware didn't
+		 * advance iCur. Adjust the start and end pointers to
+		 * match iCur.
+		 */
+		if (!(status & QTD_STS_IALT_VALID))
+			qset->td_start = qset->td_end
+				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
+		qset->pause_after_urb = NULL;
+	}
+}
+
+/**
+ * process_halted_qtd - process a qset with a halted qtd
+ *
+ * Remove all the qTDs for the failed URB and return the failed URB to
+ * the USB subsystem.  Then remove all other qTDs so the qset can be
+ * removed.
+ *
+ * FIXME: this is the point where rate adaptation can be done.  If a
+ * transfer failed because it exceeded the maximum number of retries
+ * then it could be reactivated with a slower rate without having to
+ * remove the qset.
+ */
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+			       struct whc_qtd *qtd)
+{
+	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+	struct urb *urb = std->urb;
+	int urb_status;
+
+	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
+
+	qset_remove_qtds(whc, qset, urb);
+	qset_remove_urb(whc, qset, urb, urb_status);
+
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (qset->ntds == 0)
+			break;
+		qset_remove_qtd(whc, qset);
+		std->qtd = NULL;
+	}
+
+	qset->remove = 1;
+}
+
+void qset_free(struct whc *whc, struct whc_qset *qset)
+{
+	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
+}
+
+/**
+ * qset_delete - wait for a qset to be unused, then free it.
+ */
+void qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+	wait_for_completion(&qset->remove_complete);
+	qset_free(whc, qset);
+}
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
new file mode 100644
index 0000000..1d2a53b
--- /dev/null
+++ b/drivers/usb/host/whci/whcd.h
@@ -0,0 +1,197 @@
+/*
+ * Wireless Host Controller (WHC) private header.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __WHCD_H
+#define __WHCD_H
+
+#include <linux/uwb/whci.h>
+#include <linux/workqueue.h>
+
+#include "whci-hc.h"
+
+/* Generic command timeout. */
+#define WHC_GENCMD_TIMEOUT_MS 100
+
+
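+/*
+ * struct whc - private state for a Wireless Host Controller.
+ *
+ * One of these exists per WHC; it embeds the generic WUSB host
+ * controller state (wusbhc) and the UMC device the controller is
+ * part of.
+ */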
+struct whc {
+	struct wusbhc wusbhc;
+	struct umc_dev *umc;
+
+	resource_size_t base_phys;
+	void __iomem *base;
+	int irq;
+
+	u8 n_devices;
+	u8 n_keys;
+	u8 n_mmc_ies;
+
+	u64 *pz_list;
+	struct dn_buf_entry *dn_buf;
+	struct di_buf_entry *di_buf;
+	dma_addr_t pz_list_dma;
+	dma_addr_t dn_buf_dma;
+	dma_addr_t di_buf_dma;
+
+	spinlock_t   lock;
+	struct mutex mutex;
+
+	void *            gen_cmd_buf;
+	dma_addr_t        gen_cmd_buf_dma;
+	wait_queue_head_t cmd_wq;
+
+	struct workqueue_struct *workqueue;
+	struct work_struct       dn_work;
+
+	struct dma_pool *qset_pool;
+
+	struct list_head async_list;
+	struct list_head async_removed_list;
+	wait_queue_head_t async_list_wq;
+	struct work_struct async_work;
+
+	struct list_head periodic_list[5];
+	struct list_head periodic_removed_list;
+	wait_queue_head_t periodic_list_wq;
+	struct work_struct periodic_work;
+};
+
+#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
+
+/**
+ * struct whc_std - a software TD.
+ * @urb: the URB this sTD is for.
+ * @offset: start of the URB's data for this TD.
+ * @len: the length of data in the associated TD.
+ * @ntds_remaining: number of TDs (starting from this one) in this transfer.
+ *
+ * Queued URBs may require more TDs than are available in a qset so we
+ * use a list of these "software TDs" (sTDs) to hold per-TD data.
+ */
+struct whc_std {
+	struct urb *urb;
+	size_t len;
+	int    ntds_remaining;
+	struct whc_qtd *qtd;
+
+	struct list_head list_node;
+	int num_pointers;
+	dma_addr_t dma_addr;
+	struct whc_page_list_entry *pl_virt;
+};
+
+/**
+ * struct whc_urb - per URB host controller structure.
+ * @urb: the URB this struct is for.
+ * @qset: the qset associated to the URB.
+ * @dequeue_work: the work to remove the URB when dequeued.
+ * @is_async: whether the URB belongs to the asynchronous scheduler.
+ * @status: the status to be returned when calling wusbhc_giveback_urb.
+ */
+struct whc_urb {
+	struct urb *urb;
+	struct whc_qset *qset;
+	struct work_struct dequeue_work;
+	bool is_async;
+	int status;
+};
+
+/**
+ * whc_std_last - is this sTD the URB's last?
+ * @std: the sTD to check.
+ */
+static inline bool whc_std_last(struct whc_std *std)
+{
+	return std->ntds_remaining <= 1;
+}
+
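+/*
+ * Which changes have been made to a schedule list; used to decide
+ * whether the WHC needs to be told that the ASL or PZL has been
+ * updated.
+ */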
+enum whc_update {
+	WHC_UPDATE_ADDED   = 0x01,
+	WHC_UPDATE_REMOVED = 0x02,
+	WHC_UPDATE_UPDATED = 0x04,
+};
+
+/* init.c */
+int whc_init(struct whc *whc);
+void whc_clean_up(struct whc *whc);
+
+/* hw.c */
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
+
+/* wusb.c */
+int whc_wusbhc_start(struct wusbhc *wusbhc);
+void whc_wusbhc_stop(struct wusbhc *wusbhc);
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+		  u8 handle, struct wuie_hdr *wuie);
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+		const void *ptk, size_t key_size);
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+		const void *gtk, size_t key_size);
+int whc_set_cluster_id(struct whc *whc, u8 bcid);
+
+/* int.c */
+irqreturn_t whc_int_handler(struct usb_hcd *hcd);
+void whc_dn_work(struct work_struct *work);
+
+/* asl.c */
+void asl_start(struct whc *whc);
+void asl_stop(struct whc *whc);
+int  asl_init(struct whc *whc);
+void asl_clean_up(struct whc *whc);
+int  asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int  asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_async_work(struct work_struct *work);
+
+/* pzl.c */
+int  pzl_init(struct whc *whc);
+void pzl_clean_up(struct whc *whc);
+void pzl_start(struct whc *whc);
+void pzl_stop(struct whc *whc);
+int  pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int  pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_periodic_work(struct work_struct *work);
+
+/* qset.c */
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
+void qset_free(struct whc *whc, struct whc_qset *qset);
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+void qset_delete(struct whc *whc, struct whc_qset *qset);
+void qset_clear(struct whc *whc, struct whc_qset *qset);
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+		 gfp_t mem_flags);
+void qset_free_std(struct whc *whc, struct whc_std *std);
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+			    struct urb *urb, int status);
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+			       struct whc_qtd *qtd);
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+				 struct whc_qtd *qtd);
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
+void dump_qset(struct whc_qset *qset, struct device *dev);
+void pzl_update(struct whc *whc, uint32_t wusbcmd);
+void asl_update(struct whc *whc, uint32_t wusbcmd);
+
+#endif /* #ifndef __WHCD_H */
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
new file mode 100644
index 0000000..bff1eb7
--- /dev/null
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -0,0 +1,416 @@
+/*
+ * Wireless Host Controller (WHC) data structures.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef _WHCI_WHCI_HC_H
+#define _WHCI_WHCI_HC_H
+
+#include <linux/list.h>
+
+/**
+ * WHCI_PAGE_SIZE - page size used by WHCI
+ *
+ * WHCI assumes that the host system uses pages of 4096 octets.
+ */
+#define WHCI_PAGE_SIZE 4096
+
+
+/**
+ * QTD_MAX_XFER_SIZE - maximum number of bytes to transfer with a
+ * single qTD.
+ *
+ * This is 2^20 - 1.
+ */
+#define QTD_MAX_XFER_SIZE 1048575
+
+
+/**
+ * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
+ *
+ * This describes the data for a bulk, control or interrupt transfer.
+ *
+ * [WHCI] section 3.2.4
+ */
+struct whc_qtd {
+	__le32 status; /*< remaining transfer len and transfer status */
+	__le32 options;
+	__le64 page_list_ptr; /*< physical pointer to data buffer page list */
+	__u8   setup[8];      /*< setup data for control transfers */
+} __attribute__((packed));
+
+#define QTD_STS_ACTIVE     (1 << 31)  /* enable execution of transaction */
+#define QTD_STS_HALTED     (1 << 30)  /* transfer halted */
+#define QTD_STS_DBE        (1 << 29)  /* data buffer error */
+#define QTD_STS_BABBLE     (1 << 28)  /* babble detected */
+#define QTD_STS_RCE        (1 << 27)  /* retry count exceeded */
+#define QTD_STS_LAST_PKT   (1 << 26)  /* set Last Packet Flag in WUSB header */
+#define QTD_STS_INACTIVE   (1 << 25)  /* queue set is marked inactive */
+#define QTD_STS_IALT_VALID (1 << 23)                          /* iAlt field is valid */
+#define QTD_STS_IALT(i)    (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
+#define QTD_STS_LEN(l)     ((l) << 0) /* transfer length */
+#define QTD_STS_TO_LEN(s)  ((s) & 0x000fffff)
+
+#define QTD_OPT_IOC      (1 << 1) /* interrupt on complete */
+#define QTD_OPT_SMALL    (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
+ *
+ * This describes the data and other parameters for an isochronous
+ * transfer.
+ *
+ * [WHCI] section 3.2.5
+ */
+struct whc_itd {
+	__le16 presentation_time;    /*< presentation time for OUT transfers */
+	__u8   num_segments;         /*< number of data segments in segment list */
+	__u8   status;               /*< command execution status */
+	__le32 options;              /*< misc transfer options */
+	__le64 page_list_ptr;        /*< physical pointer to data buffer page list */
+	__le64 seg_list_ptr;         /*< physical pointer to segment list */
+} __attribute__((packed));
+
+#define ITD_STS_ACTIVE   (1 << 7) /* enable execution of transaction */
+#define ITD_STS_DBE      (1 << 5) /* data buffer error */
+#define ITD_STS_BABBLE   (1 << 4) /* babble detected */
+#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
+
+#define ITD_OPT_IOC      (1 << 1) /* interrupt on complete */
+#define ITD_OPT_SMALL    (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * Page list entry.
+ *
+ * A TD's page list must contain sufficient page list entries for the
+ * total data length in the TD.
+ *
+ * [WHCI] section 3.2.4.3
+ */
+struct whc_page_list_entry {
+	__le64 buf_ptr; /*< physical pointer to buffer */
+} __attribute__((packed));
+
+/**
+ * struct whc_seg_list_entry - Segment list entry.
+ *
+ * Describes a portion of the data buffer described in the containing
+ * qTD's page list.
+ *
+ * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
+ *           + qtd->seg_list_ptr[seg].offset;
+ *
+ * Segments can't cross page boundaries.
+ *
+ * [WHCI] section 3.2.5.5
+ */
+struct whc_seg_list_entry {
+	__le16 len;    /*< segment length */
+	__u8   idx;    /*< index into page list */
+	__u8   status; /*< segment status */
+	__le16 offset; /*< 12 bit offset into page */
+} __attribute__((packed));
+
+/**
+ * struct whc_qhead - endpoint and status information for a qset.
+ *
+ * [WHCI] section 3.2.6
+ */
+struct whc_qhead {
+	__le64 link; /*< next qset in list */
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le16 status;
+	__le16 err_count;  /*< transaction error count */
+	__le32 cur_window;
+	__le32 scratch[3]; /*< h/w scratch area */
+	union {
+		struct whc_qtd qtd;
+		struct whc_itd itd;
+	} overlay;
+} __attribute__((packed));
+
+#define QH_LINK_PTR_MASK (~0x03Full)
+#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
+#define QH_LINK_IQS      (1 << 4) /* isochronous queue set */
+#define QH_LINK_NTDS(n)  (((n) - 1) << 1) /* number of TDs in queue set */
+#define QH_LINK_T        (1 << 0) /* last queue set in periodic schedule list */
+
+#define QH_INFO1_EP(e)           ((e) << 0)  /* endpoint number */
+#define QH_INFO1_DIR_IN          (1 << 4)    /* IN transfer */
+#define QH_INFO1_DIR_OUT         (0 << 4)    /* OUT transfer */
+#define QH_INFO1_TR_TYPE_CTRL    (0x0 << 5)  /* control transfer */
+#define QH_INFO1_TR_TYPE_ISOC    (0x1 << 5)  /* isochronous transfer */
+#define QH_INFO1_TR_TYPE_BULK    (0x2 << 5)  /* bulk transfer */
+#define QH_INFO1_TR_TYPE_INT     (0x3 << 5)  /* interrupt */
+#define QH_INFO1_TR_TYPE_LP_INT  (0x7 << 5)  /* low power interrupt */
+#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8)  /* index into device info buffer */
+#define QH_INFO1_SET_INACTIVE    (1 << 15)   /* set inactive after transfer */
+#define QH_INFO1_MAX_PKT_LEN(l)  ((l) << 16) /* maximum packet length */
+
+#define QH_INFO2_BURST(b)        ((b) << 0)  /* maximum burst length */
+#define QH_INFO2_DBP(p)          ((p) << 5)  /* data burst policy (see [WUSB] table 5-7) */
+#define QH_INFO2_MAX_COUNT(c)    ((c) << 8)  /* max isoc/int pkts per zone */
+#define QH_INFO2_RQS             (1 << 15)   /* reactivate queue set */
+#define QH_INFO2_MAX_RETRY(r)    ((r) << 16) /* maximum transaction retries */
+#define QH_INFO2_MAX_SEQ(s)      ((s) << 20) /* maximum sequence number */
+#define QH_INFO3_MAX_DELAY(d)    ((d) << 0)  /* maximum stream delay in 125 us units (isoc only) */
+#define QH_INFO3_INTERVAL(i)     ((i) << 16) /* segment interval in 125 us units (isoc only) */
+
+#define QH_INFO3_TX_RATE_53_3    (0 << 24)
+#define QH_INFO3_TX_RATE_80      (1 << 24)
+#define QH_INFO3_TX_RATE_106_7   (2 << 24)
+#define QH_INFO3_TX_RATE_160     (3 << 24)
+#define QH_INFO3_TX_RATE_200     (4 << 24)
+#define QH_INFO3_TX_RATE_320     (5 << 24)
+#define QH_INFO3_TX_RATE_400     (6 << 24)
+#define QH_INFO3_TX_RATE_480     (7 << 24)
+#define QH_INFO3_TX_PWR(p)       ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
+
+#define QH_STATUS_FLOW_CTRL      (1 << 15)
+#define QH_STATUS_ICUR(i)        ((i) << 5)
+#define QH_STATUS_TO_ICUR(s)     (((s) >> 5) & 0x7)
+
+/**
+ * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
+ *
+ * Returns the QH type field for a USB core pipe type.
+ */
+static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
+{
+	static const unsigned type[] = {
+		[PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
+		[PIPE_INTERRUPT]   = QH_INFO1_TR_TYPE_INT,
+		[PIPE_CONTROL]     = QH_INFO1_TR_TYPE_CTRL,
+		[PIPE_BULK]        = QH_INFO1_TR_TYPE_BULK,
+	};
+	return type[usb_pipetype(pipe)];
+}
+
+/**
+ * Maximum number of TDs in a qset.
+ */
+#define WHCI_QSET_TD_MAX 8
+
+/**
+ * struct whc_qset - WUSB data transfers to a specific endpoint
+ * @qh: the QHead of this qset
+ * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
+ * transfers)
+ * @itd: up to 8 iTDs (for qsets for isochronous transfers)
+ * @qset_dma: DMA address for this qset
+ * @whc: WHCI HC this qset is for
+ * @ep: endpoint
+ * @stds: list of sTDs queued to this qset
+ * @ntds: number of qTDs queued (not necessarily the same as nTDs
+ * field in the QH)
+ * @td_start: index of the first qTD in the list
+ * @td_end: index of next free qTD in the list (provided
+ *          ntds < WHCI_QSET_TD_MAX)
+ *
+ * Queue Sets (qsets) are added to the asynchronous schedule list
+ * (ASL) or the periodic zone list (PZL).
+ *
+ * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
+ * Each TD may refer to at most 1 MiB of data. If a single transfer
+ * has > 8 MiB of data, TDs can be reused as they are completed since
+ * the TD list is used as a circular buffer.  Similarly, several
+ * (smaller) transfers may be queued in a qset.
+ *
+ * WHCI controllers may cache portions of the qsets in the ASL and
+ * PZL, requiring the WHCD to inform the WHC that the lists have been
+ * updated (fields changed or qsets inserted or removed).  For safe
+ * insertion and removal of qsets from the lists the schedule must be
+ * stopped to avoid races in updating the QH link pointers.
+ *
+ * Since the HC is free to execute qsets in any order, all transfers
+ * to an endpoint should use the same qset to ensure transfers are
+ * executed in the order they're submitted.
+ *
+ * [WHCI] section 3.2.3
+ */
+struct whc_qset {
+	struct whc_qhead qh;
+	union {
+		struct whc_qtd qtd[WHCI_QSET_TD_MAX];
+		struct whc_itd itd[WHCI_QSET_TD_MAX];
+	};
+
+	/* private data for WHCD */
+	dma_addr_t qset_dma;
+	struct whc *whc;
+	struct usb_host_endpoint *ep;
+	struct list_head stds;
+	int ntds;
+	int td_start;
+	int td_end;
+	struct list_head list_node;
+	unsigned in_sw_list:1;
+	unsigned in_hw_list:1;
+	unsigned remove:1;
+	struct urb *pause_after_urb;
+	struct completion remove_complete;
+	int max_burst;
+	int max_seq;
+};
+
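+/**
+ * whc_qset_set_link_ptr - set a qset's link pointer
+ * @ptr: the link pointer field to update
+ * @target: DMA address of the next qset, or 0 to terminate the list
+ *
+ * For a non-zero target the pointer bits are replaced and the T
+ * (terminate) bit cleared, preserving the other link flags; a zero
+ * target simply marks the end of the list.
+ */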
+static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
+{
+	if (target)
+		*ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
+	else
+		*ptr = QH_LINK_T;
+}
+
+/**
+ * struct di_buf_entry - Device Information (DI) buffer entry.
+ *
+ * There's one of these per connected device.
+ */
+struct di_buf_entry {
+	__le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
+	__le32 addr_sec_info;        /*< addressing and security info */
+	__le32 reserved[7];
+} __attribute__((packed));
+
+#define WHC_DI_SECURE           (1 << 31)
+#define WHC_DI_DISABLE          (1 << 30)
+#define WHC_DI_KEY_IDX(k)       ((k) << 8)
+#define WHC_DI_KEY_IDX_MASK     0x0000ff00
+#define WHC_DI_DEV_ADDR(a)      ((a) << 0)
+#define WHC_DI_DEV_ADDR_MASK    0x000000ff
+
+/**
+ * struct dn_buf_entry - Device Notification (DN) buffer entry.
+ *
+ * [WHCI] section 3.2.8
+ */
+struct dn_buf_entry {
+	__u8   msg_size;    /*< number of octets of valid DN data */
+	__u8   reserved1;
+	__u8   src_addr;    /*< source address */
+	__u8   status;      /*< buffer entry status */
+	__le32 tkid;        /*< TKID for source device, valid if secure bit is set */
+	__u8   dn_data[56]; /*< up to 56 octets of DN data */
+} __attribute__((packed));
+
+#define WHC_DN_STATUS_VALID  (1 << 7) /* buffer entry is valid */
+#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
+
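+/* Each dn_buf_entry is 64 octets so a 4096 octet DN buffer holds 64 entries. */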
+#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
+
+/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
+   data. [WHCI] section 2.4.7. */
+#define WHC_GEN_CMD_DATA_LEN 256
+
+/*
+ * HC registers.
+ *
+ * [WHCI] section 2.4
+ */
+
+#define WHCIVERSION          0x00
+
+#define WHCSPARAMS           0x04
+#  define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
+#  define WHCSPARAMS_TO_N_KEYS(p)    (((p) >> 8) & 0xff)
+#  define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
+
+#define WUSBCMD              0x08
+#  define WUSBCMD_BCID(b)            ((b) << 16)
+#  define WUSBCMD_BCID_MASK          (0xff << 16)
+#  define WUSBCMD_ASYNC_QSET_RM      (1 << 12)
+#  define WUSBCMD_PERIODIC_QSET_RM   (1 << 11)
+#  define WUSBCMD_WUSBSI(s)          ((s) << 8)
+#  define WUSBCMD_WUSBSI_MASK        (0x7 << 8)
+#  define WUSBCMD_ASYNC_SYNCED_DB    (1 << 7)
+#  define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
+#  define WUSBCMD_ASYNC_UPDATED      (1 << 5)
+#  define WUSBCMD_PERIODIC_UPDATED   (1 << 4)
+#  define WUSBCMD_ASYNC_EN           (1 << 3)
+#  define WUSBCMD_PERIODIC_EN        (1 << 2)
+#  define WUSBCMD_WHCRESET           (1 << 1)
+#  define WUSBCMD_RUN                (1 << 0)
+
+#define WUSBSTS              0x0c
+#  define WUSBSTS_ASYNC_SCHED             (1 << 15)
+#  define WUSBSTS_PERIODIC_SCHED          (1 << 14)
+#  define WUSBSTS_DNTS_SCHED              (1 << 13)
+#  define WUSBSTS_HCHALTED                (1 << 12)
+#  define WUSBSTS_GEN_CMD_DONE            (1 << 9)
+#  define WUSBSTS_CHAN_TIME_ROLLOVER      (1 << 8)
+#  define WUSBSTS_DNTS_OVERFLOW           (1 << 7)
+#  define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
+#  define WUSBSTS_HOST_ERR                (1 << 5)
+#  define WUSBSTS_ASYNC_SCHED_SYNCED      (1 << 4)
+#  define WUSBSTS_PERIODIC_SCHED_SYNCED   (1 << 3)
+#  define WUSBSTS_DNTS_INT                (1 << 2)
+#  define WUSBSTS_ERR_INT                 (1 << 1)
+#  define WUSBSTS_INT                     (1 << 0)
+#  define WUSBSTS_INT_MASK                0x3ff
+
+#define WUSBINTR             0x10
+#  define WUSBINTR_GEN_CMD_DONE             (1 << 9)
+#  define WUSBINTR_CHAN_TIME_ROLLOVER       (1 << 8)
+#  define WUSBINTR_DNTS_OVERFLOW            (1 << 7)
+#  define WUSBINTR_BPST_ADJUSTMENT_CHANGED  (1 << 6)
+#  define WUSBINTR_HOST_ERR                 (1 << 5)
+#  define WUSBINTR_ASYNC_SCHED_SYNCED       (1 << 4)
+#  define WUSBINTR_PERIODIC_SCHED_SYNCED    (1 << 3)
+#  define WUSBINTR_DNTS_INT                 (1 << 2)
+#  define WUSBINTR_ERR_INT                  (1 << 1)
+#  define WUSBINTR_INT                      (1 << 0)
+#  define WUSBINTR_ALL 0x3ff
+
+#define WUSBGENCMDSTS        0x14
+#  define WUSBGENCMDSTS_ACTIVE (1 << 31)
+#  define WUSBGENCMDSTS_ERROR  (1 << 24)
+#  define WUSBGENCMDSTS_IOC    (1 << 23)
+#  define WUSBGENCMDSTS_MMCIE_ADD 0x01
+#  define WUSBGENCMDSTS_MMCIE_RM  0x02
+#  define WUSBGENCMDSTS_SET_MAS   0x03
+#  define WUSBGENCMDSTS_CHAN_STOP 0x04
+#  define WUSBGENCMDSTS_RWP_EN    0x05
+
+#define WUSBGENCMDPARAMS     0x18
+#define WUSBGENADDR          0x20
+#define WUSBASYNCLISTADDR    0x28
+#define WUSBDNTSBUFADDR      0x30
+#define WUSBDEVICEINFOADDR   0x38
+
+#define WUSBSETSECKEYCMD     0x40
+#  define WUSBSETSECKEYCMD_SET    (1 << 31)
+#  define WUSBSETSECKEYCMD_ERASE  (1 << 30)
+#  define WUSBSETSECKEYCMD_GTK    (1 << 8)
+#  define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
+
+#define WUSBTKID             0x44
+#define WUSBSECKEY           0x48
+#define WUSBPERIODICLISTBASE 0x58
+#define WUSBMASINDEX         0x60
+
+#define WUSBDNTSCTRL         0x64
+#  define WUSBDNTSCTRL_ACTIVE      (1 << 31)
+#  define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
+#  define WUSBDNTSCTRL_SLOTS(s)    ((s) << 0)
+
+#define WUSBTIME             0x68
+#define WUSBBPST             0x6c
+#define WUSBDIBUPDATED       0x70
+
+#endif /* #ifndef _WHCI_WHCI_HC_H */
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
new file mode 100644
index 0000000..66e4ddc
--- /dev/null
+++ b/drivers/usb/host/whci/wusb.c
@@ -0,0 +1,241 @@
+/*
+ * Wireless Host Controller (WHC) WUSB operations.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+#if D_LOCAL >= 1
+static void dump_di(struct whc *whc, int idx)
+{
+	struct di_buf_entry *di = &whc->di_buf[idx];
+	struct device *dev = &whc->umc->dev;
+	char buf[128];
+
+	bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS);
+
+	d_printf(1, dev, "DI[%d]\n", idx);
+	d_printf(1, dev, "  availability: %s\n", buf);
+	d_printf(1, dev, "  %c%c key idx: %d dev addr: %d\n",
+		 (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
+		 (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
+		 (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
+		 (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
+}
+#else
+static inline void dump_di(struct whc *whc, int idx)
+{
+}
+#endif
+
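+/*
+ * Tell the WHC that the device information buffer entry for @idx has
+ * changed and wait for the hardware to acknowledge the update.
+ */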
+static int whc_update_di(struct whc *whc, int idx)
+{
+	int offset = idx / 32;
+	u32 bit = 1 << (idx % 32);
+
+	dump_di(whc, idx);
+
+	le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
+
+	return whci_wait_for(&whc->umc->dev,
+			     whc->base + WUSBDIBUPDATED + offset, bit, 0,
+			     100, "DI update");
+}
+
+/*
+ * WHCI starts and stops MMCs based on there being a valid GTK, so
+ * these need only start/stop the asynchronous and periodic schedules.
+ */
+
+int whc_wusbhc_start(struct wusbhc *wusbhc)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	asl_start(whc);
+	pzl_start(whc);
+
+	return 0;
+}
+
+void whc_wusbhc_stop(struct wusbhc *wusbhc)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	pzl_stop(whc);
+	asl_stop(whc);
+}
+
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+		  u8 handle, struct wuie_hdr *wuie)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 params;
+
+	params = (interval << 24)
+		| (repeat_cnt << 16)
+		| (wuie->bLength << 8)
+		| handle;
+
+	return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
+}
+
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 params;
+
+	params = handle;
+
+	return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
+}
+
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	if (stream_index >= 0)
+		whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
+
+	return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
+}
+
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int idx = wusb_dev->port_idx;
+	struct di_buf_entry *di = &whc->di_buf[idx];
+	int ret;
+
+	mutex_lock(&whc->mutex);
+
+	uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
+	di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
+	di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
+
+	ret = whc_update_di(whc, idx);
+
+	mutex_unlock(&whc->mutex);
+
+	return ret;
+}
+
+/*
+ * Set the number of Device Notification Time Slots (DNTS) and enable
+ * device notifications.
+ */
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 dntsctrl;
+
+	dntsctrl = WUSBDNTSCTRL_ACTIVE
+		| WUSBDNTSCTRL_INTERVAL(interval)
+		| WUSBDNTSCTRL_SLOTS(slots);
+
+	le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
+
+	return 0;
+}
+
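+/*
+ * Load a key into the WHC's key table at @key_index and wait for the
+ * set key command to complete.
+ */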
+static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
+		       const void *key, size_t key_size, bool is_gtk)
+{
+	uint32_t setkeycmd;
+	uint32_t seckey[4];
+	int i;
+	int ret;
+
+	memcpy(seckey, key, key_size);
+	setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
+	if (is_gtk)
+		setkeycmd |= WUSBSETSECKEYCMD_GTK;
+
+	le_writel(tkid, whc->base + WUSBTKID);
+	for (i = 0; i < 4; i++)
+		le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
+	le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
+
+	ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
+			    WUSBSETSECKEYCMD_SET, 0, 100, "set key");
+
+	return ret;
+}
+
+/**
+ * whc_set_ptk - set the PTK to use for a device.
+ *
+ * The index into the key table for this PTK is the same as the
+ * device's port index.
+ */
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+		const void *ptk, size_t key_size)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	struct di_buf_entry *di = &whc->di_buf[port_idx];
+	int ret;
+
+	mutex_lock(&whc->mutex);
+
+	if (ptk) {
+		ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
+		if (ret)
+			goto out;
+
+		di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
+		di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
+	} else
+		di->addr_sec_info &= ~WHC_DI_SECURE;
+
+	ret = whc_update_di(whc, port_idx);
+out:
+	mutex_unlock(&whc->mutex);
+	return ret;
+}
+
+/**
+ * whc_set_gtk - set the GTK for subsequent broadcast packets
+ *
+ * The GTK is stored in the last entry in the key table (the previous
+ * N_DEVICES entries are for the per-device PTKs).
+ */
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+		const void *gtk, size_t key_size)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int ret;
+
+	mutex_lock(&whc->mutex);
+
+	ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
+
+	mutex_unlock(&whc->mutex);
+
+	return ret;
+}
+
+int whc_set_cluster_id(struct whc *whc, u8 bcid)
+{
+	whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
+	return 0;
+}
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 0fb114c..878c77c 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -355,13 +355,14 @@
 	if (mdc800->camera_request_ready>0)
 	{
 		mdc800->camera_request_ready=0;
-		err ("timeout waiting for camera.");
+		dev_err(&mdc800->dev->dev, "timeout waiting for camera.\n");
 		return -1;
 	}
 	
 	if (mdc800->state == NOT_CONNECTED)
 	{
-		warn ("Camera gets disconnected during waiting for irq.");
+		printk(KERN_WARNING "mdc800: Camera gets disconnected "
+		       "during waiting for irq.\n");
 		mdc800->camera_request_ready=0;
 		return -2;
 	}
@@ -379,7 +380,8 @@
 	int status = urb->status;
 
 	if (status != 0)
-		err ("writing command fails (status=%i)", status);
+		dev_err(&mdc800->dev->dev,
+			"writing command fails (status=%i)\n", status);
 	else
 		mdc800->state=READY;
 	mdc800->written = 1;
@@ -406,7 +408,8 @@
 			mdc800->state=READY;
 		}
 	} else {
-		err ("request bytes fails (status:%i)", status);
+		dev_err(&mdc800->dev->dev,
+			"request bytes fails (status:%i)\n", status);
 	}
 	mdc800->downloaded = 1;
 	wake_up (&mdc800->download_wait);
@@ -443,13 +446,14 @@
 
 	if (mdc800->dev != NULL)
 	{
-		warn ("only one Mustek MDC800 is supported.");
+		dev_warn(&intf->dev, "only one Mustek MDC800 is supported.\n");
 		return -ENODEV;
 	}
 
 	if (dev->descriptor.bNumConfigurations != 1)
 	{
-		err ("probe fails -> wrong Number of Configuration");
+		dev_err(&intf->dev,
+			"probe fails -> wrong Number of Configuration\n");
 		return -ENODEV;
 	}
 	intf_desc = intf->cur_altsetting;
@@ -461,7 +465,7 @@
 		|| ( intf_desc->desc.bNumEndpoints != 4)
 	)
 	{
-		err ("probe fails -> wrong Interface");
+		dev_err(&intf->dev, "probe fails -> wrong Interface\n");
 		return -ENODEV;
 	}
 
@@ -482,19 +486,19 @@
 		}
 		if (mdc800->endpoint[i] == -1)
 		{
-			err ("probe fails -> Wrong Endpoints.");
+			dev_err(&intf->dev, "probe fails -> Wrong Endpoints.\n");
 			return -ENODEV;
 		}
 	}
 
 
-	info ("Found Mustek MDC800 on USB.");
+	dev_info(&intf->dev, "Found Mustek MDC800 on USB.\n");
 
 	mutex_lock(&mdc800->io_lock);
 
 	retval = usb_register_dev(intf, &mdc800_class);
 	if (retval) {
-		err ("Not able to get a minor for this device.");
+		dev_err(&intf->dev, "Not able to get a minor for this device.\n");
 		return -ENODEV;
 	}
 
@@ -570,7 +574,7 @@
 		mdc800->dev = NULL;
 		usb_set_intfdata(intf, NULL);
 	}
-	info ("Mustek MDC800 disconnected from USB.");
+	dev_info(&intf->dev, "Mustek MDC800 disconnected from USB.\n");
 }
 
 
@@ -644,7 +648,8 @@
 	mdc800->irq_urb->dev = mdc800->dev;
 	retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL);
 	if (retval) {
-		err ("request USB irq fails (submit_retval=%i).", retval);
+		dev_err(&mdc800->dev->dev,
+			"request USB irq fails (submit_retval=%i).\n", retval);
 		errn = -EIO;
 		goto error_out;
 	}
@@ -701,7 +706,8 @@
 	}
 	if (mdc800->state == WORKING)
 	{
-		warn ("Illegal State \"working\" reached during read ?!");
+		printk(KERN_WARNING "mdc800: Illegal State \"working\""
+		       "reached during read ?!\n");
 		mutex_unlock(&mdc800->io_lock);
 		return -EBUSY;
 	}
@@ -733,7 +739,9 @@
 				mdc800->download_urb->dev = mdc800->dev;
 				retval = usb_submit_urb (mdc800->download_urb, GFP_KERNEL);
 				if (retval) {
-					err ("Can't submit download urb (retval=%i)",retval);
+					dev_err(&mdc800->dev->dev,
+						"Can't submit download urb "
+						"(retval=%i)\n", retval);
 					mutex_unlock(&mdc800->io_lock);
 					return len-left;
 				}
@@ -742,7 +750,10 @@
 				mdc800->downloaded = 0;
 				if (mdc800->download_urb->status != 0)
 				{
-					err ("request download-bytes fails (status=%i)",mdc800->download_urb->status);
+					dev_err(&mdc800->dev->dev,
+						"request download-bytes fails "
+						"(status=%i)\n",
+						mdc800->download_urb->status);
 					mutex_unlock(&mdc800->io_lock);
 					return len-left;
 				}
@@ -839,7 +850,8 @@
 
 			if (mdc800_usb_waitForIRQ (0,TO_GET_READY))
 			{
-				err ("Camera didn't get ready.\n");
+				dev_err(&mdc800->dev->dev,
+					"Camera didn't get ready.\n");
 				mutex_unlock(&mdc800->io_lock);
 				return -EIO;
 			}
@@ -851,7 +863,9 @@
 			mdc800->write_urb->dev = mdc800->dev;
 			retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL);
 			if (retval) {
-				err ("submitting write urb fails (retval=%i)", retval);
+				dev_err(&mdc800->dev->dev,
+					"submitting write urb fails "
+					"(retval=%i)\n", retval);
 				mutex_unlock(&mdc800->io_lock);
 				return -EIO;
 			}
@@ -870,7 +884,9 @@
 				case 0x3e: /* Take shot in Fine Mode (WCam Mode) */
 					if (mdc800->pic_len < 0)
 					{
-						err ("call 0x07 before 0x05,0x3e");
+						dev_err(&mdc800->dev->dev,
+							"call 0x07 before "
+							"0x05,0x3e\n");
 						mdc800->state=READY;
 						mutex_unlock(&mdc800->io_lock);
 						return -EIO;
@@ -890,7 +906,7 @@
 
 						if (mdc800_usb_waitForIRQ (1,TO_READ_FROM_IRQ))
 						{
-							err ("requesting answer from irq fails");
+							dev_err(&mdc800->dev->dev, "requesting answer from irq fails\n");
 							mutex_unlock(&mdc800->io_lock);
 							return -EIO;
 						}
@@ -918,7 +934,7 @@
 					{
 						if (mdc800_usb_waitForIRQ (0,TO_DEFAULT_COMMAND))
 						{
-							err ("Command Timeout.");
+							dev_err(&mdc800->dev->dev, "Command Timeout.\n");
 							mutex_unlock(&mdc800->io_lock);
 							return -EIO;
 						}
@@ -1018,7 +1034,8 @@
 	if (retval)
 		goto cleanup_on_fail;
 
-	info (DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 
@@ -1028,7 +1045,7 @@
 
 	if (mdc800 != NULL)
 	{
-		err ("can't alloc memory!");
+		printk(KERN_ERR "mdc800: can't alloc memory!\n");
 
 		kfree(mdc800->download_urb_buffer);
 		kfree(mdc800->write_urb_buffer);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 4ea50e0..e463db5 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,6 +42,15 @@
 	  To compile this driver as a module, choose M here.  The module
 	  will be called adutux.
 
+config USB_SEVSEG
+	tristate "USB 7-Segment LED Display"
+	depends on USB
+	help
+	  Say Y here if you have a USB 7-Segment Display by Delcom.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called usbsevseg.
+
 config USB_RIO500
 	tristate "USB Diamond Rio500 support"
 	depends on USB
@@ -271,3 +280,18 @@
 	  The firmware for this driver must be extracted from the MacOS
 	  driver beforehand. Tools for doing so are available at
 	  http://bersace03.free.fr
+
+config USB_VST
+	tristate "USB VST driver"
+	depends on USB
+	help
+	  This driver is intended for Vernier Software Technologies
+	  bulk USB devices such as their Ocean-Optics spectrometers or
+	  Labquest.
+	  It is a bulk channel driver with configurable read and write
+	  timeouts.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vstusb.
+
+
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 45b4e12..1334f7b 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -26,6 +26,8 @@
 obj-$(CONFIG_USB_TEST)		+= usbtest.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)	+= trancevibrator.o
 obj-$(CONFIG_USB_USS720)	+= uss720.o
+obj-$(CONFIG_USB_SEVSEG)	+= usbsevseg.o
+obj-$(CONFIG_USB_VST)		+= vstusb.o
 
 obj-$(CONFIG_USB_SISUSBVGA)	+= sisusbvga/
 
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 965f6ea..7b6922e 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -283,8 +283,8 @@
 
 	interface = usb_find_interface(&adu_driver, subminor);
 	if (!interface) {
-		err("%s - error, can't find device for minor %d",
-		    __func__, subminor);
+		printk(KERN_ERR "adutux: %s - error, can't find device for "
+		       "minor %d\n", __func__, subminor);
 		retval = -ENODEV;
 		goto exit_no_device;
 	}
@@ -416,7 +416,8 @@
 	/* verify that the device wasn't unplugged */
 	if (dev->udev == NULL) {
 		retval = -ENODEV;
-		err("No device or device unplugged %d", retval);
+		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
+		       retval);
 		goto exit;
 	}
 
@@ -576,7 +577,8 @@
 	/* verify that the device wasn't unplugged */
 	if (dev->udev == NULL) {
 		retval = -ENODEV;
-		err("No device or device unplugged %d", retval);
+		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
+		       retval);
 		goto exit;
 	}
 
@@ -645,7 +647,8 @@
 			retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
 			if (retval < 0) {
 				dev->out_urb_finished = 1;
-				err("Couldn't submit interrupt_out_urb %d", retval);
+				dev_err(&dev->udev->dev, "Couldn't submit "
+					"interrupt_out_urb %d\n", retval);
 				goto exit;
 			}
 
@@ -890,13 +893,14 @@
 	/* register this driver with the USB subsystem */
 	result = usb_register(&adu_driver);
 	if (result < 0) {
-		err("usb_register failed for the "__FILE__" driver. "
-		    "Error number %d", result);
+		printk(KERN_ERR "usb_register failed for the "__FILE__
+		       " driver. Error number %d\n", result);
 		goto exit;
 	}
 
-	info("adutux " DRIVER_DESC " " DRIVER_VERSION);
-	info("adutux is an experimental driver. Use at your own risk");
+	printk(KERN_INFO "adutux " DRIVER_DESC " " DRIVER_VERSION "\n");
+	printk(KERN_INFO "adutux is an experimental driver. "
+	       "Use at your own risk\n");
 
 exit:
 	dbg(2," %s : leave, return value %d", __func__, result);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index a076c24..1d8e39a 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -130,7 +130,8 @@
 exit:
 	retval = usb_submit_urb(pdata->urb, GFP_ATOMIC);
 	if (retval) {
-		err("%s - usb_submit_urb failed with result %d",
+		dev_err(&pdata->udev->dev,
+			"%s - usb_submit_urb failed with result %d\n",
 			__func__, retval);
 	}
 }
@@ -220,7 +221,7 @@
 		}
 	}
 	if (!int_in_endpointAddr) {
-		err("Could not find int-in endpoint");
+		dev_err(&iface->dev, "Could not find int-in endpoint\n");
 		return -EIO;
 	}
 
@@ -228,7 +229,7 @@
 	pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
 	if (!pdata) {
 		retval = -ENOMEM;
-		err("Out of memory");
+		dev_err(&iface->dev, "Out of memory\n");
 		goto error;
 	}
 
@@ -241,8 +242,8 @@
 	pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
 	if (!pdata->msgdata) {
 		retval = -ENOMEM;
-		err("appledisplay: Allocating buffer for control messages "
-			"failed");
+		dev_err(&iface->dev,
+			"Allocating buffer for control messages failed\n");
 		goto error;
 	}
 
@@ -250,7 +251,7 @@
 	pdata->urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!pdata->urb) {
 		retval = -ENOMEM;
-		err("appledisplay: Allocating URB failed");
+		dev_err(&iface->dev, "Allocating URB failed\n");
 		goto error;
 	}
 
@@ -259,7 +260,7 @@
 		GFP_KERNEL, &pdata->urb->transfer_dma);
 	if (!pdata->urbdata) {
 		retval = -ENOMEM;
-		err("appledisplay: Allocating URB buffer failed");
+		dev_err(&iface->dev, "Allocating URB buffer failed\n");
 		goto error;
 	}
 
@@ -270,7 +271,7 @@
 		pdata, 1);
 	if (usb_submit_urb(pdata->urb, GFP_KERNEL)) {
 		retval = -EIO;
-		err("appledisplay: Submitting URB failed");
+		dev_err(&iface->dev, "Submitting URB failed\n");
 		goto error;
 	}
 
@@ -280,7 +281,7 @@
 	pdata->bd = backlight_device_register(bl_name, NULL, pdata,
 						&appledisplay_bl_data);
 	if (IS_ERR(pdata->bd)) {
-		err("appledisplay: Backlight registration failed");
+		dev_err(&iface->dev, "Backlight registration failed\n");
 		goto error;
 	}
 
@@ -291,7 +292,8 @@
 
 	if (brightness < 0) {
 		retval = brightness;
-		err("appledisplay: Error while getting initial brightness: %d", retval);
+		dev_err(&iface->dev,
+			"Error while getting initial brightness: %d\n", retval);
 		goto error;
 	}
 
@@ -314,7 +316,7 @@
 					pdata->urbdata, pdata->urb->transfer_dma);
 			usb_free_urb(pdata->urb);
 		}
-		if (pdata->bd)
+		if (pdata->bd && !IS_ERR(pdata->bd))
 			backlight_device_unregister(pdata->bd);
 		kfree(pdata->msgdata);
 	}
@@ -352,7 +354,7 @@
 {
 	wq = create_singlethread_workqueue("appledisplay");
 	if (!wq) {
-		err("Could not create work queue\n");
+		printk(KERN_ERR "appledisplay: Could not create work queue\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 9379404..5720bfe 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -278,9 +278,9 @@
 
 	/* register this driver with the USB subsystem */
 	result = usb_register(&cypress_driver);
-	if (result) {
-		err("Function usb_register failed! Error number: %d\n", result);
-	}
+	if (result)
+		printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
+		       "Error number: %d\n", result);
 
 	return result;
 }
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 1cd9e7e..4fb3c38 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -422,13 +422,14 @@
 	int result;
 
 	result = usb_register(&cytherm_driver);
-	if (result) 
-	{	
-		err("usb_register failed. Error number %d", result);
+	if (result) {
+		printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
+		       "Error number: %d\n", result);
 		return result;
 	}
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 }
 
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 4b994a0..e762beb 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -50,7 +50,7 @@
 	unsigned char *buffer =  kmemdup(data, length, GFP_KERNEL);
 
 	if (!buffer) {
-		err("emi26: kmalloc(%d) failed.", length);
+		dev_err(&dev->dev, "kmalloc(%d) failed.\n", length);
 		return -ENOMEM;
 	}
 	/* Note: usb_control_msg returns negative value on error or length of the
@@ -64,11 +64,11 @@
 static int emi26_set_reset (struct usb_device *dev, unsigned char reset_bit)
 {
 	int response;
-	info("%s - %d", __func__, reset_bit);
+	dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit);
 	/* printk(KERN_DEBUG "%s - %d", __func__, reset_bit); */
 	response = emi26_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0);
 	if (response < 0) {
-		err("emi26: set_reset (%d) failed", reset_bit);
+		dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit);
 	}
 	return response;
 }
@@ -88,7 +88,8 @@
 
 	buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL);
 	if (!buf) {
-		err( "%s - error loading firmware: error = %d", __func__, -ENOMEM);
+		dev_err(&dev->dev, "%s - error loading firmware: error = %d\n",
+			__func__, -ENOMEM);
 		err = -ENOMEM;
 		goto wraperr;
 	}
@@ -106,14 +107,16 @@
 				    &dev->dev);
 	if (err) {
 	nofw:
-		err( "%s - request_firmware() failed", __func__);
+		dev_err(&dev->dev, "%s - request_firmware() failed\n",
+			__func__);
 		goto wraperr;
 	}
 
 	/* Assert reset (stop the CPU in the EMI) */
 	err = emi26_set_reset(dev,1);
 	if (err < 0) {
-		err( "%s - error loading firmware: error = %d", __func__, err);
+		dev_err(&dev->dev,"%s - error loading firmware: error = %d\n",
+			__func__, err);
 		goto wraperr;
 	}
 
@@ -254,7 +257,7 @@
 {
 	struct usb_device *dev = interface_to_usbdev(intf);
 
-	info("%s start", __func__);
+	dev_info(&intf->dev, "%s start\n", __func__);
 
 	emi26_load_firmware(dev);
 
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 5d859de..602ee05 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -73,7 +73,7 @@
 static int emi62_set_reset (struct usb_device *dev, unsigned char reset_bit)
 {
 	int response;
-	info("%s - %d", __func__, reset_bit);
+	dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit);
 	
 	response = emi62_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0);
 	if (response < 0) {
@@ -271,7 +271,7 @@
 	struct usb_device *dev = interface_to_usbdev(intf);
 	dev_dbg(&intf->dev, "emi62_probe\n");
 
-	info("%s start", __func__);
+	dev_info(&intf->dev, "%s start\n", __func__);
 
 	emi62_load_firmware(dev);
 
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 97c2809..79a7668 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -698,7 +698,7 @@
                 int retval = usb_bulk_msg(ftdi->udev,
                         usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr),
                          ftdi->bulk_in_buffer, ftdi->bulk_in_size,
-                        &packet_bytes, msecs_to_jiffies(50));
+                        &packet_bytes, 50);
                 if (packet_bytes > 2) {
                         ftdi->bulk_in_left = packet_bytes - 2;
                         ftdi->bulk_in_last = 1;
@@ -960,7 +960,7 @@
                 int retval = usb_bulk_msg(ftdi->udev,
                         usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr),
                          ftdi->bulk_in_buffer, ftdi->bulk_in_size,
-                        &packet_bytes, msecs_to_jiffies(500));
+                        &packet_bytes, 500);
                 char diag[30 *3 + 4];
                 char *d = diag;
                 int m = packet_bytes;
@@ -1880,7 +1880,7 @@
                 int retval = usb_bulk_msg(ftdi->udev,
                         usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr),
                          ftdi->bulk_in_buffer, ftdi->bulk_in_size,
-                        &packet_bytes, msecs_to_jiffies(100));
+                        &packet_bytes, 100);
                 if (packet_bytes > 2) {
                         char diag[30 *3 + 4];
                         char *d = diag;
@@ -2067,7 +2067,7 @@
                                 usb_rcvbulkpipe(ftdi->udev,
                                 ftdi->bulk_in_endpointAddr),
                                 ftdi->bulk_in_buffer, ftdi->bulk_in_size,
-                                &packet_bytes, msecs_to_jiffies(500));
+                                &packet_bytes, 500);
                         if (packet_bytes > 2) {
                                 char diag[30 *3 + 4];
                                 char *d = diag;
@@ -2176,7 +2176,7 @@
                 int retval = usb_bulk_msg(ftdi->udev,
                         usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr),
                          ftdi->bulk_in_buffer, ftdi->bulk_in_size,
-                        &packet_bytes, msecs_to_jiffies(1000));
+                        &packet_bytes, 1000);
                 if (packet_bytes > 2) {
                         char diag[30 *3 + 4];
                         char *d = diag;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 4bcf7fb..6da8887 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -403,14 +403,15 @@
 		mutex_unlock(&dev->lock);
 	}
 
-	info("%s disconnected", DRIVER_DESC);
+	dev_info(&interface->dev, "disconnected\n");
 }
 
 static int __init usb_idmouse_init(void)
 {
 	int result;
 
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	/* register this driver with the USB subsystem */
 	result = usb_register(&idmouse_driver);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 9370326..ab0f322 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -851,9 +851,8 @@
 
 	dbg(2, "%s: enter", __func__);
 
-	if (udev == NULL) {
-		info ("udev is NULL.");
-	}
+	if (udev == NULL)
+		dev_info(&interface->dev, "udev is NULL.\n");
 
 	/* allocate memory for our device state and initialize it */
 
@@ -954,7 +953,9 @@
 	dev->minor = interface->minor;
 
 	/* let the user know what node this device is now attached to */
-	info ("LEGO USB Tower #%d now attached to major %d minor %d", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), USB_MAJOR, dev->minor);
+	dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+		 "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+		 USB_MAJOR, dev->minor);
 
 	/* get the firmware version and log it */
 	result = usb_control_msg (udev,
@@ -971,10 +972,10 @@
 		retval = result;
 		goto error;
 	}
-	info("LEGO USB Tower firmware version is %d.%d build %d",
-	     get_version_reply.major,
-	     get_version_reply.minor,
-	     le16_to_cpu(get_version_reply.build_no));
+	dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
+		 "build %d\n", get_version_reply.major,
+		 get_version_reply.minor,
+		 le16_to_cpu(get_version_reply.build_no));
 
 
 exit:
@@ -1021,7 +1022,8 @@
 		mutex_unlock(&dev->lock);
 	}
 
-	info("LEGO USB Tower #%d now disconnected", (minor - LEGO_USB_TOWER_MINOR_BASE));
+	dev_info(&interface->dev, "LEGO USB Tower #%d now disconnected\n",
+		 (minor - LEGO_USB_TOWER_MINOR_BASE));
 
 	dbg(2, "%s: leave", __func__);
 }
@@ -1046,7 +1048,8 @@
 		goto exit;
 	}
 
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 exit:
 	dbg(2, "%s: leave, return value %d", __func__, retval);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 248a12a..deb95bb 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -89,7 +89,7 @@
 
 	mutex_unlock(&(rio->lock));
 
-	info("Rio opened.");
+	dev_info(&rio->rio_dev->dev, "Rio opened.\n");
 
 	return 0;
 }
@@ -100,7 +100,7 @@
 
 	rio->isopen = 0;
 
-	info("Rio closed.");
+	dev_info(&rio->rio_dev->dev, "Rio closed.\n");
 	return 0;
 }
 
@@ -451,7 +451,7 @@
 	struct rio_usb_data *rio = &rio_instance;
 	int retval;
 
-	info("USB Rio found at address %d", dev->devnum);
+	dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
 
 	retval = usb_register_dev(intf, &usb_rio_class);
 	if (retval) {
@@ -503,7 +503,7 @@
 		kfree(rio->ibuf);
 		kfree(rio->obuf);
 
-		info("USB Rio disconnected.");
+		dev_info(&intf->dev, "USB Rio disconnected.\n");
 
 		rio->present = 0;
 		mutex_unlock(&(rio->lock));
@@ -531,7 +531,8 @@
 	if (retval)
 		goto out;
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 out:
 	return retval;
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 03368ed..2e14102 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -144,7 +144,8 @@
 		return retval;
 	}
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 }
 
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 2db4228..e0ff9cc 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -311,7 +311,7 @@
 	dev->interface = interface;
 
 	if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) {
-		warn(KERN_INFO "USBLCD model not supported.");
+		dev_warn(&interface->dev, "USBLCD model not supported.\n");
 		return -ENODEV;
 	}
 	
@@ -359,12 +359,13 @@
 
 	i = le16_to_cpu(dev->udev->descriptor.bcdDevice);
 
-	info("USBLCD Version %1d%1d.%1d%1d found at address %d",
-		(i & 0xF000)>>12,(i & 0xF00)>>8,(i & 0xF0)>>4,(i & 0xF),
-		dev->udev->devnum);
+	dev_info(&interface->dev, "USBLCD Version %1d%1d.%1d%1d found "
+		 "at address %d\n", (i & 0xF000)>>12, (i & 0xF00)>>8,
+		 (i & 0xF0)>>4,(i & 0xF), dev->udev->devnum);
 
 	/* let the user know what node this device is now attached to */
-	info("USB LCD device now attached to USBLCD-%d", interface->minor);
+	dev_info(&interface->dev, "USB LCD device now attached to USBLCD-%d\n",
+		 interface->minor);
 	return 0;
 
 error:
@@ -413,7 +414,7 @@
 	/* decrement our usage count */
 	kref_put(&dev->kref, lcd_delete);
 
-	info("USB LCD #%d now disconnected", minor);
+	dev_info(&interface->dev, "USB LCD #%d now disconnected\n", minor);
 }
 
 static struct usb_driver lcd_driver = {
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
new file mode 100644
index 0000000..28a6a3a
--- /dev/null
+++ b/drivers/usb/misc/usbsevseg.c
@@ -0,0 +1,394 @@
+/*
+ * USB 7 Segment Driver
+ *
+ * Copyright (C) 2008 Harrison Metzger <harrisonmetz@gmail.com>
+ * Based on usbled.c by Greg Kroah-Hartman (greg@kroah.com)
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License as
+ *	published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/usb.h>
+
+
+#define DRIVER_AUTHOR "Harrison Metzger <harrisonmetz@gmail.com>"
+#define DRIVER_DESC "USB 7 Segment Driver"
+
+#define VENDOR_ID	0x0fc5
+#define PRODUCT_ID	0x1227
+#define MAXLEN		6
+
+/* table of devices that work with this driver */
+static struct usb_device_id id_table[] = {
+	{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+/* the different text display modes the device is capable of */
+static char *display_textmodes[] = {"raw", "hex", "ascii", NULL};
+
+struct usb_sevsegdev {
+	struct usb_device *udev;
+
+	u8 powered;
+	u8 mode_msb;
+	u8 mode_lsb;
+	u8 decimals[MAXLEN];
+	u8 textmode;
+	u8 text[MAXLEN];
+	u16 textlength;
+};
+
+/* sysfs_streq can't replace this completely.
+ * If the device is in hex mode and the user wants to send a 0,
+ * string functions would treat that byte as the end of the string,
+ * so memory functions are used instead.
+ */
+static inline size_t my_memlen(const char *buf, size_t count)
+{
+	if (count > 0 && buf[count-1] == '\n')
+		return count - 1;
+	else
+		return count;
+}
+
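+/*
+ * All commands go out as a vendor control request with bRequest 0x12 and
+ * bmRequestType 0x48; the command number sits in the high byte of wValue
+ * and the command argument is carried in wIndex (longer payloads, such as
+ * the display text, go in the data stage).
+ */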
+static void update_display_powered(struct usb_sevsegdev *mydev)
+{
+	int rc;
+
+	rc = usb_control_msg(mydev->udev,
+			usb_sndctrlpipe(mydev->udev, 0),
+			0x12,
+			0x48,
+			(80 * 0x100) + 10, /*  (power mode) */
+			(0x00 * 0x100) + (mydev->powered ? 1 : 0),
+			NULL,
+			0,
+			2000);
+	if (rc < 0)
+		dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
+}
+
+static void update_display_mode(struct usb_sevsegdev *mydev)
+{
+	int rc;
+
+	rc = usb_control_msg(mydev->udev,
+			usb_sndctrlpipe(mydev->udev, 0),
+			0x12,
+			0x48,
+			(82 * 0x100) + 10, /* (set mode) */
+			(mydev->mode_msb * 0x100) + mydev->mode_lsb,
+			NULL,
+			0,
+			2000);
+
+	if (rc < 0)
+		dev_dbg(&mydev->udev->dev, "mode retval = %d\n", rc);
+}
+
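+/*
+ * Push the current text (command 85) and the packed decimal-point mask
+ * (command 86) to the display; the text buffer is sent reversed because
+ * the device addresses digits right to left.
+ */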
+static void update_display_visual(struct usb_sevsegdev *mydev)
+{
+	int rc;
+	int i;
+	unsigned char *buffer;
+	u8 decimals = 0;
+
+	buffer = kzalloc(MAXLEN, GFP_KERNEL);
+	if (!buffer) {
+		dev_err(&mydev->udev->dev, "out of memory\n");
+		return;
+	}
+
+	/* The device is right to left, whereas you write left to right */
+	for (i = 0; i < mydev->textlength; i++)
+		buffer[i] = mydev->text[mydev->textlength-1-i];
+
+	rc = usb_control_msg(mydev->udev,
+			usb_sndctrlpipe(mydev->udev, 0),
+			0x12,
+			0x48,
+			(85 * 0x100) + 10, /* (write text) */
+			(0 * 0x100) + mydev->textmode, /* mode  */
+			buffer,
+			mydev->textlength,
+			2000);
+
+	if (rc < 0)
+		dev_dbg(&mydev->udev->dev, "write retval = %d\n", rc);
+
+	kfree(buffer);
+
+	/* The device is right to left, whereas you write left to right */
+	for (i = 0; i < sizeof(mydev->decimals); i++)
+		decimals |= mydev->decimals[i] << i;
+
+	rc = usb_control_msg(mydev->udev,
+			usb_sndctrlpipe(mydev->udev, 0),
+			0x12,
+			0x48,
+			(86 * 0x100) + 10, /* (set decimal) */
+			(0 * 0x100) + decimals, /* decimals */
+			NULL,
+			0,
+			2000);
+
+	if (rc < 0)
+		dev_dbg(&mydev->udev->dev, "decimal retval = %d\n", rc);
+}
+
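+/*
+ * Generate show/store sysfs handlers for a simple unsigned field of
+ * struct usb_sevsegdev; storing a value also pushes it to the hardware
+ * through the given update function.
+ */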
+#define MYDEV_ATTR_SIMPLE_UNSIGNED(name, update_fcn)		\
+static ssize_t show_attr_##name(struct device *dev, 		\
+	struct device_attribute *attr, char *buf) 		\
+{								\
+	struct usb_interface *intf = to_usb_interface(dev);	\
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);	\
+								\
+	return sprintf(buf, "%u\n", mydev->name);		\
+}								\
+								\
+static ssize_t set_attr_##name(struct device *dev, 		\
+	struct device_attribute *attr, const char *buf, size_t count) \
+{								\
+	struct usb_interface *intf = to_usb_interface(dev);	\
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);	\
+								\
+	mydev->name = simple_strtoul(buf, NULL, 10);		\
+	update_fcn(mydev); \
+								\
+	return count;						\
+}								\
+static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
+
+static ssize_t show_attr_text(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
+
+	return snprintf(buf, mydev->textlength, "%s\n", mydev->text);
+}
+
+static ssize_t set_attr_text(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
+	size_t end = my_memlen(buf, count);
+
+	if (end > sizeof(mydev->text))
+		return -EINVAL;
+
+	memset(mydev->text, 0, sizeof(mydev->text));
+	mydev->textlength = end;
+
+	if (end > 0)
+		memcpy(mydev->text, buf, end);
+
+	update_display_visual(mydev);
+	return count;
+}
+
+static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
+
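+/*
+ * Decimal points are exposed as a string of '0'/'1' flags, one per digit;
+ * the string is reversed on show/store since the device addresses digits
+ * right to left while the user reads left to right.
+ */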
+static ssize_t show_attr_decimals(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
+	int i;
+	int pos;
+
+	for (i = 0; i < sizeof(mydev->decimals); i++) {
+		pos = sizeof(mydev->decimals) - 1 - i;
+		if (mydev->decimals[i] == 0)
+			buf[pos] = '0';
+		else if (mydev->decimals[i] == 1)
+			buf[pos] = '1';
+		else
+			buf[pos] = 'x';
+	}
+
+	buf[sizeof(mydev->decimals)] = '\n';
+	return sizeof(mydev->decimals) + 1;
+}
+
+static ssize_t set_attr_decimals(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
+	size_t end = my_memlen(buf, count);
+	int i;
+
+	if (end > sizeof(mydev->decimals))
+		return -EINVAL;
+
+	for (i = 0; i < end; i++)
+		if (buf[i] != '0' && buf[i] != '1')
+			return -EINVAL;
+
+	memset(mydev->decimals, 0, sizeof(mydev->decimals));
+	for (i = 0; i < end; i++)
+		if (buf[i] == '1')
+			mydev->decimals[end-1-i] = 1;
+
+	update_display_visual(mydev);
+
+	return count;
+}
+
+static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
+	show_attr_decimals, set_attr_decimals);
+
+static ssize_t show_attr_textmode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
+	int i;
+
+	buf[0] = 0;
+
+	for (i = 0; display_textmodes[i]; i++) {
+		if (mydev->textmode == i) {
+			strcat(buf, " [");
+			strcat(buf, display_textmodes[i]);
+			strcat(buf, "] ");
+		} else {
+			strcat(buf, " ");
+			strcat(buf, display_textmodes[i]);
+			strcat(buf, " ");
+		}
+	}
+	strcat(buf, "\n");
+
+
+	return strlen(buf);
+}
+
+static ssize_t set_attr_textmode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
+	int i;
+
+	for (i = 0; display_textmodes[i]; i++) {
+		if (sysfs_streq(display_textmodes[i], buf)) {
+			mydev->textmode = i;
+			update_display_visual(mydev);
+			return count;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
+	show_attr_textmode, set_attr_textmode);
+
+
+MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
+MYDEV_ATTR_SIMPLE_UNSIGNED(mode_msb, update_display_mode);
+MYDEV_ATTR_SIMPLE_UNSIGNED(mode_lsb, update_display_mode);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_powered.attr,
+	&dev_attr_text.attr,
+	&dev_attr_textmode.attr,
+	&dev_attr_decimals.attr,
+	&dev_attr_mode_msb.attr,
+	&dev_attr_mode_lsb.attr,
+	NULL
+};
+
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+
+static int sevseg_probe(struct usb_interface *interface,
+	const struct usb_device_id *id)
+{
+	struct usb_device *udev = interface_to_usbdev(interface);
+	struct usb_sevsegdev *mydev = NULL;
+	int rc = -ENOMEM;
+
+	mydev = kzalloc(sizeof(struct usb_sevsegdev), GFP_KERNEL);
+	if (mydev == NULL) {
+		dev_err(&interface->dev, "Out of memory\n");
+		goto error_mem;
+	}
+
+	mydev->udev = usb_get_dev(udev);
+	usb_set_intfdata(interface, mydev);
+
+	/* set defaults */
+	mydev->textmode = 0x02; /* ascii mode */
+	mydev->mode_msb = 0x06; /* 6 characters */
+	mydev->mode_lsb = 0x3f; /* scanmode for 6 chars */
+
+	rc = sysfs_create_group(&interface->dev.kobj, &dev_attr_grp);
+	if (rc)
+		goto error;
+
+	dev_info(&interface->dev, "USB 7 Segment device now attached\n");
+	return 0;
+
+error:
+	usb_set_intfdata(interface, NULL);
+	usb_put_dev(mydev->udev);
+	kfree(mydev);
+error_mem:
+	return rc;
+}
+
+static void sevseg_disconnect(struct usb_interface *interface)
+{
+	struct usb_sevsegdev *mydev;
+
+	mydev = usb_get_intfdata(interface);
+	sysfs_remove_group(&interface->dev.kobj, &dev_attr_grp);
+	usb_set_intfdata(interface, NULL);
+	usb_put_dev(mydev->udev);
+	kfree(mydev);
+	dev_info(&interface->dev, "USB 7 Segment now disconnected\n");
+}
+
+static struct usb_driver sevseg_driver = {
+	.name =		"usbsevseg",
+	.probe =	sevseg_probe,
+	.disconnect =	sevseg_disconnect,
+	.id_table =	id_table,
+};
+
+static int __init usb_sevseg_init(void)
+{
+	int rc = 0;
+
+	rc = usb_register(&sevseg_driver);
+	if (rc)
+		err("usb_register failed. Error number %d", rc);
+	return rc;
+}
+
+static void __exit usb_sevseg_exit(void)
+{
+	usb_deregister(&sevseg_driver);
+}
+
+module_init(usb_sevseg_init);
+module_exit(usb_sevseg_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index b358c4e..444c69c 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1561,8 +1561,7 @@
 	if (code != USBTEST_REQUEST)
 		return -EOPNOTSUPP;
 
-	if (param->iterations <= 0 || param->length < 0
-			|| param->sglen < 0 || param->vary < 0)
+	if (param->iterations <= 0)
 		return -EINVAL;
 
 	if (mutex_lock_interruptible(&dev->lock))
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index f1255b0..9a6c27a 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -228,11 +228,12 @@
 		ret = rq->urb->status;
 		*val = priv->reg[(reg >= 9) ? 0 : regindex[reg]];
 		if (ret)
-			warn("get_1284_register: usb error %d", ret);
+			printk(KERN_WARNING "get_1284_register: "
+			       "usb error %d\n", ret);
 		kref_put(&rq->ref_count, destroy_async);
 		return ret;
 	}
-	warn("get_1284_register timeout");
+	printk(KERN_WARNING "get_1284_register timeout\n");
 	kill_all_async_requests_priv(priv);
 	return -EIO;
 }
@@ -716,7 +717,7 @@
 	spin_lock_init(&priv->asynclock);
 	INIT_LIST_HEAD(&priv->asynclist);
 	if (!(pp = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &parport_uss720_ops))) {
-		warn("could not register parport");
+		printk(KERN_WARNING "uss720: could not register parport\n");
 		goto probe_abort;
 	}
 
@@ -800,10 +801,14 @@
 	if (retval)
 		goto out;
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
-	info("NOTE: this is a special purpose driver to allow nonstandard");
-	info("protocols (eg. bitbang) over USS720 usb to parallel cables");
-	info("If you just want to connect to a printer, use usblp instead");
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
+	printk(KERN_INFO KBUILD_MODNAME ": NOTE: this is a special purpose "
+	       "driver to allow nonstandard\n");
+	printk(KERN_INFO KBUILD_MODNAME ": protocols (eg. bitbang) over "
+	       "USS720 usb to parallel cables\n");
+	printk(KERN_INFO KBUILD_MODNAME ": If you just want to connect to a "
+	       "printer, use usblp instead\n");
 out:
 	return retval;
 }
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
new file mode 100644
index 0000000..8648470c
--- /dev/null
+++ b/drivers/usb/misc/vstusb.c
@@ -0,0 +1,782 @@
+/*****************************************************************************
+ *  File: drivers/usb/misc/vstusb.c
+ *
+ *  Purpose: Support for the bulk USB Vernier Spectrophotometers
+ *
+ *  Author:     Johnnie Peters
+ *              Axian Consulting
+ *              Beaverton, OR, USA 97005
+ *
+ *  Modified by:     EQware Engineering, Inc.
+ *                   Oregon City, OR, USA 97045
+ *
+ *  Copyright:  2007, 2008
+ *              Vernier Software & Technology
+ *              Beaverton, OR, USA 97005
+ *
+ *  Web:        www.vernier.com
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+
+#include <linux/usb/vstusb.h>
+
+#define DRIVER_VERSION "VST USB Driver Version 1.5"
+#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver"
+
+#ifdef CONFIG_USB_DYNAMIC_MINORS
+	#define VSTUSB_MINOR_BASE	0
+#else
+	#define VSTUSB_MINOR_BASE	199
+#endif
+
+#define USB_VENDOR_OCEANOPTICS	0x2457
+#define USB_VENDOR_VERNIER	0x08F7	/* Vernier Software & Technology */
+
+#define USB_PRODUCT_USB2000	0x1002
+#define USB_PRODUCT_ADC1000_FW	0x1003	/* firmware download (renumerates) */
+#define USB_PRODUCT_ADC1000	0x1004
+#define USB_PRODUCT_HR2000_FW	0x1009	/* firmware download (renumerates) */
+#define USB_PRODUCT_HR2000	0x100A
+#define USB_PRODUCT_HR4000_FW	0x1011	/* firmware download (renumerates) */
+#define USB_PRODUCT_HR4000	0x1012
+#define USB_PRODUCT_USB650	0x1014	/* "Red Tide" */
+#define USB_PRODUCT_QE65000	0x1018
+#define USB_PRODUCT_USB4000	0x1022
+#define USB_PRODUCT_USB325	0x1024	/* "Vernier Spectrometer" */
+
+#define USB_PRODUCT_LABPRO	0x0001
+#define USB_PRODUCT_LABQUEST	0x0005
+
+#define VST_MAXBUFFER		(64*1024)
+
+static struct usb_device_id id_table[] = {
+	{ USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)},
+	{ USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)},
+	{ USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)},
+	{ USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)},
+	{ USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)},
+	{ USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)},
+	{ USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)},
+	{},
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+struct vstusb_device {
+	struct kref				kref;
+	struct mutex            lock;
+	struct usb_device       *usb_dev;
+	char                    present;
+	char                    isopen;
+	struct usb_anchor       submitted;
+	int                     rd_pipe;
+	int                     rd_timeout_ms;
+	int                     wr_pipe;
+	int                     wr_timeout_ms;
+};
+#define to_vst_dev(d) container_of(d, struct vstusb_device, kref)
+
+static struct usb_driver vstusb_driver;
+
+static void vstusb_delete(struct kref *kref)
+{
+	struct vstusb_device *vstdev = to_vst_dev(kref);
+
+	usb_put_dev(vstdev->usb_dev);
+	kfree(vstdev);
+}
+
+static int vstusb_open(struct inode *inode, struct file *file)
+{
+	struct vstusb_device *vstdev;
+	struct usb_interface *interface;
+
+	interface = usb_find_interface(&vstusb_driver, iminor(inode));
+
+	if (!interface) {
+		printk(KERN_ERR KBUILD_MODNAME
+		       ": %s - error, can't find device for minor %d\n",
+		       __func__, iminor(inode));
+		return -ENODEV;
+	}
+
+	vstdev = usb_get_intfdata(interface);
+
+	if (!vstdev)
+		return -ENODEV;
+
+	/* lock this device */
+	mutex_lock(&vstdev->lock);
+
+	/* can only open one time */
+	if ((!vstdev->present) || (vstdev->isopen)) {
+		mutex_unlock(&vstdev->lock);
+		return -EBUSY;
+	}
+
+	/* increment our usage count */
+	kref_get(&vstdev->kref);
+
+	vstdev->isopen = 1;
+
+	/* save device in the file's private structure */
+	file->private_data = vstdev;
+
+	dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__);
+
+	mutex_unlock(&vstdev->lock);
+
+	return 0;
+}
+
+static int vstusb_release(struct inode *inode, struct file *file)
+{
+	struct vstusb_device *vstdev;
+
+	vstdev = file->private_data;
+
+	if (vstdev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&vstdev->lock);
+
+	vstdev->isopen = 0;
+
+	dev_dbg(&vstdev->usb_dev->dev, "%s: released\n", __func__);
+
+	mutex_unlock(&vstdev->lock);
+
+	kref_put(&vstdev->kref, vstusb_delete);
+
+	return 0;
+}
+
+static void usb_api_blocking_completion(struct urb *urb)
+{
+	struct completion *completeit = urb->context;
+
+	complete(completeit);
+}
+
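+/*
+ * Fill a bulk URB (or an interrupt URB when the endpoint is an interrupt
+ * endpoint) and submit it; completion is signalled through the
+ * caller-supplied completion in 'done'.
+ */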
+static int vstusb_fill_and_send_urb(struct urb *urb,
+				    struct usb_device *usb_dev,
+				    unsigned int pipe, void *data,
+				    unsigned int len, struct completion *done)
+{
+	struct usb_host_endpoint *ep;
+	struct usb_host_endpoint **hostep;
+	unsigned int pipend;
+
+	int status;
+
+	hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out;
+	pipend = usb_pipeendpoint(pipe);
+	ep = hostep[pipend];
+
+	if (!ep || (len == 0))
+		return -EINVAL;
+
+	if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+	    == USB_ENDPOINT_XFER_INT) {
+		pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+		usb_fill_int_urb(urb, usb_dev, pipe, data, len,
+				 (usb_complete_t)usb_api_blocking_completion,
+				 NULL, ep->desc.bInterval);
+	} else
+		usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
+				  (usb_complete_t)usb_api_blocking_completion,
+				  NULL);
+
+	init_completion(done);
+	urb->context = done;
+	urb->actual_length = 0;
+	status = usb_submit_urb(urb, GFP_KERNEL);
+
+	return status;
+}
+
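+/*
+ * Wait for a submitted URB to finish.  On timeout the URB is killed and
+ * -ETIMEDOUT (or the URB's own error) returned; a pending signal on a
+ * failed transfer yields -EINTR.  The number of bytes transferred is
+ * reported via *actual_length.
+ */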
+static int vstusb_complete_urb(struct urb *urb, struct completion *done,
+			       int timeout, int *actual_length)
+{
+	unsigned long expire;
+	int status;
+
+	expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
+	if (!wait_for_completion_interruptible_timeout(done, expire)) {
+		usb_kill_urb(urb);
+		status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
+
+		dev_dbg(&urb->dev->dev,
+			"%s timed out on ep%d%s len=%d/%d, urb status = %d\n",
+			current->comm,
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out",
+			urb->actual_length,
+			urb->transfer_buffer_length,
+			urb->status);
+
+	} else {
+		if (signal_pending(current)) {
+			/* if really an error */
+			if (urb->status && !((urb->status == -ENOENT)     ||
+					     (urb->status == -ECONNRESET) ||
+					     (urb->status == -ESHUTDOWN))) {
+				status = -EINTR;
+				usb_kill_urb(urb);
+			} else {
+				status = 0;
+			}
+
+			dev_dbg(&urb->dev->dev,
+				"%s: signal pending on ep%d%s len=%d/%d,"
+				" urb status = %d\n",
+				current->comm,
+				usb_pipeendpoint(urb->pipe),
+				usb_pipein(urb->pipe) ? "in" : "out",
+				urb->actual_length,
+				urb->transfer_buffer_length,
+				urb->status);
+
+		} else {
+			status = urb->status;
+		}
+	}
+
+	if (actual_length)
+		*actual_length = urb->actual_length;
+
+	return status;
+}
+
+static ssize_t vstusb_read(struct file *file, char __user *buffer,
+			   size_t count, loff_t *ppos)
+{
+	struct vstusb_device *vstdev;
+	int cnt = -1;
+	void *buf;
+	int retval = 0;
+
+	struct urb              *urb;
+	struct usb_device       *dev;
+	unsigned int            pipe;
+	int                     timeout;
+
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	vstdev = file->private_data;
+
+	if (vstdev == NULL)
+		return -ENODEV;
+
+	/* verify that we actually want to read some data */
+	if ((count == 0) || (count > VST_MAXBUFFER))
+		return -EINVAL;
+
+	/* lock this object */
+	if (mutex_lock_interruptible(&vstdev->lock))
+		return -ERESTARTSYS;
+
+	/* anyone home */
+	if (!vstdev->present) {
+		mutex_unlock(&vstdev->lock);
+		printk(KERN_ERR KBUILD_MODNAME
+		       ": %s: device not present\n", __func__);
+		return -ENODEV;
+	}
+
+	/* pull out the necessary data */
+	dev =     vstdev->usb_dev;
+	pipe =    usb_rcvbulkpipe(dev, vstdev->rd_pipe);
+	timeout = vstdev->rd_timeout_ms;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (buf == NULL) {
+		mutex_unlock(&vstdev->lock);
+		return -ENOMEM;
+	}
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb) {
+		kfree(buf);
+		mutex_unlock(&vstdev->lock);
+		return -ENOMEM;
+	}
+
+	usb_anchor_urb(urb, &vstdev->submitted);
+	retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
+	mutex_unlock(&vstdev->lock);
+	if (retval) {
+		usb_unanchor_urb(urb);
+		dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
+			__func__, retval, pipe);
+		goto exit;
+	}
+
+	retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
+	if (retval) {
+		dev_err(&dev->dev, "%s: error %d completing urb %d\n",
+			__func__, retval, pipe);
+		goto exit;
+	}
+
+	if (copy_to_user(buffer, buf, cnt)) {
+		dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__);
+		retval = -EFAULT;
+	} else {
+		retval = cnt;
+		dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n",
+			__func__, cnt, pipe);
+	}
+
+exit:
+	usb_free_urb(urb);
+	kfree(buf);
+	return retval;
+}
+
+static ssize_t vstusb_write(struct file *file, const char __user *buffer,
+			    size_t count, loff_t *ppos)
+{
+	struct vstusb_device *vstdev;
+	int cnt = -1;
+	void *buf;
+	int retval = 0;
+
+	struct urb              *urb;
+	struct usb_device       *dev;
+	unsigned int            pipe;
+	int                     timeout;
+
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	vstdev = file->private_data;
+
+	if (vstdev == NULL)
+		return -ENODEV;
+
+	/* verify that we actually have some data to write */
+	if ((count == 0) || (count > VST_MAXBUFFER))
+		return retval;
+
+	/* lock this object */
+	if (mutex_lock_interruptible(&vstdev->lock))
+		return -ERESTARTSYS;
+
+	/* anyone home */
+	if (!vstdev->present) {
+		mutex_unlock(&vstdev->lock);
+		printk(KERN_ERR KBUILD_MODNAME
+		       ": %s: device not present\n", __func__);
+		return -ENODEV;
+	}
+
+	/* pull out the necessary data */
+	dev =     vstdev->usb_dev;
+	pipe =    usb_sndbulkpipe(dev, vstdev->wr_pipe);
+	timeout = vstdev->wr_timeout_ms;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (buf == NULL) {
+		mutex_unlock(&vstdev->lock);
+		return -ENOMEM;
+	}
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb) {
+		kfree(buf);
+		mutex_unlock(&vstdev->lock);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(buf, buffer, count)) {
+		dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
+		retval = -EFAULT;
+		goto exit;
+	}
+
+	usb_anchor_urb(urb, &vstdev->submitted);
+	retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
+	mutex_unlock(&vstdev->lock);
+	if (retval) {
+		usb_unanchor_urb(urb);
+		dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
+			__func__, retval, pipe);
+		goto exit;
+	}
+
+	retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
+	if (retval) {
+		dev_err(&dev->dev, "%s: error %d completing urb %d\n",
+			__func__, retval, pipe);
+		goto exit;
+	} else {
+		retval = cnt;
+		dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n",
+			__func__, cnt, pipe);
+	}
+
+exit:
+	usb_free_urb(urb);
+	kfree(buf);
+	return retval;
+}
+
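+/*
+ * ioctl interface: IOCTL_VSTUSB_CONFIG_RW stores the default read/write
+ * pipes and timeouts used by read()/write(), while IOCTL_VSTUSB_SEND_PIPE
+ * and IOCTL_VSTUSB_RECV_PIPE perform a one-shot bulk transfer on the pipe
+ * described by the struct vstusb_args passed from user space.
+ */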
+static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	int cnt = -1;
+	void __user *data = (void __user *)arg;
+	struct vstusb_args usb_data;
+
+	struct vstusb_device *vstdev;
+	void *buffer = NULL; /* must be initialized. buffer is
+			      * referenced on exit but not all
+			      * ioctls allocate it */
+
+	struct urb              *urb = NULL; /* must be initialized. urb is
+					      * referenced on exit but not all
+					      * ioctls allocate it */
+	struct usb_device       *dev;
+	unsigned int            pipe;
+	int                     timeout;
+
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	vstdev = file->private_data;
+
+	if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) {
+		dev_warn(&vstdev->usb_dev->dev,
+			 "%s: ioctl command %x, bad ioctl magic %x, "
+			 "expected %x\n", __func__, cmd,
+			 _IOC_TYPE(cmd), VST_IOC_MAGIC);
+		return -EINVAL;
+	}
+
+	if (vstdev == NULL)
+		return -ENODEV;
+
+	if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) {
+		dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	/* lock this object */
+	if (mutex_lock_interruptible(&vstdev->lock)) {
+		retval = -ERESTARTSYS;
+		goto exit;
+	}
+
+	/* anyone home */
+	if (!vstdev->present) {
+		mutex_unlock(&vstdev->lock);
+		dev_err(&vstdev->usb_dev->dev, "%s: device not present\n",
+			__func__);
+		retval = -ENODEV;
+		goto exit;
+	}
+
+	/* pull out the necessary data */
+	dev = vstdev->usb_dev;
+
+	switch (cmd) {
+
+	case IOCTL_VSTUSB_CONFIG_RW:
+
+		vstdev->rd_pipe = usb_data.rd_pipe;
+		vstdev->rd_timeout_ms = usb_data.rd_timeout_ms;
+		vstdev->wr_pipe = usb_data.wr_pipe;
+		vstdev->wr_timeout_ms = usb_data.wr_timeout_ms;
+
+		mutex_unlock(&vstdev->lock);
+
+		dev_dbg(&dev->dev, "%s: setting pipes/timeouts, "
+			"rdpipe = %d, rdtimeout = %d, "
+			"wrpipe = %d, wrtimeout = %d\n", __func__,
+			vstdev->rd_pipe, vstdev->rd_timeout_ms,
+			vstdev->wr_pipe, vstdev->wr_timeout_ms);
+		break;
+
+	case IOCTL_VSTUSB_SEND_PIPE:
+
+		if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
+			mutex_unlock(&vstdev->lock);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		buffer = kmalloc(usb_data.count, GFP_KERNEL);
+		if (buffer == NULL) {
+			mutex_unlock(&vstdev->lock);
+			retval = -ENOMEM;
+			goto exit;
+		}
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			mutex_unlock(&vstdev->lock);
+			retval = -ENOMEM;
+			goto exit;
+		}
+
+		timeout = usb_data.timeout_ms;
+
+		pipe = usb_sndbulkpipe(dev, usb_data.pipe);
+
+		if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) {
+			dev_err(&dev->dev, "%s: can't copy_from_user\n",
+				__func__);
+			mutex_unlock(&vstdev->lock);
+			retval = -EFAULT;
+			goto exit;
+		}
+
+		usb_anchor_urb(urb, &vstdev->submitted);
+		retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
+						  usb_data.count, &done);
+		mutex_unlock(&vstdev->lock);
+		if (retval) {
+			usb_unanchor_urb(urb);
+			dev_err(&dev->dev,
+				"%s: error %d filling and sending urb %d\n",
+				__func__, retval, pipe);
+			goto exit;
+		}
+
+		retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
+		if (retval) {
+			dev_err(&dev->dev, "%s: error %d completing urb %d\n",
+				__func__, retval, pipe);
+		}
+
+		break;
+	case IOCTL_VSTUSB_RECV_PIPE:
+
+		if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
+			mutex_unlock(&vstdev->lock);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		buffer = kmalloc(usb_data.count, GFP_KERNEL);
+		if (buffer == NULL) {
+			mutex_unlock(&vstdev->lock);
+			retval = -ENOMEM;
+			goto exit;
+		}
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			mutex_unlock(&vstdev->lock);
+			retval = -ENOMEM;
+			goto exit;
+		}
+
+		timeout = usb_data.timeout_ms;
+
+		pipe = usb_rcvbulkpipe(dev, usb_data.pipe);
+
+		usb_anchor_urb(urb, &vstdev->submitted);
+		retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
+						  usb_data.count, &done);
+		mutex_unlock(&vstdev->lock);
+		if (retval) {
+			usb_unanchor_urb(urb);
+			dev_err(&dev->dev,
+				"%s: error %d filling and sending urb %d\n",
+				__func__, retval, pipe);
+			goto exit;
+		}
+
+		retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
+		if (retval) {
+			dev_err(&dev->dev, "%s: error %d completing urb %d\n",
+				__func__, retval, pipe);
+			goto exit;
+		}
+
+		if (copy_to_user(usb_data.buffer, buffer, cnt)) {
+			dev_err(&dev->dev, "%s: can't copy_to_user\n",
+				__func__);
+			retval = -EFAULT;
+			goto exit;
+		}
+
+		usb_data.count = cnt;
+		if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) {
+			dev_err(&dev->dev, "%s: can't copy_to_user\n",
+				__func__);
+			retval = -EFAULT;
+		} else {
+			dev_dbg(&dev->dev, "%s: recv %d bytes from pipe %d\n",
+				__func__, usb_data.count, usb_data.pipe);
+		}
+
+		break;
+
+	default:
+		mutex_unlock(&vstdev->lock);
+		dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n",
+			 cmd);
+		return -EINVAL;
+		break;
+	}
+exit:
+	usb_free_urb(urb);
+	kfree(buffer);
+	return retval;
+}
+
+static const struct file_operations vstusb_fops = {
+	.owner =                THIS_MODULE,
+	.read =                 vstusb_read,
+	.write =                vstusb_write,
+	.unlocked_ioctl =       vstusb_ioctl,
+	.compat_ioctl =         vstusb_ioctl,
+	.open =                 vstusb_open,
+	.release =              vstusb_release,
+};
+
+static struct usb_class_driver usb_vstusb_class = {
+	.name =         "usb/vstusb%d",
+	.fops =         &vstusb_fops,
+	.minor_base =   VSTUSB_MINOR_BASE,
+};
+
+static int vstusb_probe(struct usb_interface *intf,
+			const struct usb_device_id *id)
+{
+	struct usb_device *dev = interface_to_usbdev(intf);
+	struct vstusb_device *vstdev;
+	int i;
+	int retval = 0;
+
+	/* allocate memory for our device state and initialize it */
+
+	vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL);
+	if (vstdev == NULL)
+		return -ENOMEM;
+
+	/* must do usb_get_dev() prior to kref_init() since the kref_put()
+	 * release function will do a usb_put_dev() */
+	usb_get_dev(dev);
+	kref_init(&vstdev->kref);
+	mutex_init(&vstdev->lock);
+
+	i = dev->descriptor.bcdDevice;
+
+	dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n",
+		(i & 0xF000) >> 12, (i & 0xF00) >> 8,
+		(i & 0xF0) >> 4, (i & 0xF), dev->devnum);
+
+	vstdev->present = 1;
+	vstdev->isopen = 0;
+	vstdev->usb_dev = dev;
+	init_usb_anchor(&vstdev->submitted);
+
+	usb_set_intfdata(intf, vstdev);
+	retval = usb_register_dev(intf, &usb_vstusb_class);
+	if (retval) {
+		dev_err(&intf->dev,
+			"%s: Not able to get a minor for this device.\n",
+			__func__);
+		usb_set_intfdata(intf, NULL);
+		kref_put(&vstdev->kref, vstusb_delete);
+		return retval;
+	}
+
+	/* let the user know what node this device is now attached to */
+	dev_info(&intf->dev,
+		 "VST USB Device #%d now attached to major %d minor %d\n",
+		 (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, intf->minor);
+
+	dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION);
+
+	return retval;
+}
+
+static void vstusb_disconnect(struct usb_interface *intf)
+{
+	struct vstusb_device *vstdev = usb_get_intfdata(intf);
+
+	usb_deregister_dev(intf, &usb_vstusb_class);
+	usb_set_intfdata(intf, NULL);
+
+	if (vstdev) {
+
+		mutex_lock(&vstdev->lock);
+		vstdev->present = 0;
+
+		usb_kill_anchored_urbs(&vstdev->submitted);
+
+		mutex_unlock(&vstdev->lock);
+
+		kref_put(&vstdev->kref, vstusb_delete);
+	}
+
+}
+
+static int vstusb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct vstusb_device *vstdev = usb_get_intfdata(intf);
+	int time;
+	if (!vstdev)
+		return 0;
+
+	mutex_lock(&vstdev->lock);
+	time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000);
+	if (!time)
+		usb_kill_anchored_urbs(&vstdev->submitted);
+	mutex_unlock(&vstdev->lock);
+
+	return 0;
+}
+
+static int vstusb_resume(struct usb_interface *intf)
+{
+	return 0;
+}
+
+static struct usb_driver vstusb_driver = {
+	.name =         "vstusb",
+	.probe =        vstusb_probe,
+	.disconnect =   vstusb_disconnect,
+	.suspend =      vstusb_suspend,
+	.resume =       vstusb_resume,
+	.id_table = id_table,
+};
+
+static int __init vstusb_init(void)
+{
+	int rc;
+
+	rc = usb_register(&vstusb_driver);
+	if (rc)
+		printk(KERN_ERR "%s: failed to register (%d)\n", __func__, rc);
+
+	return rc;
+}
+
+static void __exit vstusb_exit(void)
+{
+	usb_deregister(&vstusb_driver);
+}
+
+module_init(vstusb_init);
+module_exit(vstusb_exit);
+
+MODULE_AUTHOR("Dennis O'Brien/Stephen Ware");
+MODULE_DESCRIPTION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 442d807..5e0ab42 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -361,12 +361,12 @@
 	}
 	// MOD_INC_USE_COUNT(which_module?);
 
-	usb_register_notify(&mon_nb);
 
 	mutex_lock(&usb_bus_list_lock);
 	list_for_each_entry (ubus, &usb_bus_list, bus_list) {
 		mon_bus_init(ubus);
 	}
+	usb_register_notify(&mon_nb);
 	mutex_unlock(&usb_bus_list_lock);
 	return 0;
 
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 58b2b8f..4b9542b 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -33,10 +33,6 @@
 	default y if ARCH_DAVINCI
 	default y if ARCH_OMAP2430
 	default y if ARCH_OMAP34XX
-	help
-	  Use a static <asm/arch/hdrc_cnf.h> file to describe how the
-	  controller is configured (endpoints, mechanisms, etc) on the
-	  current iteration of a given system-on-chip.
 
 comment "DaVinci 644x USB support"
 	depends on USB_MUSB_HDRC && ARCH_DAVINCI
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index fc5216b..729b407 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -119,8 +119,8 @@
 	void __iomem			*mregs;		/* Mentor regs */
 	void __iomem			*tibase;	/* TI/CPPI regs */
 
-	struct cppi_channel		tx[MUSB_C_NUM_EPT - 1];
-	struct cppi_channel		rx[MUSB_C_NUM_EPR - 1];
+	struct cppi_channel		tx[4];
+	struct cppi_channel		rx[4];
 
 	struct dma_pool			*pool;
 
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 75baf18..dfb3bcb 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 
 #include <asm/arch/hardware.h>
 #include <asm/arch/memory.h>
@@ -39,7 +40,7 @@
 #include "musb_core.h"
 
 #ifdef CONFIG_MACH_DAVINCI_EVM
-#include <asm/arch/i2c-client.h>
+#define GPIO_nVBUS_DRV		87
 #endif
 
 #include "davinci.h"
@@ -138,7 +139,6 @@
 /* VBUS SWITCHING IS BOARD-SPECIFIC */
 
 #ifdef CONFIG_MACH_DAVINCI_EVM
-#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
 
 /* I2C operations are always synchronous, and require a task context.
  * With unloaded systems, using the shared workqueue seems to suffice
@@ -146,12 +146,11 @@
  */
 static void evm_deferred_drvvbus(struct work_struct *ignored)
 {
-	davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
+	gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
 	vbus_state = !vbus_state;
 }
 static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
 
-#endif	/* modified board */
 #endif	/* EVM */
 
 static void davinci_source_power(struct musb *musb, int is_on, int immediate)
@@ -165,21 +164,10 @@
 
 #ifdef CONFIG_MACH_DAVINCI_EVM
 	if (machine_is_davinci_evm()) {
-#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
-		/* modified EVM board switching VBUS with GPIO(6) not I2C
-		 * NOTE:  PINMUX0.RGB888 (bit23) must be clear
-		 */
-		if (is_on)
-			gpio_set(GPIO(6));
-		else
-			gpio_clear(GPIO(6));
-		immediate = 1;
-#else
 		if (immediate)
-			davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+			gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
 		else
 			schedule_work(&evm_vbus_work);
-#endif
 	}
 #endif
 	if (immediate)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 128e949..4a35745 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -82,9 +82,9 @@
 /*
  * This gets many kinds of configuration information:
  *	- Kconfig for everything user-configurable
- *	- <asm/arch/hdrc_cnf.h> for SOC or family details
  *	- platform_device for addressing, irq, and platform_data
 *	- platform_data is mostly for board-specific information
+ *	  (plus recently, SOC or family details)
  *
  * Most of the conditional compilation will (someday) vanish.
  */
@@ -974,9 +974,9 @@
 /*
  * The silicon either has hard-wired endpoint configurations, or else
  * "dynamic fifo" sizing.  The driver has support for both, though at this
- * writing only the dynamic sizing is very well tested.   We use normal
- * idioms to so both modes are compile-tested, but dead code elimination
- * leaves only the relevant one in the object file.
+ * writing only the dynamic sizing is very well tested.   Since we switched
+ * away from compile-time hardware parameters, we can no longer rely on
+ * dead code elimination to leave only the relevant one in the object file.
  *
  * We don't currently use dynamic fifo setup capability to do anything
  * more than selecting one of a bunch of predefined configurations.
@@ -1806,6 +1806,7 @@
 	musb->ctrl_base = mbase;
 	musb->nIrq = -ENODEV;
 	musb->config = config;
+	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
 	for (epnum = 0, ep = musb->endpoints;
 			epnum < musb->config->num_eps;
 			epnum++, ep++) {
@@ -2054,15 +2055,6 @@
 
 	}
 
-	return 0;
-
-fail:
-	if (musb->clock)
-		clk_put(musb->clock);
-	device_init_wakeup(dev, 0);
-	musb_free(musb);
-	return status;
-
 #ifdef CONFIG_SYSFS
 	status = device_create_file(dev, &dev_attr_mode);
 	status = device_create_file(dev, &dev_attr_vbus);
@@ -2071,12 +2063,31 @@
 #endif /* CONFIG_USB_GADGET_MUSB_HDRC */
 	status = 0;
 #endif
+	if (status)
+		goto fail2;
+
+	return 0;
+
+fail2:
+#ifdef CONFIG_SYSFS
+	device_remove_file(musb->controller, &dev_attr_mode);
+	device_remove_file(musb->controller, &dev_attr_vbus);
+#ifdef CONFIG_USB_MUSB_OTG
+	device_remove_file(musb->controller, &dev_attr_srp);
+#endif
+#endif
+	musb_platform_exit(musb);
+fail:
+	dev_err(musb->controller,
+		"musb_init_controller failed with status %d\n", status);
+
+	if (musb->clock)
+		clk_put(musb->clock);
+	device_init_wakeup(dev, 0);
+	musb_free(musb);
 
 	return status;
 
-fail2:
-	musb_platform_exit(musb);
-	goto fail;
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index a57652f..3f5e30d 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -437,7 +437,7 @@
 {
 	void __iomem		*regs = musb->control_ep->regs;
 	struct usb_request	*req;
-	u16			tmp;
+	u16			count, csr;
 
 	req = next_ep0_request(musb);
 
@@ -449,35 +449,35 @@
 		unsigned	len = req->length - req->actual;
 
 		/* read the buffer */
-		tmp = musb_readb(regs, MUSB_COUNT0);
-		if (tmp > len) {
+		count = musb_readb(regs, MUSB_COUNT0);
+		if (count > len) {
 			req->status = -EOVERFLOW;
-			tmp = len;
+			count = len;
 		}
-		musb_read_fifo(&musb->endpoints[0], tmp, buf);
-		req->actual += tmp;
-		tmp = MUSB_CSR0_P_SVDRXPKTRDY;
-		if (tmp < 64 || req->actual == req->length) {
+		musb_read_fifo(&musb->endpoints[0], count, buf);
+		req->actual += count;
+		csr = MUSB_CSR0_P_SVDRXPKTRDY;
+		if (count < 64 || req->actual == req->length) {
 			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
-			tmp |= MUSB_CSR0_P_DATAEND;
+			csr |= MUSB_CSR0_P_DATAEND;
 		} else
 			req = NULL;
 	} else
-		tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+		csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
 
 
 	/* Completion handler may choose to stall, e.g. because the
 	 * message just received holds invalid data.
 	 */
 	if (req) {
-		musb->ackpend = tmp;
+		musb->ackpend = csr;
 		musb_g_ep0_giveback(musb, req);
 		if (!musb->ackpend)
 			return;
 		musb->ackpend = 0;
 	}
 	musb_ep_select(musb->mregs, 0);
-	musb_writew(regs, MUSB_CSR0, tmp);
+	musb_writew(regs, MUSB_CSR0, csr);
 }
 
 /*
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8b4be01..3133990 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -108,7 +108,7 @@
 /*
  * Clear TX fifo. Needed to avoid BABBLE errors.
  */
-static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
 {
 	void __iomem	*epio = ep->regs;
 	u16		csr;
@@ -291,6 +291,7 @@
 			urb->actual_length, urb->transfer_buffer_length
 			);
 
+	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
 	spin_unlock(&musb->lock);
 	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
 	spin_lock(&musb->lock);
@@ -353,8 +354,6 @@
 		break;
 	}
 
-	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
-
 	qh->is_ready = 0;
 	__musb_giveback(musb, urb, status);
 	qh->is_ready = ready;
@@ -436,7 +435,7 @@
 	}
 }
 
-static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
 {
 	/* we don't want fifo to fill itself again;
 	 * ignore dma (various models),
@@ -1005,7 +1004,7 @@
 
 /*
  * Handle default endpoint interrupt as host. Only called in IRQ time
- * from the LinuxIsr() interrupt service routine.
+ * from musb_interrupt().
  *
  * called with controller irqlocked
  */
@@ -1791,7 +1790,9 @@
 	 */
 	qh = kzalloc(sizeof *qh, mem_flags);
 	if (!qh) {
+		spin_lock_irqsave(&musb->lock, flags);
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		spin_unlock_irqrestore(&musb->lock, flags);
 		return -ENOMEM;
 	}
 
@@ -1873,7 +1874,11 @@
 			/* set up tt info if needed */
 			if (urb->dev->tt) {
 				qh->h_port_reg = (u8) urb->dev->ttport;
-				qh->h_addr_reg |= 0x80;
+				if (urb->dev->tt->hub)
+					qh->h_addr_reg =
+						(u8) urb->dev->tt->hub->devnum;
+				if (urb->dev->tt->multi)
+					qh->h_addr_reg |= 0x80;
 			}
 		}
 	}
@@ -1903,7 +1908,9 @@
 
 done:
 	if (ret != 0) {
+		spin_lock_irqsave(&musb->lock, flags);
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		spin_unlock_irqrestore(&musb->lock, flags);
 		kfree(qh);
 	}
 	return ret;
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 6bbedae..223f0a5 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -37,7 +37,9 @@
 
 #include <linux/io.h>
 
-#ifndef	CONFIG_ARM
+#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
+	&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
+	&& !defined(CONFIG_PPC64)
 static inline void readsl(const void __iomem *addr, void *buf, int len)
 	{ insl((unsigned long)addr, buf, len); }
 static inline void readsw(const void __iomem *addr, void *buf, int len)
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 9ba8fb7..8c734ef 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -45,8 +45,8 @@
 #define MUSB_HSDMA_ADDRESS		0x8
 #define MUSB_HSDMA_COUNT		0xc
 
-#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset)		\
-		(MUSB_HSDMA_BASE + (_bChannel << 4) + _offset)
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
+		(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
 
 /* control register (16-bit): */
 #define MUSB_HSDMA_ENABLE_SHIFT		0
@@ -67,23 +67,23 @@
 struct musb_dma_controller;
 
 struct musb_dma_channel {
-	struct dma_channel		Channel;
+	struct dma_channel		channel;
 	struct musb_dma_controller	*controller;
-	u32				dwStartAddress;
+	u32				start_addr;
 	u32				len;
-	u16				wMaxPacketSize;
-	u8				bIndex;
+	u16				max_packet_sz;
+	u8				idx;
 	u8				epnum;
 	u8				transmit;
 };
 
 struct musb_dma_controller {
-	struct dma_controller		Controller;
-	struct musb_dma_channel		aChannel[MUSB_HSDMA_CHANNELS];
-	void				*pDmaPrivate;
-	void __iomem			*pCoreBase;
-	u8				bChannelCount;
-	u8				bmUsedChannels;
+	struct dma_controller		controller;
+	struct musb_dma_channel		channel[MUSB_HSDMA_CHANNELS];
+	void				*private_data;
+	void __iomem			*base;
+	u8				channel_count;
+	u8				used_channels;
 	u8				irq;
 };
 
@@ -93,91 +93,91 @@
 	return 0;
 }
 
-static void dma_channel_release(struct dma_channel *pChannel);
+static void dma_channel_release(struct dma_channel *channel);
 
 static int dma_controller_stop(struct dma_controller *c)
 {
-	struct musb_dma_controller *controller =
-		container_of(c, struct musb_dma_controller, Controller);
-	struct musb *musb = (struct musb *) controller->pDmaPrivate;
-	struct dma_channel *pChannel;
-	u8 bBit;
+	struct musb_dma_controller *controller = container_of(c,
+			struct musb_dma_controller, controller);
+	struct musb *musb = controller->private_data;
+	struct dma_channel *channel;
+	u8 bit;
 
-	if (controller->bmUsedChannels != 0) {
+	if (controller->used_channels != 0) {
 		dev_err(musb->controller,
 			"Stopping DMA controller while channel active\n");
 
-		for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
-			if (controller->bmUsedChannels & (1 << bBit)) {
-				pChannel = &controller->aChannel[bBit].Channel;
-				dma_channel_release(pChannel);
+		for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
+			if (controller->used_channels & (1 << bit)) {
+				channel = &controller->channel[bit].channel;
+				dma_channel_release(channel);
 
-				if (!controller->bmUsedChannels)
+				if (!controller->used_channels)
 					break;
 			}
 		}
 	}
+
 	return 0;
 }
 
 static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
 				struct musb_hw_ep *hw_ep, u8 transmit)
 {
-	u8 bBit;
-	struct dma_channel *pChannel = NULL;
-	struct musb_dma_channel *pImplChannel = NULL;
-	struct musb_dma_controller *controller =
-			container_of(c, struct musb_dma_controller, Controller);
+	struct musb_dma_controller *controller = container_of(c,
+			struct musb_dma_controller, controller);
+	struct musb_dma_channel *musb_channel = NULL;
+	struct dma_channel *channel = NULL;
+	u8 bit;
 
-	for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
-		if (!(controller->bmUsedChannels & (1 << bBit))) {
-			controller->bmUsedChannels |= (1 << bBit);
-			pImplChannel = &(controller->aChannel[bBit]);
-			pImplChannel->controller = controller;
-			pImplChannel->bIndex = bBit;
-			pImplChannel->epnum = hw_ep->epnum;
-			pImplChannel->transmit = transmit;
-			pChannel = &(pImplChannel->Channel);
-			pChannel->private_data = pImplChannel;
-			pChannel->status = MUSB_DMA_STATUS_FREE;
-			pChannel->max_len = 0x10000;
+	for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
+		if (!(controller->used_channels & (1 << bit))) {
+			controller->used_channels |= (1 << bit);
+			musb_channel = &(controller->channel[bit]);
+			musb_channel->controller = controller;
+			musb_channel->idx = bit;
+			musb_channel->epnum = hw_ep->epnum;
+			musb_channel->transmit = transmit;
+			channel = &(musb_channel->channel);
+			channel->private_data = musb_channel;
+			channel->status = MUSB_DMA_STATUS_FREE;
+			channel->max_len = 0x10000;
 			/* Tx => mode 1; Rx => mode 0 */
-			pChannel->desired_mode = transmit;
-			pChannel->actual_len = 0;
+			channel->desired_mode = transmit;
+			channel->actual_len = 0;
 			break;
 		}
 	}
-	return pChannel;
+
+	return channel;
 }
 
-static void dma_channel_release(struct dma_channel *pChannel)
+static void dma_channel_release(struct dma_channel *channel)
 {
-	struct musb_dma_channel *pImplChannel =
-		(struct musb_dma_channel *) pChannel->private_data;
+	struct musb_dma_channel *musb_channel = channel->private_data;
 
-	pChannel->actual_len = 0;
-	pImplChannel->dwStartAddress = 0;
-	pImplChannel->len = 0;
+	channel->actual_len = 0;
+	musb_channel->start_addr = 0;
+	musb_channel->len = 0;
 
-	pImplChannel->controller->bmUsedChannels &=
-		~(1 << pImplChannel->bIndex);
+	musb_channel->controller->used_channels &=
+		~(1 << musb_channel->idx);
 
-	pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
+	channel->status = MUSB_DMA_STATUS_UNKNOWN;
 }
 
-static void configure_channel(struct dma_channel *pChannel,
+static void configure_channel(struct dma_channel *channel,
 				u16 packet_sz, u8 mode,
 				dma_addr_t dma_addr, u32 len)
 {
-	struct musb_dma_channel *pImplChannel =
-		(struct musb_dma_channel *) pChannel->private_data;
-	struct musb_dma_controller *controller = pImplChannel->controller;
-	void __iomem *mbase = controller->pCoreBase;
-	u8 bChannel = pImplChannel->bIndex;
+	struct musb_dma_channel *musb_channel = channel->private_data;
+	struct musb_dma_controller *controller = musb_channel->controller;
+	void __iomem *mbase = controller->base;
+	u8 bchannel = musb_channel->idx;
 	u16 csr = 0;
 
 	DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
-			pChannel, packet_sz, dma_addr, len, mode);
+			channel, packet_sz, dma_addr, len, mode);
 
 	if (mode) {
 		csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
@@ -195,180 +195,183 @@
 		}
 	}
 
-	csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+	csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
 		| (1 << MUSB_HSDMA_ENABLE_SHIFT)
 		| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
-		| (pImplChannel->transmit
+		| (musb_channel->transmit
 				? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
 				: 0);
 
 	/* address/count */
 	musb_writel(mbase,
-		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS),
 		dma_addr);
 	musb_writel(mbase,
-		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT),
 		len);
 
 	/* control (this should start things) */
 	musb_writew(mbase,
-		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
 		csr);
 }
 
-static int dma_channel_program(struct dma_channel *pChannel,
+static int dma_channel_program(struct dma_channel *channel,
 				u16 packet_sz, u8 mode,
 				dma_addr_t dma_addr, u32 len)
 {
-	struct musb_dma_channel *pImplChannel =
-			(struct musb_dma_channel *) pChannel->private_data;
+	struct musb_dma_channel *musb_channel = channel->private_data;
 
 	DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
-		pImplChannel->epnum,
-		pImplChannel->transmit ? "Tx" : "Rx",
+		musb_channel->epnum,
+		musb_channel->transmit ? "Tx" : "Rx",
 		packet_sz, dma_addr, len, mode);
 
-	BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
-		pChannel->status == MUSB_DMA_STATUS_BUSY);
+	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+		channel->status == MUSB_DMA_STATUS_BUSY);
 
-	pChannel->actual_len = 0;
-	pImplChannel->dwStartAddress = dma_addr;
-	pImplChannel->len = len;
-	pImplChannel->wMaxPacketSize = packet_sz;
-	pChannel->status = MUSB_DMA_STATUS_BUSY;
+	channel->actual_len = 0;
+	musb_channel->start_addr = dma_addr;
+	musb_channel->len = len;
+	musb_channel->max_packet_sz = packet_sz;
+	channel->status = MUSB_DMA_STATUS_BUSY;
 
 	if ((mode == 1) && (len >= packet_sz))
-		configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+		configure_channel(channel, packet_sz, 1, dma_addr, len);
 	else
-		configure_channel(pChannel, packet_sz, 0, dma_addr, len);
+		configure_channel(channel, packet_sz, 0, dma_addr, len);
 
 	return true;
 }
 
-static int dma_channel_abort(struct dma_channel *pChannel)
+static int dma_channel_abort(struct dma_channel *channel)
 {
-	struct musb_dma_channel *pImplChannel =
-		(struct musb_dma_channel *) pChannel->private_data;
-	u8 bChannel = pImplChannel->bIndex;
-	void __iomem *mbase = pImplChannel->controller->pCoreBase;
+	struct musb_dma_channel *musb_channel = channel->private_data;
+	void __iomem *mbase = musb_channel->controller->base;
+
+	u8 bchannel = musb_channel->idx;
 	u16 csr;
 
-	if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
-		if (pImplChannel->transmit) {
+	if (channel->status == MUSB_DMA_STATUS_BUSY) {
+		if (musb_channel->transmit) {
 
 			csr = musb_readw(mbase,
-				MUSB_EP_OFFSET(pImplChannel->epnum,
+				MUSB_EP_OFFSET(musb_channel->epnum,
 						MUSB_TXCSR));
 			csr &= ~(MUSB_TXCSR_AUTOSET |
 				 MUSB_TXCSR_DMAENAB |
 				 MUSB_TXCSR_DMAMODE);
 			musb_writew(mbase,
-				MUSB_EP_OFFSET(pImplChannel->epnum,
-						MUSB_TXCSR),
+				MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR),
 				csr);
 		} else {
 			csr = musb_readw(mbase,
-				MUSB_EP_OFFSET(pImplChannel->epnum,
+				MUSB_EP_OFFSET(musb_channel->epnum,
 						MUSB_RXCSR));
 			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
 				 MUSB_RXCSR_DMAENAB |
 				 MUSB_RXCSR_DMAMODE);
 			musb_writew(mbase,
-				MUSB_EP_OFFSET(pImplChannel->epnum,
-						MUSB_RXCSR),
+				MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR),
 				csr);
 		}
 
 		musb_writew(mbase,
-			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
 			0);
 		musb_writel(mbase,
-			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS),
 			0);
 		musb_writel(mbase,
-			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT),
 			0);
 
-		pChannel->status = MUSB_DMA_STATUS_FREE;
+		channel->status = MUSB_DMA_STATUS_FREE;
 	}
+
 	return 0;
 }
 
 static irqreturn_t dma_controller_irq(int irq, void *private_data)
 {
-	struct musb_dma_controller *controller =
-		(struct musb_dma_controller *)private_data;
-	struct musb_dma_channel *pImplChannel;
-	struct musb *musb = controller->pDmaPrivate;
-	void __iomem *mbase = controller->pCoreBase;
-	struct dma_channel *pChannel;
-	u8 bChannel;
-	u16 csr;
-	u32 dwAddress;
-	u8 int_hsdma;
+	struct musb_dma_controller *controller = private_data;
+	struct musb *musb = controller->private_data;
+	struct musb_dma_channel *musb_channel;
+	struct dma_channel *channel;
+
+	void __iomem *mbase = controller->base;
+
 	irqreturn_t retval = IRQ_NONE;
+
 	unsigned long flags;
 
+	u8 bchannel;
+	u8 int_hsdma;
+
+	u32 addr;
+	u16 csr;
+
 	spin_lock_irqsave(&musb->lock, flags);
 
 	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
 	if (!int_hsdma)
 		goto done;
 
-	for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
-		if (int_hsdma & (1 << bChannel)) {
-			pImplChannel = (struct musb_dma_channel *)
-					&(controller->aChannel[bChannel]);
-			pChannel = &pImplChannel->Channel;
+	for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+		if (int_hsdma & (1 << bchannel)) {
+			musb_channel = (struct musb_dma_channel *)
+					&(controller->channel[bchannel]);
+			channel = &musb_channel->channel;
 
 			csr = musb_readw(mbase,
-					MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+					MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
 							MUSB_HSDMA_CONTROL));
 
-			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
-				pImplChannel->Channel.status =
+			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
+				musb_channel->channel.status =
 					MUSB_DMA_STATUS_BUS_ABORT;
-			else {
+			} else {
 				u8 devctl;
 
-				dwAddress = musb_readl(mbase,
+				addr = musb_readl(mbase,
 						MUSB_HSDMA_CHANNEL_OFFSET(
-							bChannel,
+							bchannel,
 							MUSB_HSDMA_ADDRESS));
-				pChannel->actual_len = dwAddress
-					- pImplChannel->dwStartAddress;
+				channel->actual_len = addr
+					- musb_channel->start_addr;
 
 				DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
-					pChannel, pImplChannel->dwStartAddress,
-					dwAddress, pChannel->actual_len,
-					pImplChannel->len,
-					(pChannel->actual_len
-						< pImplChannel->len) ?
+					channel, musb_channel->start_addr,
+					addr, channel->actual_len,
+					musb_channel->len,
+					(channel->actual_len
+						< musb_channel->len) ?
 					"=> reconfig 0" : "=> complete");
 
 				devctl = musb_readb(mbase, MUSB_DEVCTL);
 
-				pChannel->status = MUSB_DMA_STATUS_FREE;
+				channel->status = MUSB_DMA_STATUS_FREE;
 
 				/* completed */
 				if ((devctl & MUSB_DEVCTL_HM)
-					&& (pImplChannel->transmit)
-					&& ((pChannel->desired_mode == 0)
-					    || (pChannel->actual_len &
-					    (pImplChannel->wMaxPacketSize - 1)))
+					&& (musb_channel->transmit)
+					&& ((channel->desired_mode == 0)
+					    || (channel->actual_len &
+					    (musb_channel->max_packet_sz - 1)))
 					 ) {
 					/* Send out the packet */
 					musb_ep_select(mbase,
-						pImplChannel->epnum);
+						musb_channel->epnum);
 					musb_writew(mbase, MUSB_EP_OFFSET(
-							pImplChannel->epnum,
+							musb_channel->epnum,
 							MUSB_TXCSR),
 						MUSB_TXCSR_TXPKTRDY);
-				} else
+				} else {
 					musb_dma_completion(
 						musb,
-						pImplChannel->epnum,
-						pImplChannel->transmit);
+						musb_channel->epnum,
+						musb_channel->transmit);
+				}
 			}
 		}
 	}
@@ -380,9 +383,9 @@
 
 void dma_controller_destroy(struct dma_controller *c)
 {
-	struct musb_dma_controller *controller;
+	struct musb_dma_controller *controller = container_of(c,
+			struct musb_dma_controller, controller);
 
-	controller = container_of(c, struct musb_dma_controller, Controller);
 	if (!controller)
 		return;
 
@@ -393,7 +396,7 @@
 }
 
 struct dma_controller *__init
-dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
+dma_controller_create(struct musb *musb, void __iomem *base)
 {
 	struct musb_dma_controller *controller;
 	struct device *dev = musb->controller;
@@ -405,29 +408,30 @@
 		return NULL;
 	}
 
-	controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 	if (!controller)
 		return NULL;
 
-	controller->bChannelCount = MUSB_HSDMA_CHANNELS;
-	controller->pDmaPrivate = musb;
-	controller->pCoreBase = pCoreBase;
+	controller->channel_count = MUSB_HSDMA_CHANNELS;
+	controller->private_data = musb;
+	controller->base = base;
 
-	controller->Controller.start = dma_controller_start;
-	controller->Controller.stop = dma_controller_stop;
-	controller->Controller.channel_alloc = dma_channel_allocate;
-	controller->Controller.channel_release = dma_channel_release;
-	controller->Controller.channel_program = dma_channel_program;
-	controller->Controller.channel_abort = dma_channel_abort;
+	controller->controller.start = dma_controller_start;
+	controller->controller.stop = dma_controller_stop;
+	controller->controller.channel_alloc = dma_channel_allocate;
+	controller->controller.channel_release = dma_channel_release;
+	controller->controller.channel_program = dma_channel_program;
+	controller->controller.channel_abort = dma_channel_abort;
 
 	if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
-			musb->controller->bus_id, &controller->Controller)) {
+			musb->controller->bus_id, &controller->controller)) {
 		dev_err(dev, "request_irq %d failed!\n", irq);
-		dma_controller_destroy(&controller->Controller);
+		dma_controller_destroy(&controller->controller);
+
 		return NULL;
 	}
 
 	controller->irq = irq;
 
-	return &controller->Controller;
+	return &controller->controller;
 }
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 99fb7dc..537f953 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -220,8 +220,8 @@
 
 	buf = kzalloc(count + HCI_HEADER_LENGTH, GFP_ATOMIC);
 	if (!buf) {
-		err("%s- kzalloc(%d) failed.", __func__,
-		    count + HCI_HEADER_LENGTH);
+		dev_err(&port->dev, "%s- kzalloc(%d) failed.\n",
+			__func__, count + HCI_HEADER_LENGTH);
 		return;
 	}
 
@@ -276,7 +276,7 @@
 
 	if (!tty) {
 		schedule_work(&priv->rx_work);
-		err("%s - No tty available", __func__);
+		dev_err(&port->dev, "%s - No tty available\n", __func__);
 		return ;
 	}
 
@@ -336,7 +336,7 @@
 
 	priv = kzalloc(sizeof(struct aircable_private), GFP_KERNEL);
 	if (!priv) {
-		err("%s- kmalloc(%Zd) failed.", __func__,
+		dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
 			sizeof(struct aircable_private));
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 1913bc7..b7eacad 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -187,7 +187,7 @@
 	/* see comments at top of file */
 	priv->bad_flow_control =
 		(le16_to_cpu(dev->descriptor.bcdDevice) <= 0x0206) ? 1 : 0;
-	info("bcdDevice: %04x, bfc: %d",
+	dev_info(&dev->dev, "bcdDevice: %04x, bfc: %d\n",
 					le16_to_cpu(dev->descriptor.bcdDevice),
 					priv->bad_flow_control);
 
@@ -228,7 +228,7 @@
 	port->read_urb->dev = port->serial->dev;
 	retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
 	if (retval) {
-		err("usb_submit_urb(read bulk) failed");
+		dev_err(&port->dev, "usb_submit_urb(read bulk) failed\n");
 		goto exit;
 	}
 
@@ -236,7 +236,7 @@
 	retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
 	if (retval) {
 		usb_kill_urb(port->read_urb);
-		err(" usb_submit_urb(read int) failed");
+		dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
 	}
 
 exit:
@@ -342,8 +342,8 @@
 exit:
 	retval = usb_submit_urb(urb, GFP_ATOMIC);
 	if (retval)
-		err("%s - usb_submit_urb failed with result %d",
-		     __func__, retval);
+		dev_err(&port->dev, "%s - usb_submit_urb failed with "
+			"result %d\n", __func__, retval);
 }
 
 static void belkin_sa_set_termios(struct tty_struct *tty,
@@ -382,12 +382,12 @@
 		if ((old_cflag & CBAUD) == B0) {
 			control_state |= (TIOCM_DTR|TIOCM_RTS);
 			if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) < 0)
-				err("Set DTR error");
+				dev_err(&port->dev, "Set DTR error\n");
 			/* don't set RTS if using hardware flow control */
 			if (!(old_cflag & CRTSCTS))
 				if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST
 								, 1) < 0)
-					err("Set RTS error");
+					dev_err(&port->dev, "Set RTS error\n");
 		}
 	}
 
@@ -403,18 +403,18 @@
 		/* Report the actual baud rate back to the caller */
 		tty_encode_baud_rate(tty, baud, baud);
 		if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0)
-			err("Set baudrate error");
+			dev_err(&port->dev, "Set baudrate error\n");
 	} else {
 		/* Disable flow control */
 		if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST,
 						BELKIN_SA_FLOW_NONE) < 0)
-			err("Disable flowcontrol error");
+			dev_err(&port->dev, "Disable flowcontrol error\n");
 		/* Drop RTS and DTR */
 		control_state &= ~(TIOCM_DTR | TIOCM_RTS);
 		if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0)
-			err("DTR LOW error");
+			dev_err(&port->dev, "DTR LOW error\n");
 		if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0)
-			err("RTS LOW error");
+			dev_err(&port->dev, "RTS LOW error\n");
 	}
 
 	/* set the parity */
@@ -425,7 +425,7 @@
 		else
 			urb_value = BELKIN_SA_PARITY_NONE;
 		if (BSA_USB_CMD(BELKIN_SA_SET_PARITY_REQUEST, urb_value) < 0)
-			err("Set parity error");
+			dev_err(&port->dev, "Set parity error\n");
 	}
 
 	/* set the number of data bits */
@@ -448,7 +448,7 @@
 			break;
 		}
 		if (BSA_USB_CMD(BELKIN_SA_SET_DATA_BITS_REQUEST, urb_value) < 0)
-			err("Set data bits error");
+			dev_err(&port->dev, "Set data bits error\n");
 	}
 
 	/* set the number of stop bits */
@@ -457,7 +457,7 @@
 						: BELKIN_SA_STOP_BITS(1);
 		if (BSA_USB_CMD(BELKIN_SA_SET_STOP_BITS_REQUEST,
 							urb_value) < 0)
-			err("Set stop bits error");
+			dev_err(&port->dev, "Set stop bits error\n");
 	}
 
 	/* Set flow control */
@@ -478,7 +478,7 @@
 			urb_value &= ~(BELKIN_SA_FLOW_IRTS);
 
 		if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, urb_value) < 0)
-			err("Set flow control error");
+			dev_err(&port->dev, "Set flow control error\n");
 	}
 
 	/* save off the modified port settings */
@@ -494,7 +494,7 @@
 	struct usb_serial *serial = port->serial;
 
 	if (BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0) < 0)
-		err("Set break_ctl %d", break_state);
+		dev_err(&port->dev, "Set break_ctl %d\n", break_state);
 }
 
 
@@ -554,13 +554,13 @@
 
 	retval = BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, rts);
 	if (retval < 0) {
-		err("Set RTS error %d", retval);
+		dev_err(&port->dev, "Set RTS error %d\n", retval);
 		goto exit;
 	}
 
 	retval = BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, dtr);
 	if (retval < 0) {
-		err("Set DTR error %d", retval);
+		dev_err(&port->dev, "Set DTR error %d\n", retval);
 		goto exit;
 	}
 exit:
@@ -577,7 +577,8 @@
 	retval = usb_register(&belkin_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&belkin_device);
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 1279553..8008d0b 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -753,7 +753,8 @@
 	}
 
 	/* Success */
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 }
 
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 94ef36c..858bdd0 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -141,7 +141,8 @@
 		result = usb_submit_urb(serial->port[i]->interrupt_in_urb,
 					GFP_KERNEL);
 		if (result)
-			err(" usb_submit_urb(read int) failed");
+			dev_err(&serial->dev->dev,
+				"usb_submit_urb(read int) failed\n");
 		dbg("%s - usb_submit_urb(int urb)", __func__);
 	}
 
@@ -274,8 +275,9 @@
 		/* send the data out the bulk port */
 		result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
 		if (result) {
-			err("%s - failed submitting write urb, error %d",
-							__func__, result);
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d",
+				__func__, result);
 			/* Throw away data. No better idea what to do with it. */
 			priv->wrfilled = 0;
 			priv->wrsent = 0;
@@ -351,7 +353,9 @@
 			port->read_urb->dev = port->serial->dev;
 			result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
 			if (result)
-				err("%s - failed resubmitting read urb, error %d", __func__, result);
+				dev_err(&port->dev, "%s - failed resubmitting "
+					"read urb, error %d\n",
+					__func__, result);
 			dbg("%s - usb_submit_urb(read urb)", __func__);
 		}
 	}
@@ -360,7 +364,7 @@
 	port->interrupt_in_urb->dev = port->serial->dev;
 	result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
 	if (result)
-		err(" usb_submit_urb(read int) failed");
+		dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
 	dbg("%s - usb_submit_urb(int urb)", __func__);
 }
 
@@ -414,8 +418,8 @@
 		port->read_urb->dev = port->serial->dev;
 		result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
 		if (result)
-			err("%s - failed resubmitting read urb, error %d",
-				__func__, result);
+			dev_err(&port->dev, "%s - failed resubmitting read "
+				"urb, error %d\n", __func__, result);
 		dbg("%s - usb_submit_urb(read urb)", __func__);
 	}
 }
@@ -462,8 +466,9 @@
 		/* send the data out the bulk port */
 		result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
 		if (result) {
-			err("%s - failed submitting write urb, error %d",
-								__func__, result);
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d\n",
+				__func__, result);
 			/* Throw away data. No better idea what to do with it. */
 			priv->wrfilled = 0;
 			priv->wrsent = 0;
@@ -499,8 +504,9 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_VERSION " " DRIVER_AUTHOR);
-	info(DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION " "
+	       DRIVER_AUTHOR "\n");
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
 
 	return 0;
 failed_usb_register:
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index f3514a9..eae4740 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -404,8 +404,8 @@
 			 retval != -ENODEV);
 
 		if (retval != sizeof(feature_buffer)) {
-			err("%s - failed sending serial line settings - %d",
-							__func__, retval);
+			dev_err(&port->dev, "%s - failed sending serial "
+				"line settings - %d\n", __func__, retval);
 			cypress_set_dead(port);
 		} else {
 			spin_lock_irqsave(&priv->lock, flags);
@@ -443,7 +443,8 @@
 						&& retval != -ENODEV);
 
 		if (retval != sizeof(feature_buffer)) {
-			err("%s - failed to retrieve serial line settings - %d", __func__, retval);
+			dev_err(&port->dev, "%s - failed to retrieve serial "
+				"line settings - %d\n", __func__, retval);
 			cypress_set_dead(port);
 			return retval;
 		} else {
@@ -476,8 +477,8 @@
 	priv->comm_is_ok = 0;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	err("cypress_m8 suspending failing port %d - interval might be too short",
-	    port->number);
+	dev_err(&port->dev, "cypress_m8 suspending failing port %d - "
+		"interval might be too short\n", port->number);
 }
 
 
@@ -679,7 +680,8 @@
 
 	/* setup the port and start reading from the device */
 	if (!port->interrupt_in_urb) {
-		err("%s - interrupt_in_urb is empty!", __func__);
+		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
+			__func__);
 		return -1;
 	}
 
@@ -1107,8 +1109,8 @@
 		data_bits = 3;
 		break;
 	default:
-		err("%s - CSIZE was set, but not CS5-CS8",
-				__func__);
+		dev_err(&port->dev, "%s - CSIZE was set, but not CS5-CS8\n",
+			__func__);
 		data_bits = 3;
 	}
 	spin_lock_irqsave(&priv->lock, flags);
@@ -1658,7 +1660,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 
 failed_usb_register:
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 5756ac6..69f84f0 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -661,7 +661,8 @@
 	}
 	spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
 	if (ret)
-		err("%s: usb_submit_urb failed, ret=%d", __func__, ret);
+		dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
+			__func__, ret);
 	return ret;
 
 }
@@ -743,7 +744,8 @@
 	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
 
 	if (ret)
-		err("%s: usb_submit_urb failed, ret=%d, port=%d",
+		dev_err(&port->dev,
+			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
 			__func__, ret, priv->dp_port_num);
 	return ret;
 }
@@ -812,7 +814,8 @@
 	spin_unlock(&port_priv->dp_port_lock);
 	spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
 	if (ret)
-		err("%s: usb_submit_urb failed, ret=%d", __func__, ret);
+		dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
+			__func__, ret);
 	return ret;
 }
 
@@ -907,7 +910,8 @@
 	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
 
 	if (ret)
-		err("%s: usb_submit_urb failed, ret=%d, port=%d",
+		dev_err(&port->dev,
+			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
 			__func__, ret, priv->dp_port_num);
 }
 
@@ -1214,7 +1218,8 @@
 	/* return length of new data written, or error */
 	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
 	if (ret < 0)
-		err("%s: usb_submit_urb failed, ret=%d, port=%d",
+		dev_err(&port->dev,
+			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
 			__func__, ret, priv->dp_port_num);
 	dbg("digi_write: returning %d", ret);
 	return ret;
@@ -1235,14 +1240,16 @@
 
 	/* port and serial sanity check */
 	if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
-		err("%s: port or port->private is NULL, status=%d",
-		    __func__, status);
+		dev_err(&port->dev,
+			"%s: port or port->private is NULL, status=%d\n",
+			__func__, status);
 		return;
 	}
 	serial = port->serial;
 	if (serial == NULL || (serial_priv = usb_get_serial_data(serial)) == NULL) {
-		err("%s: serial or serial->private is NULL, status=%d",
-		    __func__, status);
+		dev_err(&port->dev,
+			"%s: serial or serial->private is NULL, status=%d\n",
+			__func__, status);
 		return;
 	}
 
@@ -1284,7 +1291,8 @@
 
 	spin_unlock(&priv->dp_port_lock);
 	if (ret)
-		err("%s: usb_submit_urb failed, ret=%d, port=%d",
+		dev_err(&port->dev,
+			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
 			__func__, ret, priv->dp_port_num);
 }
 
@@ -1518,8 +1526,9 @@
 		port->write_urb->dev = port->serial->dev;
 		ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
 		if (ret != 0) {
-			err("%s: usb_submit_urb failed, ret=%d, port=%d",
-					__func__, ret, i);
+			dev_err(&port->dev,
+				"%s: usb_submit_urb failed, ret=%d, port=%d\n",
+				__func__, ret, i);
 			break;
 		}
 	}
@@ -1618,22 +1627,26 @@
 	dbg("digi_read_bulk_callback: TOP");
 
 	/* port sanity check, do not resubmit if port is not valid */
-	if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
-		err("%s: port or port->private is NULL, status=%d",
-		    __func__, status);
+	if (port == NULL)
+		return;
+	priv = usb_get_serial_port_data(port);
+	if (priv == NULL) {
+		dev_err(&port->dev, "%s: port->private is NULL, status=%d\n",
+			__func__, status);
 		return;
 	}
 	if (port->serial == NULL ||
 		(serial_priv = usb_get_serial_data(port->serial)) == NULL) {
-		err("%s: serial is bad or serial->private is NULL, status=%d",
-			__func__, status);
+		dev_err(&port->dev, "%s: serial is bad or serial->private "
+			"is NULL, status=%d\n", __func__, status);
 		return;
 	}
 
 	/* do not resubmit urb if it has any status error */
 	if (status) {
-		err("%s: nonzero read bulk status: status=%d, port=%d",
-		    __func__, status, priv->dp_port_num);
+		dev_err(&port->dev,
+			"%s: nonzero read bulk status: status=%d, port=%d\n",
+			__func__, status, priv->dp_port_num);
 		return;
 	}
 
@@ -1650,8 +1663,9 @@
 	urb->dev = port->serial->dev;
 	ret = usb_submit_urb(urb, GFP_ATOMIC);
 	if (ret != 0) {
-		err("%s: failed resubmitting urb, ret=%d, port=%d",
-		    __func__, ret, priv->dp_port_num);
+		dev_err(&port->dev,
+			"%s: failed resubmitting urb, ret=%d, port=%d\n",
+			__func__, ret, priv->dp_port_num);
 	}
 
 }
@@ -1687,10 +1701,11 @@
 
 	/* short/multiple packet check */
 	if (urb->actual_length != len + 2) {
-		err("%s: INCOMPLETE OR MULTIPLE PACKET, urb->status=%d, "
-		    "port=%d, opcode=%d, len=%d, actual_length=%d, "
-		    "status=%d", __func__, status, priv->dp_port_num,
-		    opcode, len, urb->actual_length, port_status);
+		dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
+			"urb->status=%d, port=%d, opcode=%d, len=%d, "
+			"actual_length=%d, status=%d\n", __func__, status,
+			priv->dp_port_num, opcode, len, urb->actual_length,
+			port_status);
 		return -1;
 	}
 
@@ -1854,7 +1869,8 @@
 	retval = usb_register(&digi_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&digi_acceleport_4_device);
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 1072e84..8a69cce 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -416,7 +416,7 @@
 	dbg("%s", __func__);
 
 	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
-		err("active config #%d != 1 ??",
+		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
 			serial->dev->actconfig->desc.bConfigurationValue);
 		return -ENODEV;
 	}
@@ -499,15 +499,15 @@
 		urb = usb_alloc_urb(0, GFP_KERNEL);
 		write_urb_pool[i] = urb;
 		if (urb == NULL) {
-			err("No more urbs???");
+			printk(KERN_ERR "empeg: No more urbs???\n");
 			continue;
 		}
 
 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
 								GFP_KERNEL);
 		if (!urb->transfer_buffer) {
-			err("%s - out of memory for urb buffers.",
-			    __func__);
+			printk(KERN_ERR "empeg: %s - out of memory for urb "
+			       "buffers.", __func__);
 			continue;
 		}
 	}
@@ -519,7 +519,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 failed_usb_register:
diff --git a/drivers/usb/serial/ezusb.c b/drivers/usb/serial/ezusb.c
index 711e84f..3cfc762 100644
--- a/drivers/usb/serial/ezusb.c
+++ b/drivers/usb/serial/ezusb.c
@@ -28,7 +28,8 @@
 
 	/* dbg("ezusb_writememory %x, %d", address, length); */
 	if (!serial->dev) {
-		err("%s - no physical device present, failing.", __func__);
+		printk(KERN_ERR "ezusb: %s - no physical device present, "
+		       "failing.\n", __func__);
 		return -ENODEV;
 	}
 
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c2ac129..51d7bde 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -578,6 +578,7 @@
 	{ USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) },
 	{ USB_DEVICE(FALCOM_VID, FALCOM_SAMBA_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
 	{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
@@ -1153,7 +1154,7 @@
 		/* Assume its an FT232R  */
 		priv->chip_type = FT232RL;
 	}
-	info("Detected %s", ftdi_chip_name[priv->chip_type]);
+	dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
 }
 
 
@@ -1326,7 +1327,7 @@
 
 	priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
 	if (!priv) {
-		err("%s- kmalloc(%Zd) failed.", __func__,
+		dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
 					sizeof(struct ftdi_private));
 		return -ENOMEM;
 	}
@@ -1409,7 +1410,8 @@
 	dbg("%s", __func__);
 
 	if (interface == udev->actconfig->interface[0]) {
-		info("Ignoring serial port reserved for JTAG");
+		dev_info(&udev->dev,
+			 "Ignoring serial port reserved for JTAG\n");
 		return -ENODEV;
 	}
 
@@ -1427,7 +1429,8 @@
 
 	if (ep->enabled && ep_desc->wMaxPacketSize == 0) {
 		ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
-		info("Fixing invalid wMaxPacketSize on read pipe");
+		dev_info(&serial->dev->dev,
+			 "Fixing invalid wMaxPacketSize on read pipe\n");
 	}
 
 	return 0;
@@ -1521,8 +1524,9 @@
 			ftdi_read_bulk_callback, port);
 	result = usb_submit_urb(port->read_urb, GFP_KERNEL);
 	if (result)
-		err("%s - failed submitting read urb, error %d",
-							__func__, result);
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+			__func__, result);
 
 
 	return result;
@@ -1556,7 +1560,7 @@
 				    FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
 				    0, priv->interface, buf, 0,
 				    WDR_TIMEOUT) < 0) {
-			err("error from flowcontrol urb");
+			dev_err(&port->dev, "error from flowcontrol urb\n");
 		}
 
 		/* drop RTS and DTR */
@@ -1621,14 +1625,15 @@
 
 	buffer = kmalloc(transfer_size, GFP_ATOMIC);
 	if (!buffer) {
-		err("%s ran out of kernel memory for urb ...", __func__);
+		dev_err(&port->dev,
+			"%s ran out of kernel memory for urb ...\n", __func__);
 		count = -ENOMEM;
 		goto error_no_buffer;
 	}
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb) {
-		err("%s - no more free urbs", __func__);
+		dev_err(&port->dev, "%s - no more free urbs\n", __func__);
 		count = -ENOMEM;
 		goto error_no_urb;
 	}
@@ -1672,8 +1677,9 @@
 
 	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status) {
-		err("%s - failed submitting write urb, error %d",
-							__func__, status);
+		dev_err(&port->dev,
+			"%s - failed submitting write urb, error %d\n",
+			__func__, status);
 		count = status;
 		goto error;
 	} else {
@@ -1780,7 +1786,8 @@
 	buffered = (int)priv->tx_outstanding_bytes;
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 	if (buffered < 0) {
-		err("%s outstanding tx bytes is negative!", __func__);
+		dev_err(&port->dev, "%s outstanding tx bytes is negative!\n",
+			__func__);
 		buffered = 0;
 	}
 	return buffered;
@@ -1796,11 +1803,12 @@
 	int status = urb->status;
 
 	if (urb->number_of_packets > 0) {
-		err("%s transfer_buffer_length %d actual_length %d number of packets %d",
-				__func__,
-				urb->transfer_buffer_length,
-				urb->actual_length, urb->number_of_packets);
-		err("%s transfer_flags %x ", __func__, urb->transfer_flags);
+		dev_err(&port->dev, "%s transfer_buffer_length %d "
+			"actual_length %d number of packets %d\n", __func__,
+			urb->transfer_buffer_length,
+			urb->actual_length, urb->number_of_packets);
+		dev_err(&port->dev, "%s transfer_flags %x\n", __func__,
+			urb->transfer_flags);
 	}
 
 	dbg("%s - port %d", __func__, port->number);
@@ -1821,7 +1829,7 @@
 	}
 
 	if (urb != port->read_urb)
-		err("%s - Not my urb!", __func__);
+		dev_err(&port->dev, "%s - Not my urb!\n", __func__);
 
 	if (status) {
 		/* This will happen at close every time so it is a dbg not an
@@ -1924,7 +1932,8 @@
 
 		length = min(PKTSZ, urb->actual_length-packet_offset)-2;
 		if (length < 0) {
-			err("%s - bad packet length: %d", __func__, length+2);
+			dev_err(&port->dev, "%s - bad packet length: %d\n",
+				__func__, length+2);
 			length = 0;
 		}
 
@@ -2039,8 +2048,9 @@
 
 		result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
 		if (result)
-			err("%s - failed resubmitting read urb, error %d",
-							__func__, result);
+			dev_err(&port->dev,
+				"%s - failed resubmitting read urb, error %d\n",
+				__func__, result);
 	}
 out:
 	tty_kref_put(tty);
@@ -2069,8 +2079,8 @@
 			FTDI_SIO_SET_DATA_REQUEST_TYPE,
 			urb_value , priv->interface,
 			buf, 0, WDR_TIMEOUT) < 0) {
-		err("%s FAILED to enable/disable break state (state was %d)",
-							__func__, break_state);
+		dev_err(&port->dev, "%s FAILED to enable/disable break state "
+			"(state was %d)\n", __func__, break_state);
 	}
 
 	dbg("%s break state is %d - urb is %d", __func__,
@@ -2142,7 +2152,7 @@
 		case CS7: urb_value |= 7; dbg("Setting CS7"); break;
 		case CS8: urb_value |= 8; dbg("Setting CS8"); break;
 		default:
-			err("CSIZE was set but not CS5-CS8");
+			dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n");
 		}
 	}
 
@@ -2155,7 +2165,8 @@
 			    FTDI_SIO_SET_DATA_REQUEST_TYPE,
 			    urb_value , priv->interface,
 			    buf, 0, WDR_SHORT_TIMEOUT) < 0) {
-		err("%s FAILED to set databits/stopbits/parity", __func__);
+		dev_err(&port->dev, "%s FAILED to set "
+			"databits/stopbits/parity\n", __func__);
 	}
 
 	/* Now do the baudrate */
@@ -2166,14 +2177,17 @@
 				    FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
 				    0, priv->interface,
 				    buf, 0, WDR_TIMEOUT) < 0) {
-			err("%s error from disable flowcontrol urb", __func__);
+			dev_err(&port->dev,
+				"%s error from disable flowcontrol urb\n",
+				__func__);
 		}
 		/* Drop RTS and DTR */
 		clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
 	} else {
 		/* set the baudrate determined before */
 		if (change_speed(tty, port))
-			err("%s urb failed to set baudrate", __func__);
+			dev_err(&port->dev, "%s urb failed to set baudrate\n",
+				__func__);
 		/* Ensure RTS and DTR are raised when baudrate changed from 0 */
 		if (!old_termios || (old_termios->c_cflag & CBAUD) == B0)
 			set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
@@ -2189,7 +2203,8 @@
 				    FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
 				    0 , (FTDI_SIO_RTS_CTS_HS | priv->interface),
 				    buf, 0, WDR_TIMEOUT) < 0) {
-			err("urb failed to set to rts/cts flow control");
+			dev_err(&port->dev,
+				"urb failed to set to rts/cts flow control\n");
 		}
 
 	} else {
@@ -2220,7 +2235,8 @@
 					    urb_value , (FTDI_SIO_XON_XOFF_HS
 							 | priv->interface),
 					    buf, 0, WDR_TIMEOUT) < 0) {
-				err("urb failed to set to xon/xoff flow control");
+				dev_err(&port->dev, "urb failed to set to "
+					"xon/xoff flow control\n");
 			}
 		} else {
 			/* else clause to only run if cflag ! CRTSCTS and iflag
@@ -2233,7 +2249,8 @@
 					    FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
 					    0, priv->interface,
 					    buf, 0, WDR_TIMEOUT) < 0) {
-				err("urb failed to clear flow control");
+				dev_err(&port->dev,
+					"urb failed to clear flow control\n");
 			}
 		}
 
@@ -2425,7 +2442,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&ftdi_sio_device);
@@ -2458,5 +2476,5 @@
 MODULE_PARM_DESC(vendor, "User specified vendor ID (default="
 		__MODULE_STRING(FTDI_VID)")");
 module_param(product, ushort, 0);
-MODULE_PARM_DESC(vendor, "User specified product ID");
+MODULE_PARM_DESC(product, "User specified product ID");
 
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 8a5b6df..07a3992 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -628,6 +628,11 @@
 #define FTDI_SUUNTO_SPORTS_PID	0xF680	/* Suunto Sports instrument */
 
 /*
+ * Oceanic product ids
+ */
+#define FTDI_OCEANIC_PID	0xF460  /* Oceanic dive instrument */
+
+/*
  * TTi (Thurlby Thandar Instruments)
  */
 #define TTI_VID			0x103E	/* Vendor Id */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 2ad0569..8e6a66e 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1585,7 +1585,8 @@
 	retval = usb_register(&garmin_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 failed_usb_register:
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index ab90586..4313292 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -63,7 +63,8 @@
 	retval = usb_register(&hp49gp_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&hp49gp_device);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 611f97f..e85c8c0 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -3109,13 +3109,13 @@
 				edge_serial->interrupt_read_urb =
 						usb_alloc_urb(0, GFP_KERNEL);
 				if (!edge_serial->interrupt_read_urb) {
-					err("out of memory");
+					dev_err(&dev->dev, "out of memory\n");
 					return -ENOMEM;
 				}
 				edge_serial->interrupt_in_buffer =
 					kmalloc(buffer_size, GFP_KERNEL);
 				if (!edge_serial->interrupt_in_buffer) {
-					err("out of memory");
+					dev_err(&dev->dev, "out of memory\n");
 					usb_free_urb(edge_serial->interrupt_read_urb);
 					return -ENOMEM;
 				}
@@ -3146,13 +3146,13 @@
 				edge_serial->read_urb =
 						usb_alloc_urb(0, GFP_KERNEL);
 				if (!edge_serial->read_urb) {
-					err("out of memory");
+					dev_err(&dev->dev, "out of memory\n");
 					return -ENOMEM;
 				}
 				edge_serial->bulk_in_buffer =
 					kmalloc(buffer_size, GFP_KERNEL);
 				if (!edge_serial->bulk_in_buffer) {
-					err("out of memory");
+					dev_err(&dev->dev, "out of memory\n");
 					usb_free_urb(edge_serial->read_urb);
 					return -ENOMEM;
 				}
@@ -3181,7 +3181,8 @@
 		}
 
 		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
-			err("Error - the proper endpoints were not found!");
+			dev_err(&dev->dev, "Error - the proper endpoints "
+				"were not found!\n");
 			return -ENODEV;
 		}
 
@@ -3190,8 +3191,9 @@
 		response = usb_submit_urb(edge_serial->interrupt_read_urb,
 								GFP_KERNEL);
 		if (response)
-			err("%s - Error %d submitting control urb",
-							__func__, response);
+			dev_err(&dev->dev,
+				"%s - Error %d submitting control urb\n",
+				__func__, response);
 	}
 	return response;
 }
@@ -3253,7 +3255,8 @@
 	if (retval)
 		goto failed_usb_register;
 	atomic_set(&CmdUrbs, 0);
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 
 failed_usb_register:
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 541dd8e..c3cdd00 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2978,7 +2978,8 @@
 	retval = usb_register(&io_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&edgeport_2port_device);
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 2affa9c..132be74 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -608,7 +608,7 @@
 	bytes_out = 0;
 	priv = kmalloc(sizeof(struct ipaq_private), GFP_KERNEL);
 	if (priv == NULL) {
-		err("%s - Out of memory", __func__);
+		dev_err(&port->dev, "%s - Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 	usb_set_serial_port_data(port, priv);
@@ -693,8 +693,7 @@
 	}
 
 	if (!retries && result) {
-		err("%s - failed doing control urb, error %d", __func__,
-		    result);
+		dev_err(&port->dev, "%s - failed doing control urb, error %d\n",			__func__, result);
 		goto error;
 	}
 
@@ -707,8 +706,9 @@
 
 	result = usb_submit_urb(port->read_urb, GFP_KERNEL);
 	if (result) {
-		err("%s - failed submitting read urb, error %d",
-						__func__, result);
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+			__func__, result);
 		goto error;
 	}
 
@@ -716,7 +716,7 @@
 
 enomem:
 	result = -ENOMEM;
-	err("%s - Out of memory", __func__);
+	dev_err(&port->dev, "%s - Out of memory\n", __func__);
 error:
 	ipaq_destroy_lists(port);
 	kfree(priv);
@@ -781,8 +781,9 @@
 	    ipaq_read_bulk_callback, port);
 	result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
 	if (result)
-		err("%s - failed resubmitting read urb, error %d",
-							__func__, result);
+		dev_err(&port->dev,
+			"%s - failed resubmitting read urb, error %d\n",
+			__func__, result);
 	return;
 }
 
@@ -847,7 +848,8 @@
 		spin_unlock_irqrestore(&write_list_lock, flags);
 		result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
 		if (result)
-			err("%s - failed submitting write urb, error %d",
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d\n",
 				__func__, result);
 	} else {
 		spin_unlock_irqrestore(&write_list_lock, flags);
@@ -909,8 +911,9 @@
 		spin_unlock_irqrestore(&write_list_lock, flags);
 		result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
 		if (result)
-			err("%s - failed submitting write urb, error %d",
-					__func__, result);
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d\n",
+				__func__, result);
 	} else {
 		priv->active = 0;
 		spin_unlock_irqrestore(&write_list_lock, flags);
@@ -957,7 +960,7 @@
 {
 	dbg("%s", __func__);
 	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
-		err("active config #%d != 1 ??",
+		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
 			serial->dev->actconfig->desc.bConfigurationValue);
 		return -ENODEV;
 	}
@@ -976,7 +979,6 @@
 	retval = usb_serial_register(&ipaq_device);
 	if (retval)
 		goto failed_usb_serial_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
 	if (vendor) {
 		ipaq_id_table[0].idVendor = vendor;
 		ipaq_id_table[0].idProduct = product;
@@ -985,6 +987,8 @@
 	if (retval)
 		goto failed_usb_register;
 
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&ipaq_device);
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 480cac2..3ac59a8 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -485,7 +485,8 @@
 		usb_serial_deregister(&ipw_device);
 		return retval;
 	}
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 }
 
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 45d4043..b679a55 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -602,7 +602,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 53710aa7..e320972 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1185,7 +1185,8 @@
 	retval = usb_register(&iuu_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&iuu_device);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 15447af..9878c0f 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -217,7 +217,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 failed_usb_register:
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 99e9a14..bf1ae24 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -742,11 +742,13 @@
 		fw_name = "keyspan_pda/xircom_pgs.fw";
 #endif
 	else {
-		err("%s: unknown vendor, aborting.", __func__);
+		dev_err(&serial->dev->dev, "%s: unknown vendor, aborting.\n",
+			__func__);
 		return -ENODEV;
 	}
 	if (request_ihex_firmware(&fw, fw_name, &serial->dev->dev)) {
-		err("failed to load firmware \"%s\"\n", fw_name);
+		dev_err(&serial->dev->dev, "failed to load firmware \"%s\"\n",
+			fw_name);
 		return -ENOENT;
 	}
 	record = (const struct ihex_binrec *)fw->data;
@@ -756,10 +758,10 @@
 					     (unsigned char *)record->data,
 					     be16_to_cpu(record->len), 0xa0);
 		if (response < 0) {
-			err("ezusb_writememory failed for Keyspan PDA "
-			    "firmware (%d %04X %p %d)",
-			    response, be32_to_cpu(record->addr),
-			    record->data, be16_to_cpu(record->len));
+			dev_err(&serial->dev->dev, "ezusb_writememory failed "
+				"for Keyspan PDA firmware (%d %04X %p %d)\n",
+				response, be32_to_cpu(record->addr),
+				record->data, be16_to_cpu(record->len));
 			break;
 		}
 		record = ihex_next_binrec(record);
@@ -874,7 +876,8 @@
 	retval = usb_register(&keyspan_pda_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 #ifdef XIRCOM
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index ff3a07f..dc36a05 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -182,12 +182,12 @@
 			sizeof(struct klsi_105_port_settings),
 			KLSI_TIMEOUT);
 	if (rc < 0)
-		err("Change port settings failed (error = %d)", rc);
-	info("%s - %d byte block, baudrate %x, databits %d, u1 %d, u2 %d",
-	    __func__,
-	    settings->pktlen,
-	    settings->baudrate, settings->databits,
-	    settings->unknown1, settings->unknown2);
+		dev_err(&port->dev,
+			"Change port settings failed (error = %d)\n", rc);
+	dev_info(&port->serial->dev->dev,
+		 "%d byte block, baudrate %x, databits %d, u1 %d, u2 %d\n",
+		 settings->pktlen, settings->baudrate, settings->databits,
+		 settings->unknown1, settings->unknown2);
 	return rc;
 } /* klsi_105_chg_port_settings */
 
@@ -215,7 +215,7 @@
 	__u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1};
 	__u16 status;
 
-	info("%s - sending SIO Poll request", __func__);
+	dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
 	rc = usb_control_msg(port->serial->dev,
 			     usb_rcvctrlpipe(port->serial->dev, 0),
 			     KL5KUSB105A_SIO_POLL,
@@ -226,12 +226,13 @@
 			     10000
 			     );
 	if (rc < 0)
-		err("Reading line status failed (error = %d)", rc);
+		dev_err(&port->dev, "Reading line status failed (error = %d)\n",
+			rc);
 	else {
 		status = get_unaligned_le16(status_buf);
 
-		info("%s - read status %x %x", __func__,
-		     status_buf[0], status_buf[1]);
+		dev_info(&port->serial->dev->dev, "read status %x %x",
+			 status_buf[0], status_buf[1]);
 
 		*line_state_p = klsi_105_status2linestate(status);
 	}
@@ -280,15 +281,16 @@
 
 			priv->write_urb_pool[j] = urb;
 			if (urb == NULL) {
-				err("No more urbs???");
+				dev_err(&serial->dev->dev, "No more urbs???\n");
 				goto err_cleanup;
 			}
 
 			urb->transfer_buffer =
 				kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
 			if (!urb->transfer_buffer) {
-				err("%s - out of memory for urb buffers.",
-								__func__);
+				dev_err(&serial->dev->dev,
+					"%s - out of memory for urb buffers.\n",
+					__func__);
 				goto err_cleanup;
 			}
 		}
@@ -409,7 +411,8 @@
 
 	rc = usb_submit_urb(port->read_urb, GFP_KERNEL);
 	if (rc) {
-		err("%s - failed submitting read urb, error %d", __func__, rc);
+		dev_err(&port->dev, "%s - failed submitting read urb, "
+			"error %d\n", __func__, rc);
 		retval = rc;
 		goto exit;
 	}
@@ -424,7 +427,7 @@
 			     0,
 			     KLSI_TIMEOUT);
 	if (rc < 0) {
-		err("Enabling read failed (error = %d)", rc);
+		dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
 		retval = rc;
 	} else
 		dbg("%s - enabled reading", __func__);
@@ -464,7 +467,8 @@
 				     NULL, 0,
 				     KLSI_TIMEOUT);
 		if (rc < 0)
-			err("Disabling read failed (error = %d)", rc);
+			dev_err(&port->dev,
+				"Disabling read failed (error = %d)\n", rc);
 	}
 	mutex_unlock(&port->serial->disc_mutex);
 
@@ -475,8 +479,9 @@
 	/* FIXME */
 	/* wgg - do I need this? I think so. */
 	usb_kill_urb(port->interrupt_in_urb);
-	info("kl5kusb105 port stats: %ld bytes in, %ld bytes out",
-					priv->bytes_in, priv->bytes_out);
+	dev_info(&port->serial->dev->dev,
+		 "port stats: %ld bytes in, %ld bytes out\n",
+		 priv->bytes_in, priv->bytes_out);
 } /* klsi_105_close */
 
 
@@ -522,7 +527,9 @@
 			urb->transfer_buffer =
 				kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC);
 			if (urb->transfer_buffer == NULL) {
-				err("%s - no more kernel memory...", __func__);
+				dev_err(&port->dev,
+					"%s - no more kernel memory...\n",
+					__func__);
 				goto exit;
 			}
 		}
@@ -549,8 +556,9 @@
 		/* send the data out the bulk port */
 		result = usb_submit_urb(urb, GFP_ATOMIC);
 		if (result) {
-			err("%s - failed submitting write urb, error %d",
-							__func__, result);
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d\n",
+				__func__, result);
 			goto exit;
 		}
 		buf += size;
@@ -694,8 +702,9 @@
 		      port);
 	rc = usb_submit_urb(port->read_urb, GFP_ATOMIC);
 	if (rc)
-		err("%s - failed resubmitting read urb, error %d",
-							__func__, rc);
+		dev_err(&port->dev,
+			"%s - failed resubmitting read urb, error %d\n",
+			__func__, rc);
 } /* klsi_105_read_bulk_callback */
 
 
@@ -799,7 +808,8 @@
 			priv->cfg.databits = kl5kusb105a_dtb_8;
 			break;
 		default:
-			err("CSIZE was not CS5-CS8, using default of 8");
+			dev_err(&port->dev,
+				"CSIZE was not CS5-CS8, using default of 8\n");
 			priv->cfg.databits = kl5kusb105a_dtb_8;
 			break;
 		}
@@ -886,7 +896,8 @@
 
 	rc = klsi_105_get_line_state(port, &line_state);
 	if (rc < 0) {
-		err("Reading line control failed (error = %d)", rc);
+		dev_err(&port->dev,
+			"Reading line control failed (error = %d)\n", rc);
 		/* better return value? EAGAIN? */
 		return rc;
 	}
@@ -944,8 +955,9 @@
 	port->read_urb->dev = port->serial->dev;
 	result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
 	if (result)
-		err("%s - failed submitting read urb, error %d", __func__,
-		    result);
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+			__func__, result);
 }
 
 
@@ -960,7 +972,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&kl5kusb105d_device);
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index cfcf37c..6286baa 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -744,8 +744,8 @@
 	if (retval)
 		goto failed_usb_register;
 
-	info(DRIVER_VERSION " " DRIVER_AUTHOR);
-	info(DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 failed_usb_register:
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 9b2cef8..07710cf 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -246,7 +246,8 @@
 				0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE,
 				WDR_TIMEOUT);
 	if (rc < 0)	/*FIXME: What value speed results */
-		err("Set BAUD RATE %d failed (error = %d)", value, rc);
+		dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
+			value, rc);
 	else
 		tty_encode_baud_rate(tty, speed, speed);
 	dbg("set_baud_rate: value: 0x%x, divisor: 0x%x", value, divisor);
@@ -274,8 +275,9 @@
 				0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE,
 				WDR_TIMEOUT);
 	if (rc < 0)
-		err("Sending USB device request code %d failed (error = %d)",
-		    MCT_U232_SET_UNKNOWN1_REQUEST, rc);
+		dev_err(&port->dev, "Sending USB device request code %d "
+			"failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST,
+			rc);
 
 	if (port && C_CRTSCTS(tty))
 	   cts_enable_byte = 1;
@@ -288,8 +290,8 @@
 			0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0)
-		err("Sending USB device request code %d failed (error = %d)",
-					MCT_U232_SET_CTS_REQUEST, rc);
+		dev_err(&port->dev, "Sending USB device request code %d "
+			"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
 
 	return rc;
 } /* mct_u232_set_baud_rate */
@@ -303,7 +305,8 @@
 			0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0)
-		err("Set LINE CTRL 0x%x failed (error = %d)", lcr, rc);
+		dev_err(&serial->dev->dev,
+			"Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
 	dbg("set_line_ctrl: 0x%x", lcr);
 	return rc;
 } /* mct_u232_set_line_ctrl */
@@ -325,7 +328,8 @@
 			0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0)
-		err("Set MODEM CTRL 0x%x failed (error = %d)", mcr, rc);
+		dev_err(&serial->dev->dev,
+			"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
 	dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
 
 	return rc;
@@ -341,7 +345,8 @@
 			0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0) {
-		err("Get MODEM STATus failed (error = %d)", rc);
+		dev_err(&serial->dev->dev,
+			"Get MODEM STATus failed (error = %d)\n", rc);
 		*msr = 0;
 	}
 	dbg("get_modem_stat: 0x%x", *msr);
@@ -470,8 +475,9 @@
 	port->read_urb->dev = port->serial->dev;
 	retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
 	if (retval) {
-		err("usb_submit_urb(read bulk) failed pipe 0x%x err %d",
-		    port->read_urb->pipe, retval);
+		dev_err(&port->dev,
+			"usb_submit_urb(read bulk) failed pipe 0x%x err %d\n",
+			port->read_urb->pipe, retval);
 		goto error;
 	}
 
@@ -479,8 +485,9 @@
 	retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
 	if (retval) {
 		usb_kill_urb(port->read_urb);
-		err(" usb_submit_urb(read int) failed pipe 0x%x err %d",
-		    port->interrupt_in_urb->pipe, retval);
+		dev_err(&port->dev,
+			"usb_submit_urb(read int) failed pipe 0x%x err %d",
+			port->interrupt_in_urb->pipe, retval);
 		goto error;
 	}
 	return 0;
@@ -612,8 +619,9 @@
 exit:
 	retval = usb_submit_urb(urb, GFP_ATOMIC);
 	if (retval)
-		err("%s - usb_submit_urb failed with result %d",
-		     __func__, retval);
+		dev_err(&port->dev,
+			"%s - usb_submit_urb failed with result %d\n",
+			__func__, retval);
 } /* mct_u232_read_int_callback */
 
 static void mct_u232_set_termios(struct tty_struct *tty,
@@ -680,7 +688,8 @@
 	case CS8:
 		last_lcr |= MCT_U232_DATA_BITS_8; break;
 	default:
-		err("CSIZE was not CS5-CS8, using default of 8");
+		dev_err(&port->dev,
+			"CSIZE was not CS5-CS8, using default of 8\n");
 		last_lcr |= MCT_U232_DATA_BITS_8;
 		break;
 	}
@@ -817,7 +826,8 @@
 	retval = usb_register(&mct_u232_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&mct_u232_device);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 7b538ca..e772cc0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -355,14 +355,16 @@
 		mos7720_port->write_urb_pool[j] = urb;
 
 		if (urb == NULL) {
-			err("No more urbs???");
+			dev_err(&port->dev, "No more urbs???\n");
 			continue;
 		}
 
 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
 					       GFP_KERNEL);
 		if (!urb->transfer_buffer) {
-			err("%s-out of memory for urb buffers.", __func__);
+			dev_err(&port->dev,
+				"%s-out of memory for urb buffers.\n",
+				__func__);
 			usb_free_urb(mos7720_port->write_urb_pool[j]);
 			mos7720_port->write_urb_pool[j] = NULL;
 			continue;
@@ -694,7 +696,8 @@
 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
 					       GFP_KERNEL);
 		if (urb->transfer_buffer == NULL) {
-			err("%s no more kernel memory...", __func__);
+			dev_err(&port->dev, "%s no more kernel memory...\n",
+				__func__);
 			goto exit;
 		}
 	}
@@ -714,8 +717,8 @@
 	/* send it down the pipe */
 	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status) {
-		err("%s - usb_submit_urb(write bulk) failed with status = %d",
-		    __func__, status);
+		dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
+			"with status = %d\n", __func__, status);
 		bytes_sent = status;
 		goto exit;
 	}
@@ -975,7 +978,7 @@
 	/* Calculate the Divisor */
 	status = calc_baud_rate_divisor(baudrate, &divisor);
 	if (status) {
-		err("%s - bad baud rate", __func__);
+		dev_err(&port->dev, "%s - bad baud rate\n", __func__);
 		return status;
 	}
 
@@ -1478,7 +1481,7 @@
 	/* create our private serial structure */
 	mos7720_serial = kzalloc(sizeof(struct moschip_serial), GFP_KERNEL);
 	if (mos7720_serial == NULL) {
-		err("%s - Out of memory", __func__);
+		dev_err(&dev->dev, "%s - Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -1491,7 +1494,7 @@
 	for (i = 0; i < serial->num_ports; ++i) {
 		mos7720_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
 		if (mos7720_port == NULL) {
-			err("%s - Out of memory", __func__);
+			dev_err(&dev->dev, "%s - Out of memory\n", __func__);
 			usb_set_serial_data(serial, NULL);
 			kfree(mos7720_serial);
 			return -ENOMEM;
@@ -1585,7 +1588,8 @@
 	if (retval)
 		goto failed_port_device_register;
 
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	/* Register with the usb */
 	retval = usb_register(&usb_driver);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 60543d7..fda4a64 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -844,7 +844,7 @@
 		mos7840_port->write_urb_pool[j] = urb;
 
 		if (urb == NULL) {
-			err("No more urbs???");
+			dev_err(&port->dev, "No more urbs???\n");
 			continue;
 		}
 
@@ -853,7 +853,9 @@
 		if (!urb->transfer_buffer) {
 			usb_free_urb(urb);
 			mos7840_port->write_urb_pool[j] = NULL;
-			err("%s-out of memory for urb buffers.", __func__);
+			dev_err(&port->dev,
+				"%s-out of memory for urb buffers.\n",
+				__func__);
 			continue;
 		}
 	}
@@ -1021,8 +1023,8 @@
 			    usb_submit_urb(serial->port[0]->interrupt_in_urb,
 					   GFP_KERNEL);
 			if (response) {
-				err("%s - Error %d submitting interrupt urb",
-				    __func__, response);
+				dev_err(&port->dev, "%s - Error %d submitting "
+					"interrupt urb\n", __func__, response);
 			}
 
 		}
@@ -1055,8 +1057,8 @@
 	    port->bulk_in_endpointAddress);
 	response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
 	if (response) {
-		err("%s - Error %d submitting control urb", __func__,
-		    response);
+		dev_err(&port->dev, "%s - Error %d submitting control urb\n",
+			__func__, response);
 	}
 
 	/* initialize our wait queues */
@@ -1492,7 +1494,8 @@
 		    kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
 
 		if (urb->transfer_buffer == NULL) {
-			err("%s no more kernel memory...", __func__);
+			dev_err(&port->dev, "%s no more kernel memory...\n",
+				__func__);
 			goto exit;
 		}
 	}
@@ -1517,8 +1520,8 @@
 
 	if (status) {
 		mos7840_port->busy[i] = 0;
-		err("%s - usb_submit_urb(write bulk) failed with status = %d",
-		    __func__, status);
+		dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
+			"with status = %d\n", __func__, status);
 		bytes_sent = status;
 		goto exit;
 	}
@@ -1856,8 +1859,7 @@
 		/* Calculate the Divisor */
 
 		if (status) {
-			err("%s - bad baud rate", __func__);
-			dbg("%s\n", "bad baud rate");
+			dev_err(&port->dev, "%s - bad baud rate\n", __func__);
 			return status;
 		}
 		/* Enable access to divisor latch */
@@ -2446,7 +2448,7 @@
 	for (i = 0; i < serial->num_ports; ++i) {
 		mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
 		if (mos7840_port == NULL) {
-			err("%s - Out of memory", __func__);
+			dev_err(&dev->dev, "%s - Out of memory\n", __func__);
 			status = -ENOMEM;
 			i--; /* don't follow NULL pointer cleaning up */
 			goto error;
@@ -2743,7 +2745,8 @@
 		goto failed_port_device_register;
 
 	dbg("%s\n", "Entring...");
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	/* Register with the usb */
 	retval = usb_register(&io_driver);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index c4d70b0..df65397 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -154,8 +154,8 @@
 
 	od = kmalloc(sizeof(struct omninet_data), GFP_KERNEL);
 	if (!od) {
-		err("%s- kmalloc(%Zd) failed.",
-				__func__, sizeof(struct omninet_data));
+		dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n",
+			__func__, sizeof(struct omninet_data));
 		return -ENOMEM;
 	}
 	usb_set_serial_port_data(port, od);
@@ -183,8 +183,9 @@
 			omninet_read_bulk_callback, port);
 	result = usb_submit_urb(port->read_urb, GFP_KERNEL);
 	if (result)
-		err("%s - failed submitting read urb, error %d",
-							__func__, result);
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+			__func__, result);
 	return result;
 }
 
@@ -244,8 +245,9 @@
 			omninet_read_bulk_callback, port);
 	result = usb_submit_urb(urb, GFP_ATOMIC);
 	if (result)
-		err("%s - failed resubmitting read urb, error %d",
-						__func__, result);
+		dev_err(&port->dev,
+			"%s - failed resubmitting read urb, error %d\n",
+			__func__, result);
 
 	return;
 }
@@ -298,8 +300,9 @@
 	result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
 	if (result) {
 		wport->write_urb_busy = 0;
-		err("%s - failed submitting write urb, error %d",
-							__func__, result);
+		dev_err(&port->dev,
+			"%s - failed submitting write urb, error %d\n",
+			__func__, result);
 	} else
 		result = count;
 
@@ -364,7 +367,8 @@
 	retval = usb_register(&omninet_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_VERSION ":" DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&zyxel_omninet_device);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6b1727e..bd07eaa 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -79,38 +79,36 @@
 #define OPTION_PRODUCT_VIPER			0x6600
 #define OPTION_PRODUCT_VIPER_BUS		0x6601
 #define OPTION_PRODUCT_GT_MAX_READY		0x6701
-#define OPTION_PRODUCT_GT_MAX			0x6711
 #define OPTION_PRODUCT_FUJI_MODEM_LIGHT		0x6721
 #define OPTION_PRODUCT_FUJI_MODEM_GT		0x6741
 #define OPTION_PRODUCT_FUJI_MODEM_EX		0x6761
-#define OPTION_PRODUCT_FUJI_NETWORK_LIGHT	0x6731
-#define OPTION_PRODUCT_FUJI_NETWORK_GT		0x6751
-#define OPTION_PRODUCT_FUJI_NETWORK_EX		0x6771
 #define OPTION_PRODUCT_KOI_MODEM		0x6800
-#define OPTION_PRODUCT_KOI_NETWORK		0x6811
 #define OPTION_PRODUCT_SCORPION_MODEM		0x6901
-#define OPTION_PRODUCT_SCORPION_NETWORK		0x6911
 #define OPTION_PRODUCT_ETNA_MODEM		0x7001
-#define OPTION_PRODUCT_ETNA_NETWORK		0x7011
 #define OPTION_PRODUCT_ETNA_MODEM_LITE		0x7021
 #define OPTION_PRODUCT_ETNA_MODEM_GT		0x7041
 #define OPTION_PRODUCT_ETNA_MODEM_EX		0x7061
-#define OPTION_PRODUCT_ETNA_NETWORK_LITE	0x7031
-#define OPTION_PRODUCT_ETNA_NETWORK_GT		0x7051
-#define OPTION_PRODUCT_ETNA_NETWORK_EX		0x7071
 #define OPTION_PRODUCT_ETNA_KOI_MODEM		0x7100
-#define OPTION_PRODUCT_ETNA_KOI_NETWORK		0x7111
 
 #define HUAWEI_VENDOR_ID			0x12D1
 #define HUAWEI_PRODUCT_E600			0x1001
 #define HUAWEI_PRODUCT_E220			0x1003
 #define HUAWEI_PRODUCT_E220BIS			0x1004
 #define HUAWEI_PRODUCT_E1401			0x1401
+#define HUAWEI_PRODUCT_E1402			0x1402
 #define HUAWEI_PRODUCT_E1403			0x1403
+#define HUAWEI_PRODUCT_E1404			0x1404
 #define HUAWEI_PRODUCT_E1405			0x1405
 #define HUAWEI_PRODUCT_E1406			0x1406
+#define HUAWEI_PRODUCT_E1407			0x1407
 #define HUAWEI_PRODUCT_E1408			0x1408
 #define HUAWEI_PRODUCT_E1409			0x1409
+#define HUAWEI_PRODUCT_E140A			0x140A
+#define HUAWEI_PRODUCT_E140B			0x140B
+#define HUAWEI_PRODUCT_E140C			0x140C
+#define HUAWEI_PRODUCT_E140D			0x140D
+#define HUAWEI_PRODUCT_E140E			0x140E
+#define HUAWEI_PRODUCT_E140F			0x140F
 #define HUAWEI_PRODUCT_E1410			0x1410
 #define HUAWEI_PRODUCT_E1411			0x1411
 #define HUAWEI_PRODUCT_E1412			0x1412
@@ -121,6 +119,44 @@
 #define HUAWEI_PRODUCT_E1417			0x1417
 #define HUAWEI_PRODUCT_E1418			0x1418
 #define HUAWEI_PRODUCT_E1419			0x1419
+#define HUAWEI_PRODUCT_E141A			0x141A
+#define HUAWEI_PRODUCT_E141B			0x141B
+#define HUAWEI_PRODUCT_E141C			0x141C
+#define HUAWEI_PRODUCT_E141D			0x141D
+#define HUAWEI_PRODUCT_E141E			0x141E
+#define HUAWEI_PRODUCT_E141F			0x141F
+#define HUAWEI_PRODUCT_E1420			0x1420
+#define HUAWEI_PRODUCT_E1421			0x1421
+#define HUAWEI_PRODUCT_E1422			0x1422
+#define HUAWEI_PRODUCT_E1423			0x1423
+#define HUAWEI_PRODUCT_E1424			0x1424
+#define HUAWEI_PRODUCT_E1425			0x1425
+#define HUAWEI_PRODUCT_E1426			0x1426
+#define HUAWEI_PRODUCT_E1427			0x1427
+#define HUAWEI_PRODUCT_E1428			0x1428
+#define HUAWEI_PRODUCT_E1429			0x1429
+#define HUAWEI_PRODUCT_E142A			0x142A
+#define HUAWEI_PRODUCT_E142B			0x142B
+#define HUAWEI_PRODUCT_E142C			0x142C
+#define HUAWEI_PRODUCT_E142D			0x142D
+#define HUAWEI_PRODUCT_E142E			0x142E
+#define HUAWEI_PRODUCT_E142F			0x142F
+#define HUAWEI_PRODUCT_E1430			0x1430
+#define HUAWEI_PRODUCT_E1431			0x1431
+#define HUAWEI_PRODUCT_E1432			0x1432
+#define HUAWEI_PRODUCT_E1433			0x1433
+#define HUAWEI_PRODUCT_E1434			0x1434
+#define HUAWEI_PRODUCT_E1435			0x1435
+#define HUAWEI_PRODUCT_E1436			0x1436
+#define HUAWEI_PRODUCT_E1437			0x1437
+#define HUAWEI_PRODUCT_E1438			0x1438
+#define HUAWEI_PRODUCT_E1439			0x1439
+#define HUAWEI_PRODUCT_E143A			0x143A
+#define HUAWEI_PRODUCT_E143B			0x143B
+#define HUAWEI_PRODUCT_E143C			0x143C
+#define HUAWEI_PRODUCT_E143D			0x143D
+#define HUAWEI_PRODUCT_E143E			0x143E
+#define HUAWEI_PRODUCT_E143F			0x143F
 
 #define NOVATELWIRELESS_VENDOR_ID		0x1410
 
@@ -218,8 +254,19 @@
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID				0x19d2
 #define ZTE_PRODUCT_MF628			0x0015
+#define ZTE_PRODUCT_MF626			0x0031
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
 
+/* Ericsson products */
+#define ERICSSON_VENDOR_ID			0x0bdb
+#define ERICSSON_PRODUCT_F3507G			0x1900
+
+/* Pantech products */
+#define PANTECH_VENDOR_ID			0x106c
+#define PANTECH_PRODUCT_PC5740			0x3701
+#define PANTECH_PRODUCT_PC5750			0x3702  /* PX-500 */
+#define PANTECH_PRODUCT_UM150			0x3711
+
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -235,36 +282,34 @@
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER_BUS) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX_READY) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_LIGHT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_GT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_EX) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_NETWORK_LIGHT) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_NETWORK_GT) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_NETWORK_EX) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_MODEM) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_NETWORK) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_SCORPION_MODEM) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_SCORPION_NETWORK) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_NETWORK) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_LITE) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_NETWORK_LITE) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_NETWORK_GT) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_NETWORK_EX) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
-	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_NETWORK) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
@@ -275,6 +320,44 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -318,7 +401,8 @@
 	{ USB_DEVICE(DELL_VENDOR_ID, 0x8136) },	/* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
 	{ USB_DEVICE(DELL_VENDOR_ID, 0x8137) },	/* Dell Wireless HSDPA 5520 */
 	{ USB_DEVICE(DELL_VENDOR_ID, 0x8138) },	/* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
-	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
+	{ USB_DEVICE(DELL_VENDOR_ID, 0x8147) },	/* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
+	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },	/* ADU-E100, ADU-310 */
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
 	{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
@@ -347,8 +431,13 @@
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+	{ USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
+	{ USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5740) },
+	{ USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5750) },
+	{ USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_UM150) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -427,7 +516,8 @@
 	if (retval)
 		goto failed_driver_register;
 
-	info(DRIVER_DESC ": " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 
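For reference, the ID-table additions above use the option driver's two matching styles: USB_DEVICE() matches on vendor/product alone, while USB_DEVICE_AND_INTERFACE_INFO() also requires the interface class/subclass/protocol (0xff/0xff/0xff, i.e. vendor specific), so only the serial interfaces of a composite device are claimed. A hypothetical table with placeholder IDs:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* placeholder, not a real ID */
#define EXAMPLE_PRODUCT_ID	0x5678	/* placeholder, not a real ID */

static struct usb_device_id example_ids[] = {
	/* match any interface of this VID:PID */
	{ USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
	/* match only the vendor-specific interfaces of the same device */
	{ USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID,
					EXAMPLE_PRODUCT_ID,
					0xff, 0xff, 0xff) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);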
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9084378..491c885 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1147,7 +1147,7 @@
 	retval = usb_register(&pl2303_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&pl2303_device);
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 72903ac..4b463cd 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -250,17 +250,18 @@
 		if (!fcs) {
 			int actual_length = data[length - 2] >> 2;
 			if (actual_length <= (length - 2)) {
-				info("%s - actual: %d", __func__,
-							actual_length);
+				dev_info(&urb->dev->dev, "%s - actual: %d\n",
+					 __func__, actual_length);
 				tty_insert_flip_string(tty,
 							data, actual_length);
 				tty_flip_buffer_push(tty);
 			} else {
-				err("%s - inconsistent lengths %d:%d",
+				dev_err(&port->dev,
+					"%s - inconsistent lengths %d:%d\n",
 					__func__, actual_length, length);
 			}
 		} else {
-			err("%s - bad CRC %x", __func__, fcs);
+			dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
 		}
 	} else {
 		tty_insert_flip_string(tty, data, length);
@@ -277,8 +278,9 @@
 
 	result = usb_submit_urb(urb, GFP_ATOMIC);
 	if (result)
-		err("%s - failed resubmitting read urb, error %d",
-							__func__, result);
+		dev_err(&port->dev,
+			"%s - failed resubmitting read urb, error %d\n",
+			__func__, result);
 		/* FIXME: Need a mechanism to retry later if this happens */
 }
 
@@ -369,8 +371,9 @@
 	result = usb_submit_urb(port->write_urb, GFP_KERNEL);
 	if (result) {
 		port->write_urb_busy = 0;
-		err("%s - failed submitting write urb, error %d",
-							__func__, result);
+		dev_err(&port->dev,
+			"%s - failed submitting write urb, error %d\n",
+			__func__, result);
 		return 0;
 	}
 	dbg("%s urb: %p submitted", __func__, port->write_urb);
@@ -428,14 +431,13 @@
 {
 	int i, retval;
 
-	info(DRIVER_VERSION " " DRIVER_AUTHOR);
-	info(DRIVER_DESC);
-	info("vendor: %x product: %x safe: %d padded: %d\n",
-					vendor, product, safe, padded);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	/* if we have vendor / product parameters patch them into id list */
 	if (vendor || product) {
-		info("vendor: %x product: %x\n", vendor, product);
+		printk(KERN_INFO KBUILD_MODNAME ": vendor: %x product: %x\n",
+		       vendor, product);
 
 		for (i = 0; i < ARRAY_SIZE(id_table); i++) {
 			if (!id_table[i].idVendor && !id_table[i].idProduct) {
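The safe_serial hunks above only touch the logging; the surrounding code keeps the driver's scheme of patching user-supplied vendor/product module parameters into a spare slot of its ID table. A rough sketch of that scheme, using hypothetical names and placeholder IDs:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>

static __u16 vendor;	/* hypothetical module parameters */
static __u16 product;
module_param(vendor, ushort, 0);
module_param(product, ushort, 0);

static struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder built-in entry */
	{ },	/* spare slot reserved for the module-parameter override */
	{ }	/* terminating entry */
};

/* Called from module init: if the user passed vendor/product, fill the
 * first empty slot so the extra device can bind without a rebuild. */
static void example_patch_id_table(void)
{
	int i;

	if (!vendor && !product)
		return;

	for (i = 0; i < ARRAY_SIZE(example_ids); i++) {
		if (!example_ids[i].idVendor && !example_ids[i].idProduct) {
			example_ids[i].match_flags =
				USB_DEVICE_ID_MATCH_DEVICE;
			example_ids[i].idVendor = vendor;
			example_ids[i].idProduct = product;
			break;
		}
	}
}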
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 8b9eaf3..0f2b672 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -247,7 +247,7 @@
 	struct sierra_port_private *portdata;
 	__u16 interface = 0;
 
-	dbg("%s", __func__);
+	dev_dbg(&port->dev, "%s", __func__);
 
 	portdata = usb_get_serial_port_data(port);
 
@@ -284,7 +284,7 @@
 static void sierra_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios)
 {
-	dbg("%s", __func__);
+	dev_dbg(&port->dev, "%s", __func__);
 	tty_termios_copy_hw(tty->termios, old_termios);
 	sierra_send_setup(tty, port);
 }
@@ -295,6 +295,7 @@
 	unsigned int value;
 	struct sierra_port_private *portdata;
 
+	dev_dbg(&port->dev, "%s", __func__);
 	portdata = usb_get_serial_port_data(port);
 
 	value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
@@ -334,14 +335,14 @@
 	int status = urb->status;
 	unsigned long flags;
 
-	dbg("%s - port %d", __func__, port->number);
+	dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
 
 	/* free up the transfer buffer, as usb_free_urb() does not do this */
 	kfree(urb->transfer_buffer);
 
 	if (status)
-		dbg("%s - nonzero write bulk status received: %d",
-		    __func__, status);
+		dev_dbg(&port->dev, "%s - nonzero write bulk status "
+		    "received: %d", __func__, status);
 
 	spin_lock_irqsave(&portdata->lock, flags);
 	--portdata->outstanding_urbs;
@@ -363,12 +364,12 @@
 
 	portdata = usb_get_serial_port_data(port);
 
-	dbg("%s: write (%d chars)", __func__, count);
+	dev_dbg(&port->dev, "%s: write (%d chars)", __func__, count);
 
 	spin_lock_irqsave(&portdata->lock, flags);
 	if (portdata->outstanding_urbs > N_OUT_URB) {
 		spin_unlock_irqrestore(&portdata->lock, flags);
-		dbg("%s - write limit hit\n", __func__);
+		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
 		return 0;
 	}
 	portdata->outstanding_urbs++;
@@ -437,8 +438,8 @@
 	port =  urb->context;
 
 	if (status) {
-		dbg("%s: nonzero status: %d on endpoint %02x.",
-		    __func__, status, endpoint);
+		dev_dbg(&port->dev, "%s: nonzero status: %d on"
+		    " endpoint %02x.", __func__, status, endpoint);
 	} else {
 		if (urb->actual_length) {
 		tty = tty_port_tty_get(&port->port);
@@ -447,7 +448,8 @@
 			tty_flip_buffer_push(tty);
 			tty_kref_put(tty);
 		} else
-			dbg("%s: empty read urb received", __func__);
+			dev_dbg(&port->dev, "%s: empty read urb"
+				" received", __func__);
 
 		/* Resubmit urb so we continue receiving */
 		if (port->port.count && status != -ESHUTDOWN) {
@@ -468,15 +470,17 @@
 	struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 	struct usb_serial *serial = port->serial;
 
-	dbg("%s", __func__);
-	dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
+	dev_dbg(&port->dev, "%s", __func__);
+	dev_dbg(&port->dev, "%s: urb %p port %p has data %p", __func__,
+		urb, port, portdata);
 
 	if (status == 0) {
 		struct usb_ctrlrequest *req_pkt =
 				(struct usb_ctrlrequest *)urb->transfer_buffer;
 
 		if (!req_pkt) {
-			dbg("%s: NULL req_pkt\n", __func__);
+			dev_dbg(&port->dev, "%s: NULL req_pkt\n",
+				__func__);
 			return;
 		}
 		if ((req_pkt->bRequestType == 0xA1) &&
@@ -487,7 +491,8 @@
 					sizeof(struct usb_ctrlrequest));
 			struct tty_struct *tty;
 
-			dbg("%s: signal x%x", __func__, signals);
+			dev_dbg(&port->dev, "%s: signal x%x", __func__,
+				signals);
 
 			old_dcd_state = portdata->dcd_state;
 			portdata->cts_state = 1;
@@ -501,19 +506,20 @@
 				tty_hangup(tty);
 			tty_kref_put(tty);
 		} else {
-			dbg("%s: type %x req %x", __func__,
-				req_pkt->bRequestType, req_pkt->bRequest);
+			dev_dbg(&port->dev, "%s: type %x req %x",
+				__func__, req_pkt->bRequestType,
+				req_pkt->bRequest);
 		}
 	} else
-		dbg("%s: error %d", __func__, status);
+		dev_dbg(&port->dev, "%s: error %d", __func__, status);
 
 	/* Resubmit urb so we continue receiving IRQ data */
 	if (status != -ESHUTDOWN) {
 		urb->dev = serial->dev;
 		err = usb_submit_urb(urb, GFP_ATOMIC);
 		if (err)
-			dbg("%s: resubmit intr urb failed. (%d)",
-				__func__, err);
+			dev_dbg(&port->dev, "%s: resubmit intr urb "
+				"failed. (%d)",	__func__, err);
 	}
 }
 
@@ -523,14 +529,14 @@
 	struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 	unsigned long flags;
 
-	dbg("%s - port %d", __func__, port->number);
+	dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
 
 	/* try to give a good number back based on if we have any free urbs at
 	 * this point in time */
 	spin_lock_irqsave(&portdata->lock, flags);
 	if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) {
 		spin_unlock_irqrestore(&portdata->lock, flags);
-		dbg("%s - write limit hit\n", __func__);
+		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
 		return 0;
 	}
 	spin_unlock_irqrestore(&portdata->lock, flags);
@@ -549,7 +555,7 @@
 
 	portdata = usb_get_serial_port_data(port);
 
-	dbg("%s", __func__);
+	dev_dbg(&port->dev, "%s", __func__);
 
 	/* Set some sane defaults */
 	portdata->rts_state = 1;
@@ -561,8 +567,8 @@
 		if (!urb)
 			continue;
 		if (urb->dev != serial->dev) {
-			dbg("%s: dev %p != %p", __func__,
-				urb->dev, serial->dev);
+			dev_dbg(&port->dev, "%s: dev %p != %p",
+				 __func__, urb->dev, serial->dev);
 			continue;
 		}
 
@@ -601,7 +607,7 @@
 	struct usb_serial *serial = port->serial;
 	struct sierra_port_private *portdata;
 
-	dbg("%s", __func__);
+	dev_dbg(&port->dev, "%s", __func__);
 	portdata = usb_get_serial_port_data(port);
 
 	portdata->rts_state = 0;
@@ -630,7 +636,7 @@
 	int i;
 	int j;
 
-	dbg("%s", __func__);
+	dev_dbg(&serial->dev->dev, "%s", __func__);
 
 	/* Set Device mode to D0 */
 	sierra_set_power_state(serial->dev, 0x0000);
@@ -644,8 +650,9 @@
 		port = serial->port[i];
 		portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
 		if (!portdata) {
-			dbg("%s: kmalloc for sierra_port_private (%d) failed!.",
-					__func__, i);
+			dev_dbg(&port->dev, "%s: kmalloc for "
+				"sierra_port_private (%d) failed!.",
+				__func__, i);
 			return -ENOMEM;
 		}
 		spin_lock_init(&portdata->lock);
@@ -665,8 +672,8 @@
 		for (j = 0; j < N_IN_URB; ++j) {
 			urb = usb_alloc_urb(0, GFP_KERNEL);
 			if (urb == NULL) {
-				dbg("%s: alloc for in port failed.",
-				    __func__);
+				dev_dbg(&port->dev, "%s: alloc for in "
+					"port failed.", __func__);
 				continue;
 			}
 			/* Fill URB using supplied data. */
@@ -688,7 +695,7 @@
 	struct usb_serial_port *port;
 	struct sierra_port_private *portdata;
 
-	dbg("%s", __func__);
+	dev_dbg(&serial->dev->dev, "%s", __func__);
 
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
@@ -743,7 +750,8 @@
 	if (retval)
 		goto failed_driver_register;
 
-	info(DRIVER_DESC ": " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 
 	return 0;
 
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 1533d6e..a65bc2b 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -589,8 +589,8 @@
 	case 1000000:
 			buf[0] = 0x0b;	break;
 	default:
-		err("spcp825 driver does not support the baudrate "
-		    "requested, using default of 9600.");
+		dev_err(&port->dev, "spcp825 driver does not support the "
+			"baudrate requested, using default of 9600.\n");
 	}
 
 	/* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
@@ -629,7 +629,8 @@
 			    SET_UART_FORMAT_TYPE, SET_UART_FORMAT,
 			    uartdata, 0, NULL, 0, 100);
 	if (i < 0)
-		err("Set UART format %#x failed (error = %d)", uartdata, i);
+		dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
+			uartdata, i);
 	dbg("0x21:0x40:0:0  %d\n", i);
 
 	if (cflag & CRTSCTS) {
@@ -1054,7 +1055,8 @@
 	retval = usb_register(&spcp8x5_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&spcp8x5_device);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c90237d..31c42d1 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -85,7 +85,6 @@
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
-#include <linux/firmware.h>
 
 #include "ti_usb_3410_5052.h"
 
@@ -383,7 +382,8 @@
 	if (ret)
 		goto failed_usb;
 
-	info(TI_DRIVER_DESC " " TI_DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " TI_DRIVER_VERSION ":"
+	       TI_DRIVER_DESC "\n");
 
 	return 0;
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index e7d4246..8be3f39 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1121,7 +1121,8 @@
 
 	result = bus_register(&usb_serial_bus_type);
 	if (result) {
-		err("%s - registering bus driver failed", __func__);
+		printk(KERN_ERR "usb-serial: %s - registering bus driver "
+		       "failed\n", __func__);
 		goto exit_bus;
 	}
 
@@ -1142,25 +1143,28 @@
 	tty_set_operations(usb_serial_tty_driver, &serial_ops);
 	result = tty_register_driver(usb_serial_tty_driver);
 	if (result) {
-		err("%s - tty_register_driver failed", __func__);
+		printk(KERN_ERR "usb-serial: %s - tty_register_driver failed\n",
+		       __func__);
 		goto exit_reg_driver;
 	}
 
 	/* register the USB driver */
 	result = usb_register(&usb_serial_driver);
 	if (result < 0) {
-		err("%s - usb_register failed", __func__);
+		printk(KERN_ERR "usb-serial: %s - usb_register failed\n",
+		       __func__);
 		goto exit_tty;
 	}
 
 	/* register the generic driver, if we should */
 	result = usb_serial_generic_register(debug);
 	if (result < 0) {
-		err("%s - registering generic driver failed", __func__);
+		printk(KERN_ERR "usb-serial: %s - registering generic "
+		       "driver failed\n", __func__);
 		goto exit_generic;
 	}
 
-	info(DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
 
 	return result;
 
@@ -1174,7 +1178,8 @@
 	bus_unregister(&usb_serial_bus_type);
 
 exit_bus:
-	err("%s - returning with error %d", __func__, result);
+	printk(KERN_ERR "usb-serial: %s - returning with error %d\n",
+	       __func__, result);
 	put_tty_driver(usb_serial_tty_driver);
 	return result;
 }
@@ -1233,11 +1238,11 @@
 
 	retval = usb_serial_bus_register(driver);
 	if (retval) {
-		err("problem %d when registering driver %s",
-						retval, driver->description);
+		printk(KERN_ERR "usb-serial: problem %d when registering "
+		       "driver %s\n", retval, driver->description);
 		list_del(&driver->driver_list);
 	} else
-		info("USB Serial support registered for %s",
+		printk(KERN_INFO "USB Serial support registered for %s\n",
 						driver->description);
 
 	return retval;
@@ -1248,7 +1253,8 @@
 void usb_serial_deregister(struct usb_serial_driver *device)
 {
 	/* must be called with BKL held */
-	info("USB Serial deregistering driver %s", device->description);
+	printk(KERN_INFO "USB Serial deregistering driver %s\n",
+	       device->description);
 	list_del(&device->driver_list);
 	usb_serial_bus_deregister(device);
 }
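usb_serial_register()/usb_serial_deregister() above are what the individual drivers in this series call from their init paths. The usual client-side pattern, sketched here with hypothetical example_* objects, registers the serial driver first, then the USB driver, and unwinds in reverse order on failure:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Hypothetical, minimal driver objects; a real driver also supplies an
 * ID table and the open/close/read/write callbacks. */
static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.description	= "Example serial adaptor",
	.num_ports	= 1,
};

static struct usb_driver example_driver = {
	.name		= "example",
	.probe		= usb_serial_probe,
	.disconnect	= usb_serial_disconnect,
};

static int __init example_serial_init(void)
{
	int retval;

	retval = usb_serial_register(&example_device);
	if (retval)
		return retval;

	retval = usb_register(&example_driver);
	if (retval) {
		/* unwind in reverse order, as the drivers above do */
		usb_serial_deregister(&example_device);
		return retval;
	}

	printk(KERN_INFO KBUILD_MODNAME ": example driver registered\n");
	return 0;
}

static void __exit example_serial_exit(void)
{
	usb_deregister(&example_driver);
	usb_serial_deregister(&example_device);
}

module_init(example_serial_init);
module_exit(example_serial_exit);
MODULE_LICENSE("GPL");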
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index a6d1c75..4facce3 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -768,7 +768,7 @@
 	dbg("%s", __func__);
 
 	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
-		err("active config #%d != 1 ??",
+		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
 			serial->dev->actconfig->desc.bConfigurationValue);
 		return -ENODEV;
 	}
@@ -971,11 +971,14 @@
 				break;
 			}
 		}
-		info(
-		  "Untested USB device specified at time of module insertion");
-		info("Warning: This is not guaranteed to work");
-		info("Using a newer kernel is preferred to this method");
-		info("Adding Palm OS protocol 4.x support for unknown device: 0x%x/0x%x",
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": Untested USB device specified at time of module insertion\n");
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": Warning: This is not guaranteed to work\n");
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": Using a newer kernel is preferred to this method\n");
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": Adding Palm OS protocol 4.x support for unknown device: 0x%x/0x%x\n",
 			vendor, product);
 	}
 	retval = usb_serial_register(&handspring_device);
@@ -990,7 +993,7 @@
 	retval = usb_register(&visor_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
 
 	return 0;
 failed_usb_register:
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 11c8b97..5335d32 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -303,12 +303,15 @@
 
 	if (request_ihex_firmware(&firmware_fw, "whiteheat.fw",
 				  &serial->dev->dev)) {
-		err("%s - request \"whiteheat.fw\" failed", __func__);
+		dev_err(&serial->dev->dev,
+			"%s - request \"whiteheat.fw\" failed\n", __func__);
 		goto out;
 	}
 	if (request_ihex_firmware(&loader_fw, "whiteheat_loader.fw",
 			     &serial->dev->dev)) {
-		err("%s - request \"whiteheat_loader.fw\" failed", __func__);
+		dev_err(&serial->dev->dev,
+			"%s - request \"whiteheat_loader.fw\" failed\n",
+			__func__);
 		goto out;
 	}
 	ret = 0;
@@ -320,9 +323,10 @@
 					      (unsigned char *)record->data,
 					      be16_to_cpu(record->len), 0xa0);
 		if (response < 0) {
-			err("%s - ezusb_writememory failed for loader (%d %04X %p %d)",
-			    __func__, response, be32_to_cpu(record->addr),
-			    record->data, be16_to_cpu(record->len));
+			dev_err(&serial->dev->dev, "%s - ezusb_writememory "
+				"failed for loader (%d %04X %p %d)\n",
+				__func__, response, be32_to_cpu(record->addr),
+				record->data, be16_to_cpu(record->len));
 			break;
 		}
 		record = ihex_next_binrec(record);
@@ -338,9 +342,11 @@
 					      (unsigned char *)record->data,
 					      be16_to_cpu(record->len), 0xa3);
 		if (response < 0) {
-			err("%s - ezusb_writememory failed for first firmware step (%d %04X %p %d)", 
-			    __func__, response, be32_to_cpu(record->addr),
-			    record->data, be16_to_cpu(record->len));
+			dev_err(&serial->dev->dev, "%s - ezusb_writememory "
+				"failed for first firmware step "
+				"(%d %04X %p %d)\n", __func__, response,
+				be32_to_cpu(record->addr), record->data,
+				be16_to_cpu(record->len));
 			break;
 		}
 		++record;
@@ -354,9 +360,11 @@
 					      (unsigned char *)record->data,
 					      be16_to_cpu(record->len), 0xa0);
 		if (response < 0) {
-			err("%s - ezusb_writememory failed for second firmware step (%d %04X %p %d)", 
-			    __func__, response, be32_to_cpu(record->addr),
-			    record->data, be16_to_cpu(record->len));
+			dev_err(&serial->dev->dev, "%s - ezusb_writememory "
+				"failed for second firmware step "
+				"(%d %04X %p %d)\n", __func__, response,
+				be32_to_cpu(record->addr), record->data,
+				be16_to_cpu(record->len));
 			break;
 		}
 		++record;
@@ -421,12 +429,12 @@
 	ret = usb_bulk_msg(serial->dev, pipe, command, 2,
 						&alen, COMMAND_TIMEOUT_MS);
 	if (ret) {
-		err("%s: Couldn't send command [%d]",
-				serial->type->description, ret);
+		dev_err(&serial->dev->dev, "%s: Couldn't send command [%d]\n",
+			serial->type->description, ret);
 		goto no_firmware;
 	} else if (alen != 2) {
-		err("%s: Send command incomplete [%d]",
-				serial->type->description, alen);
+		dev_err(&serial->dev->dev, "%s: Send command incomplete [%d]\n",
+			serial->type->description, alen);
 		goto no_firmware;
 	}
 
@@ -437,31 +445,33 @@
 	ret = usb_bulk_msg(serial->dev, pipe, result,
 			sizeof(*hw_info) + 1, &alen, COMMAND_TIMEOUT_MS);
 	if (ret) {
-		err("%s: Couldn't get results [%d]",
-				serial->type->description, ret);
+		dev_err(&serial->dev->dev, "%s: Couldn't get results [%d]\n",
+			serial->type->description, ret);
 		goto no_firmware;
 	} else if (alen != sizeof(*hw_info) + 1) {
-		err("%s: Get results incomplete [%d]",
-				serial->type->description, alen);
+		dev_err(&serial->dev->dev, "%s: Get results incomplete [%d]\n",
+			serial->type->description, alen);
 		goto no_firmware;
 	} else if (result[0] != command[0]) {
-		err("%s: Command failed [%d]",
-				serial->type->description, result[0]);
+		dev_err(&serial->dev->dev, "%s: Command failed [%d]\n",
+			serial->type->description, result[0]);
 		goto no_firmware;
 	}
 
 	hw_info = (struct whiteheat_hw_info *)&result[1];
 
-	info("%s: Driver %s: Firmware v%d.%02d", serial->type->description,
-	     DRIVER_VERSION, hw_info->sw_major_rev, hw_info->sw_minor_rev);
+	dev_info(&serial->dev->dev, "%s: Driver %s: Firmware v%d.%02d\n",
+		 serial->type->description, DRIVER_VERSION,
+		 hw_info->sw_major_rev, hw_info->sw_minor_rev);
 
 	for (i = 0; i < serial->num_ports; i++) {
 		port = serial->port[i];
 
 		info = kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL);
 		if (info == NULL) {
-			err("%s: Out of memory for port structures\n",
-					serial->type->description);
+			dev_err(&port->dev,
+				"%s: Out of memory for port structures\n",
+				serial->type->description);
 			goto no_private;
 		}
 
@@ -481,18 +491,20 @@
 		for (j = 0; j < urb_pool_size; j++) {
 			urb = usb_alloc_urb(0, GFP_KERNEL);
 			if (!urb) {
-				err("No free urbs available");
+				dev_err(&port->dev, "No free urbs available\n");
 				goto no_rx_urb;
 			}
 			buf_size = port->read_urb->transfer_buffer_length;
 			urb->transfer_buffer = kmalloc(buf_size, GFP_KERNEL);
 			if (!urb->transfer_buffer) {
-				err("Couldn't allocate urb buffer");
+				dev_err(&port->dev,
+					"Couldn't allocate urb buffer\n");
 				goto no_rx_buf;
 			}
 			wrap = kmalloc(sizeof(*wrap), GFP_KERNEL);
 			if (!wrap) {
-				err("Couldn't allocate urb wrapper");
+				dev_err(&port->dev,
+					"Couldn't allocate urb wrapper\n");
 				goto no_rx_wrap;
 			}
 			usb_fill_bulk_urb(urb, serial->dev,
@@ -505,18 +517,20 @@
 
 			urb = usb_alloc_urb(0, GFP_KERNEL);
 			if (!urb) {
-				err("No free urbs available");
+				dev_err(&port->dev, "No free urbs available\n");
 				goto no_tx_urb;
 			}
 			buf_size = port->write_urb->transfer_buffer_length;
 			urb->transfer_buffer = kmalloc(buf_size, GFP_KERNEL);
 			if (!urb->transfer_buffer) {
-				err("Couldn't allocate urb buffer");
+				dev_err(&port->dev,
+					"Couldn't allocate urb buffer\n");
 				goto no_tx_buf;
 			}
 			wrap = kmalloc(sizeof(*wrap), GFP_KERNEL);
 			if (!wrap) {
-				err("Couldn't allocate urb wrapper");
+				dev_err(&port->dev,
+					"Couldn't allocate urb wrapper\n");
 				goto no_tx_wrap;
 			}
 			usb_fill_bulk_urb(urb, serial->dev,
@@ -534,8 +548,9 @@
 	command_info = kmalloc(sizeof(struct whiteheat_command_private),
 								GFP_KERNEL);
 	if (command_info == NULL) {
-		err("%s: Out of memory for port structures\n",
-					serial->type->description);
+		dev_err(&serial->dev->dev,
+			"%s: Out of memory for port structures\n",
+			serial->type->description);
 		goto no_command_private;
 	}
 
@@ -552,12 +567,15 @@
 
 no_firmware:
 	/* Firmware likely not running */
-	err("%s: Unable to retrieve firmware version, try replugging\n",
-					serial->type->description);
-	err("%s: If the firmware is not running (status led not blinking)\n",
-					serial->type->description);
-	err("%s: please contact support@connecttech.com\n",
-					serial->type->description);
+	dev_err(&serial->dev->dev,
+		"%s: Unable to retrieve firmware version, try replugging\n",
+		serial->type->description);
+	dev_err(&serial->dev->dev,
+		"%s: If the firmware is not running (status led not blinking)\n",
+		serial->type->description);
+	dev_err(&serial->dev->dev,
+		"%s: please contact support@connecttech.com\n",
+		serial->type->description);
 	kfree(result);
 	return -ENODEV;
 
@@ -680,8 +698,9 @@
 	/* Start reading from the device */
 	retval = start_port_read(port);
 	if (retval) {
-		err("%s - failed submitting read urb, error %d",
-				__func__, retval);
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+			__func__, retval);
 		firm_close(port);
 		stop_command_port(port->serial);
 		goto exit;
@@ -806,8 +825,9 @@
 		urb->transfer_buffer_length = bytes;
 		result = usb_submit_urb(urb, GFP_ATOMIC);
 		if (result) {
-			err("%s - failed submitting write urb, error %d",
-							__func__, result);
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d\n",
+				__func__, result);
 			sent = result;
 			spin_lock_irqsave(&info->lock, flags);
 			list_add(tmp, &info->tx_urbs_free);
@@ -1075,7 +1095,7 @@
 	wrap = urb_to_wrap(urb, &info->rx_urbs_submitted);
 	if (!wrap) {
 		spin_unlock(&info->lock);
-		err("%s - Not my urb!", __func__);
+		dev_err(&port->dev, "%s - Not my urb!\n", __func__);
 		return;
 	}
 	list_del(&wrap->list);
@@ -1119,7 +1139,7 @@
 	wrap = urb_to_wrap(urb, &info->tx_urbs_submitted);
 	if (!wrap) {
 		spin_unlock(&info->lock);
-		err("%s - Not my urb!", __func__);
+		dev_err(&port->dev, "%s - Not my urb!\n", __func__);
 		return;
 	}
 	list_move(&wrap->list, &info->tx_urbs_free);
@@ -1383,8 +1403,9 @@
 		command_port->read_urb->dev = serial->dev;
 		retval = usb_submit_urb(command_port->read_urb, GFP_KERNEL);
 		if (retval) {
-			err("%s - failed submitting read urb, error %d",
-							__func__, retval);
+			dev_err(&serial->dev->dev,
+				"%s - failed submitting read urb, error %d\n",
+				__func__, retval);
 			goto exit;
 		}
 	}
@@ -1522,7 +1543,8 @@
 		urb->dev = port->serial->dev;
 		result = usb_submit_urb(urb, GFP_ATOMIC);
 		if (result) {
-			err("%s - failed resubmitting read urb, error %d",
+			dev_err(&port->dev,
+				"%s - failed resubmitting read urb, error %d\n",
 				__func__, result);
 			spin_lock_irqsave(&info->lock, flags);
 			list_add(tmp, &info->rx_urbs_free);
@@ -1556,7 +1578,8 @@
 	retval = usb_register(&whiteheat_driver);
 	if (retval)
 		goto failed_usb_register;
-	info(DRIVER_DESC " " DRIVER_VERSION);
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
 	return 0;
 failed_usb_register:
 	usb_serial_deregister(&whiteheat_device);
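The whiteheat hunks above keep the request_ihex_firmware() flow and only convert its error reporting. For reference, a sketch of that flow: the blob is parsed into ihex_binrec records carrying a load address, length and payload, which the driver feeds to its ezusb_writememory() helper. The firmware name and the debug print below are placeholders for the real per-record write:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/ihex.h>

static int example_load_ihex(struct device *dev)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int ret;

	ret = request_ihex_firmware(&fw, "whiteheat.fw", dev);
	if (ret) {
		dev_err(dev, "request \"whiteheat.fw\" failed\n");
		return ret;
	}

	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec)) {
		/* a real driver writes rec->data to the device here,
		 * e.g. via ezusb_writememory() as in whiteheat_attach() */
		dev_dbg(dev, "record: addr %04x len %d\n",
			be32_to_cpu(rec->addr), be16_to_cpu(rec->len));
	}

	release_firmware(fw);
	return 0;
}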
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 4995bb5..2dd9bd4 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -95,11 +95,10 @@
 {
 	int result;
 
-	us->iobuf[0] = 0x1;
 	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
 				      USB_REQ_SET_FEATURE,
 				      USB_TYPE_STANDARD | USB_RECIP_DEVICE,
-				      0x01, 0x0, us->iobuf, 0x1, 1000);
+				      0x01, 0x0, NULL, 0x0, 1000);
 	US_DEBUGP("usb_control_msg performing result is %d\n", result);
 	return (result ? 0 : -1);
 }
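The change above drops the bogus one-byte data stage: a standard SET_FEATURE request carries no data, so the buffer is NULL and the length zero. Roughly the same request expressed with the generic usb_control_msg() API (udev standing in for the storage device's struct usb_device, with the value/index/timeout mirroring the hunk above):

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static int example_set_feature(struct usb_device *udev)
{
	/* no data stage: NULL buffer, zero length */
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       USB_REQ_SET_FEATURE,
			       USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			       0x01, 0x0, NULL, 0, 1000);
}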
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 98b89ea..c7bf895 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -78,8 +78,8 @@
 resubmit:
 	retval = usb_submit_urb (urb, GFP_ATOMIC);
 	if (retval)
-		err ("can't resubmit intr, %s-%s/input0, retval %d",
-			onetouch->udev->bus->bus_name,
+		dev_err(&dev->dev, "can't resubmit intr, %s-%s/input0, "
+			"retval %d\n", onetouch->udev->bus->bus_name,
 			onetouch->udev->devpath, retval);
 }
 
@@ -90,7 +90,7 @@
 	onetouch->is_open = 1;
 	onetouch->irq->dev = onetouch->udev;
 	if (usb_submit_urb(onetouch->irq, GFP_KERNEL)) {
-		err("usb_submit_urb failed");
+		dev_err(&dev->dev, "usb_submit_urb failed\n");
 		return -EIO;
 	}
 
@@ -117,7 +117,8 @@
 			break;
 		case US_RESUME:
 			if (usb_submit_urb(onetouch->irq, GFP_KERNEL) != 0)
-				err("usb_submit_urb failed");
+				dev_err(&onetouch->irq->dev->dev,
+					"usb_submit_urb failed\n");
 			break;
 		default:
 			break;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 3523a0b..79108d5 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -663,7 +663,7 @@
 	}
 
 	/* Did we transfer less than the minimum amount required? */
-	if (srb->result == SAM_STAT_GOOD &&
+	if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
 			scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
 		srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);
 
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index cd15547..a2b9ebbe 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1628,97 +1628,332 @@
 /* Reported by fangxiaozhi <huananhu@huawei.com>
  * This brings the HUAWEI data card devices into multi-port mode
  */
-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1001, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1003, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1004, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1401, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1402, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1403, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1404, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1405, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1406, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1407, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1408, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1409, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x140A, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x140B, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x140C, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x140D, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x140E, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x140F, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
 		0),
-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
+UNUSUAL_DEV(  0x12d1, 0x1410, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1411, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1412, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1413, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1414, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1415, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1416, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1417, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1418, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1419, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x141A, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x141B, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x141C, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x141D, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x141E, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x141F, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1420, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1421, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1422, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1423, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1424, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1425, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1426, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1427, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1428, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1429, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x142A, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x142B, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x142C, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x142D, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x142E, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x142F, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1430, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1431, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1432, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1433, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1434, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1435, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1436, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1437, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1438, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x1439, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x143A, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x143B, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x143C, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x143D, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x143E, 0x0000, 0x0000,
+		"HUAWEI MOBILE",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+		0),
+UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0x0000,
 		"HUAWEI MOBILE",
 		"Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
@@ -1745,6 +1980,15 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
+ * JMicron responds to USN and several other SCSI ioctls with a
+ * residue that causes subsequent I/O requests to fail.  */
+UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
+		"JMicron",
+		"USB to ATA/ATAPI Bridge",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE ),
+
 /* Reported by Robert Schedel <r.schedel@yahoo.de>
  * Note: this is a 'super top' device like the above 14cd/6600 device */
 UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
@@ -1818,6 +2062,15 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
+ * Mio Moov 330
+ */
+UNUSUAL_DEV(  0x3340, 0xffff, 0x0000, 0x0000,
+		"Mitac",
+		"Mio DigiWalker USB Sync",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_MAX_SECTORS_64 ),
+
 /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
 UNUSUAL_DEV(  0x4102, 0x1020, 0x0100,  0x0100,
 		"iRiver",
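For reference, the UNUSUAL_DEV() fields in the entries above are, in order: idVendor, idProduct, the inclusive bcdDevice range the quirk applies to, the vendor and product name strings, the SCSI subclass and transport overrides (US_SC_DEVICE/US_PR_DEVICE mean "use whatever the device's own descriptors report"), an optional init hook such as usb_stor_huawei_e220_init, and the quirk flags. A hypothetical entry with placeholder IDs would read:

UNUSUAL_DEV(  0x1234, 0x5678, 0x0000, 0x9999,	/* placeholder IDs */
		"Example Vendor",
		"Example Product",
		US_SC_DEVICE, US_PR_DEVICE, NULL,
		US_FL_IGNORE_RESIDUE ),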
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
new file mode 100644
index 0000000..eb09a0a
--- /dev/null
+++ b/drivers/usb/wusbcore/Kconfig
@@ -0,0 +1,41 @@
+#
+# Wireless USB Core configuration
+#
+config USB_WUSB
+	tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on USB
+	select UWB
+	select CRYPTO
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_CBC
+	select CRYPTO_MANAGER
+	select CRYPTO_AES
+	help
+	  Enable the host-side support for Wireless USB.
+
+	  To compile this support, select Y (built in). It is safe to
+	  select even if you don't have the hardware.
+
+config USB_WUSB_CBAF
+	tristate "Support WUSB Cable Based Association (CBA)"
+	depends on USB
+	help
+	  Some WUSB devices support Cable Based Association. It's used to
+	  enable the secure communication between the host and the
+	  device.
+
+	  Enable this option if your WUSB device must be connected
+	  via wired USB before establishing a wireless link.
+
+	  It is safe to select even if you don't have compatible
+	  hardware.
+
+config USB_WUSB_CBAF_DEBUG
+	bool "Enable CBA debug messages"
+	depends on USB_WUSB_CBAF
+	help
+	  Say Y here if you want the CBA to produce a bunch of debug messages
+	  to the system log. Select this if you are having a problem with
+	  CBA support and want to see more of what is going on.
+
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
new file mode 100644
index 0000000..75f1ade
--- /dev/null
+++ b/drivers/usb/wusbcore/Makefile
@@ -0,0 +1,26 @@
+obj-$(CONFIG_USB_WUSB)		+= wusbcore.o
+obj-$(CONFIG_USB_HWA_HCD)	+= wusb-wa.o
+obj-$(CONFIG_USB_WUSB_CBAF)	+= wusb-cbaf.o
+
+
+wusbcore-objs := 	\
+	crypto.o	\
+	devconnect.o	\
+	dev-sysfs.o	\
+	mmc.o		\
+	pal.o		\
+	rh.o		\
+	reservation.o	\
+	security.o	\
+	wusbhc.o
+
+wusb-cbaf-objs := cbaf.o
+
+wusb-wa-objs :=	wa-hc.o		\
+		wa-nep.o	\
+		wa-rpipe.o	\
+		wa-xfer.o
+
+ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
new file mode 100644
index 0000000..ab4788d
--- /dev/null
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -0,0 +1,673 @@
+/*
+ * Wireless USB - Cable Based Association
+ *
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * WUSB devices have to be paired (associated in WUSB lingo) so
+ * that they can connect to the system.
+ *
+ * One way of pairing is CBA, Cable Based Association. The first time
+ * you plug the device in with a cable, association is done between
+ * host and device; on subsequent connections you can connect
+ * wirelessly without having to associate again. That's the idea.
+ *
+ * This driver does nothing Earth shattering. It just provides an
+ * interface to chat with the wire-connected device so we can get a
+ * CDID (device ID) that might have been previously associated to a
+ * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
+ * (connection context), with the CK being the secret, or connection
+ * key. This is the pairing data.
+ *
+ * When a device with the CBA capability connects, the probe routine
+ * just creates a bunch of sysfs files that a user space enumeration
+ * manager uses to allow it to connect wirelessly to the system or not.
+ *
+ * The process goes like this:
+ *
+ * 1. Device plugs, cbaf is loaded, notifications happen.
+ *
+ * 2. The connection manager (CM) sees a device with CBAF capability
+ *    (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
+ *
+ * 3. The CM writes the host name, supported band groups, and the CHID
+ *    (host ID) into the wusb_host_name, wusb_host_band_groups and
+ *    wusb_chid files. These get sent to the device and the CDID (if
+ *    any) for this host is requested.
+ *
+ * 4. The CM can verify that the device's supported band groups
+ *    (wusb_device_band_groups) are compatible with the host.
+ *
+ * 5. The CM reads the wusb_cdid file.
+ *
+ * 6. The CM looks up its database
+ *
+ * 6.1 If it has a matching CHID,CDID entry, the device has been
+ *     authorized before (paired) and nothing further needs to be
+ *     done.
+ *
+ * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
+ *     its database), the device is assumed to be not known.  The CM
+ *     may associate the host with the device by writing a randomly
+ *     generated CDID to wusb_cdid and then a random CK to wusb_ck
+ *     (this uploads the new CC to the device).
+ *
+ *     CMD may choose to prompt the user before associating with a new
+ *     device.
+ *
+ * 7. Device is unplugged.
+ *
+ * When the device tries to connect wirelessly, it will present its
+ * CDID to the WUSB host controller.  The CM will query the
+ * database. If the CHID/CDID pair is found, it will (with a 4-way
+ * handshake) challenge the device to demonstrate it has the CK secret
+ * key (from our database) without actually exchanging it. Once
+ * satisfied, crypto keys are derived from the CK, the device is
+ * connected and all communication is encrypted.
+ *
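+ * An illustrative user space session against the sysfs files this
+ * driver creates (all CHID/CDID/CK values below are made up) could
+ * look like:
+ *
+ *   # echo "myhost" > wusb_host_name
+ *   # echo "0001" > wusb_host_band_groups
+ *   # echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" > wusb_chid
+ *   # cat wusb_cdid
+ *   # echo "ff ee dd cc bb aa 99 88 77 66 55 44 33 22 11 00" > wusb_cdid
+ *   # echo "0f 1e 2d 3c 4b 5a 69 78 87 96 a5 b4 c3 d2 e1 f0" > wusb_ck
+ *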
+ * References:
+ *   [WUSB-AM] Association Models Supplement to the Certified Wireless
+ *             Universal Serial Bus Specification, version 1.0.
+ */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/mutex.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/association.h>
+
+#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
+
+/* An instance of a Cable-Based-Association-Framework device */
+struct cbaf {
+	struct usb_device *usb_dev;
+	struct usb_interface *usb_iface;
+	void *buffer;
+	size_t buffer_size;
+
+	struct wusb_ckhdid chid;
+	char host_name[CBA_NAME_LEN];
+	u16 host_band_groups;
+
+	struct wusb_ckhdid cdid;
+	char device_name[CBA_NAME_LEN];
+	u16 device_band_groups;
+
+	struct wusb_ckhdid ck;
+};
+
+/*
+ * Verify that a CBAF USB-interface has what we need
+ *
+ * According to [WUSB-AM], CBA devices should provide at least two
+ * interfaces:
+ *  - RETRIEVE_HOST_INFO
+ *  - ASSOCIATE
+ *
+ * If the device doesn't provide these interfaces, we do not know how
+ * to deal with it.
+ */
+static int cbaf_check(struct cbaf *cbaf)
+{
+	int result;
+	struct device *dev = &cbaf->usb_iface->dev;
+	struct wusb_cbaf_assoc_info *assoc_info;
+	struct wusb_cbaf_assoc_request *assoc_request;
+	size_t assoc_size;
+	void *itr, *top;
+	int ar_rhi = 0, ar_assoc = 0;
+
+	result = usb_control_msg(
+		cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
+		CBAF_REQ_GET_ASSOCIATION_INFORMATION,
+		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+		0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+		cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "Cannot get available association types: %d\n",
+			result);
+		return result;
+	}
+
+	assoc_info = cbaf->buffer;
+	if (result < sizeof(*assoc_info)) {
+		dev_err(dev, "Not enough data to decode association info "
+			"header (%zu vs %zu bytes required)\n",
+			(size_t)result, sizeof(*assoc_info));
+		return -EIO;
+	}
+
+	assoc_size = le16_to_cpu(assoc_info->Length);
+	if (result < assoc_size) {
+		dev_err(dev, "Not enough data to decode association info "
+			"(%zu vs %zu bytes required)\n",
+			(size_t)result, assoc_size);
+		return -EIO;
+	}
+	/*
+	 * From now on, we just verify, but won't error out unless we
+	 * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
+	 * types.
+	 */
+	itr = cbaf->buffer + sizeof(*assoc_info);
+	top = cbaf->buffer + assoc_size;
+	dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
+		 assoc_info->NumAssociationRequests, assoc_size);
+
+	while (itr < top) {
+		u16 ar_type, ar_subtype;
+		u32 ar_size;
+		const char *ar_name;
+
+		assoc_request = itr;
+
+		if (top - itr < sizeof(*assoc_request)) {
+			dev_err(dev, "Not enough data to decode association "
+				"request (%zu vs %zu bytes needed)\n",
+				(size_t)(top - itr), sizeof(*assoc_request));
+			break;
+		}
+
+		ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
+		ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
+		ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
+		ar_name = "unknown";
+
+		switch (ar_type) {
+		case AR_TYPE_WUSB:
+			/* Verify we have what is mandated by [WUSB-AM]. */
+			switch (ar_subtype) {
+			case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
+				ar_name = "RETRIEVE_HOST_INFO";
+				ar_rhi = 1;
+				break;
+			case AR_TYPE_WUSB_ASSOCIATE:
+				/* send assoc data */
+				ar_name = "ASSOCIATE";
+				ar_assoc = 1;
+				break;
+			}
+			break;
+		}
+
+		dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
+			 "(%zu bytes): %s\n",
+			 assoc_request->AssociationDataIndex, ar_type,
+			 ar_subtype, (size_t)ar_size, ar_name);
+
+		itr += sizeof(*assoc_request);
+	}
+
+	if (!ar_rhi) {
+		dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
+			"request\n");
+		return -EINVAL;
+	}
+	if (!ar_assoc) {
+		dev_err(dev, "Missing ASSOCIATE association request\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
+	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId,
+	.AssociationTypeId    	  = cpu_to_le16(AR_TYPE_WUSB),
+	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+	.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
+	.CHID_hdr                 = WUSB_AR_CHID,
+	.LangID_hdr               = WUSB_AR_LangID,
+	.HostFriendlyName_hdr     = WUSB_AR_HostFriendlyName,
+};
+
+/* Send WUSB host information (CHID and name) to a CBAF device */
+static int cbaf_send_host_info(struct cbaf *cbaf)
+{
+	struct wusb_cbaf_host_info *hi;
+	size_t name_len;
+	size_t hi_size;
+
+	hi = cbaf->buffer;
+	memset(hi, 0, sizeof(*hi));
+	*hi = cbaf_host_info_defaults;
+	hi->CHID = cbaf->chid;
+	hi->LangID = 0;	/* FIXME: I guess... */
+	strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
+	name_len = strlen(cbaf->host_name);
+	hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
+	hi_size = sizeof(*hi) + name_len;
+
+	return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+			CBAF_REQ_SET_ASSOCIATION_RESPONSE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0x0101,
+			cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			hi, hi_size, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Get the device's information (CDID) associated with our CHID
+ *
+ * The device will return its information (CDID, name, bandgroups)
+ * associated with the CHID we have set before, or a zero CDID and
+ * default name and bandgroup if no CHID has been set or it is unknown.
+ */
+static int cbaf_cdid_get(struct cbaf *cbaf)
+{
+	int result;
+	struct device *dev = &cbaf->usb_iface->dev;
+	struct wusb_cbaf_device_info *di;
+	size_t needed;
+
+	di = cbaf->buffer;
+	result = usb_control_msg(
+		cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
+		CBAF_REQ_GET_ASSOCIATION_REQUEST,
+		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+		0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+		di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "Cannot request device information: %d\n", result);
+		return result;
+	}
+
+	needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
+	if (result < needed) {
+		dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
+			"%zu bytes needed)\n", (size_t)result, needed);
+		return -EIO;
+	}
+
+	strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
+	cbaf->cdid = di->CDID;
+	cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
+
+	return 0;
+}
+
+static ssize_t cbaf_wusb_chid_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+	char pr_chid[WUSB_CKHDID_STRSIZE];
+
+	ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid);
+}
+
+static ssize_t cbaf_wusb_chid_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t size)
+{
+	ssize_t result;
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	result = sscanf(buf,
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx",
+			&cbaf->chid.data[0] , &cbaf->chid.data[1],
+			&cbaf->chid.data[2] , &cbaf->chid.data[3],
+			&cbaf->chid.data[4] , &cbaf->chid.data[5],
+			&cbaf->chid.data[6] , &cbaf->chid.data[7],
+			&cbaf->chid.data[8] , &cbaf->chid.data[9],
+			&cbaf->chid.data[10], &cbaf->chid.data[11],
+			&cbaf->chid.data[12], &cbaf->chid.data[13],
+			&cbaf->chid.data[14], &cbaf->chid.data[15]);
+
+	if (result != 16)
+		return -EINVAL;
+
+	result = cbaf_send_host_info(cbaf);
+	if (result < 0)
+		return result;
+	result = cbaf_cdid_get(cbaf);
+	if (result < 0)
+		return result;
+	return size;
+}
+static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
+
+static ssize_t cbaf_wusb_host_name_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
+}
+
+static ssize_t cbaf_wusb_host_name_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t size)
+{
+	ssize_t result;
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	result = sscanf(buf, "%63s", cbaf->host_name);
+	if (result != 1)
+		return -EINVAL;
+
+	return size;
+}
+static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
+					 cbaf_wusb_host_name_store);
+
+static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
+}
+
+static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	ssize_t result;
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+	u16 band_groups = 0;
+
+	result = sscanf(buf, "%04hx", &band_groups);
+	if (result != 1)
+		return -EINVAL;
+
+	cbaf->host_band_groups = band_groups;
+
+	return size;
+}
+
+static DEVICE_ATTR(wusb_host_band_groups, 0600,
+		   cbaf_wusb_host_band_groups_show,
+		   cbaf_wusb_host_band_groups_store);
+
+static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
+	.Length_hdr               = WUSB_AR_Length,
+	.CDID_hdr                 = WUSB_AR_CDID,
+	.BandGroups_hdr           = WUSB_AR_BandGroups,
+	.LangID_hdr               = WUSB_AR_LangID,
+	.DeviceFriendlyName_hdr   = WUSB_AR_DeviceFriendlyName,
+};
+
+static ssize_t cbaf_wusb_cdid_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+	char pr_cdid[WUSB_CKHDID_STRSIZE];
+
+	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid);
+}
+
+static ssize_t cbaf_wusb_cdid_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	ssize_t result;
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+	struct wusb_ckhdid cdid;
+
+	result = sscanf(buf,
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx",
+			&cdid.data[0] , &cdid.data[1],
+			&cdid.data[2] , &cdid.data[3],
+			&cdid.data[4] , &cdid.data[5],
+			&cdid.data[6] , &cdid.data[7],
+			&cdid.data[8] , &cdid.data[9],
+			&cdid.data[10], &cdid.data[11],
+			&cdid.data[12], &cdid.data[13],
+			&cdid.data[14], &cdid.data[15]);
+	if (result != 16)
+		return -EINVAL;
+
+	cbaf->cdid = cdid;
+
+	return size;
+}
+static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
+
+static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
+						 struct device_attribute *attr,
+						 char *buf)
+{
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
+}
+
+static DEVICE_ATTR(wusb_device_band_groups, 0600,
+		   cbaf_wusb_device_band_groups_show,
+		   NULL);
+
+static ssize_t cbaf_wusb_device_name_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
+}
+static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
+
+static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
+	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId,
+	.AssociationTypeId    	  = cpu_to_le16(AR_TYPE_WUSB),
+	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+	.AssociationSubTypeId     = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
+	.Length_hdr               = WUSB_AR_Length,
+	.Length               	  = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
+	.ConnectionContext_hdr    = WUSB_AR_ConnectionContext,
+	.BandGroups_hdr           = WUSB_AR_BandGroups,
+};
+
+static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
+	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId,
+	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+	.Length_hdr               = WUSB_AR_Length,
+	.AssociationStatus_hdr    = WUSB_AR_AssociationStatus,
+};
+
+/*
+ * Send a new CC to the device.
+ */
+static int cbaf_cc_upload(struct cbaf *cbaf)
+{
+	int result;
+	struct device *dev = &cbaf->usb_iface->dev;
+	struct wusb_cbaf_cc_data *ccd;
+	char pr_cdid[WUSB_CKHDID_STRSIZE];
+
+	ccd =  cbaf->buffer;
+	*ccd = cbaf_cc_data_defaults;
+	ccd->CHID = cbaf->chid;
+	ccd->CDID = cbaf->cdid;
+	ccd->CK = cbaf->ck;
+	ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
+
+	dev_dbg(dev, "Trying to upload CC:\n");
+	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID);
+	dev_dbg(dev, "  CHID       %s\n", pr_cdid);
+	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID);
+	dev_dbg(dev, "  CDID       %s\n", pr_cdid);
+	dev_dbg(dev, "  Bandgroups 0x%04x\n", cbaf->host_band_groups);
+
+	result = usb_control_msg(
+		cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+		CBAF_REQ_SET_ASSOCIATION_RESPONSE,
+		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+		0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+		ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);
+
+	return result;
+}
+
+static ssize_t cbaf_wusb_ck_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	ssize_t result;
+	struct usb_interface *iface = to_usb_interface(dev);
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+
+	result = sscanf(buf,
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx",
+			&cbaf->ck.data[0] , &cbaf->ck.data[1],
+			&cbaf->ck.data[2] , &cbaf->ck.data[3],
+			&cbaf->ck.data[4] , &cbaf->ck.data[5],
+			&cbaf->ck.data[6] , &cbaf->ck.data[7],
+			&cbaf->ck.data[8] , &cbaf->ck.data[9],
+			&cbaf->ck.data[10], &cbaf->ck.data[11],
+			&cbaf->ck.data[12], &cbaf->ck.data[13],
+			&cbaf->ck.data[14], &cbaf->ck.data[15]);
+	if (result != 16)
+		return -EINVAL;
+
+	result = cbaf_cc_upload(cbaf);
+	if (result < 0)
+		return result;
+
+	return size;
+}
+static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
+
+static struct attribute *cbaf_dev_attrs[] = {
+	&dev_attr_wusb_host_name.attr,
+	&dev_attr_wusb_host_band_groups.attr,
+	&dev_attr_wusb_chid.attr,
+	&dev_attr_wusb_cdid.attr,
+	&dev_attr_wusb_device_name.attr,
+	&dev_attr_wusb_device_band_groups.attr,
+	&dev_attr_wusb_ck.attr,
+	NULL,
+};
+
+static struct attribute_group cbaf_dev_attr_group = {
+	.name = NULL,	/* we want them in the same directory */
+	.attrs = cbaf_dev_attrs,
+};
+
+static int cbaf_probe(struct usb_interface *iface,
+		      const struct usb_device_id *id)
+{
+	struct cbaf *cbaf;
+	struct device *dev = &iface->dev;
+	int result = -ENOMEM;
+
+	cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
+	if (cbaf == NULL)
+		goto error_kzalloc;
+	cbaf->buffer = kmalloc(512, GFP_KERNEL);
+	if (cbaf->buffer == NULL)
+		goto error_kmalloc_buffer;
+
+	cbaf->buffer_size = 512;
+	cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
+	cbaf->usb_iface = usb_get_intf(iface);
+	result = cbaf_check(cbaf);
+	if (result < 0) {
+		dev_err(dev, "This device is not WUSB-CBAF compliant "
+			"and is not supported yet.\n");
+		goto error_check;
+	}
+
+	result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
+	if (result < 0) {
+		dev_err(dev, "Can't register sysfs attr group: %d\n", result);
+		goto error_create_group;
+	}
+	usb_set_intfdata(iface, cbaf);
+	return 0;
+
+error_create_group:
+error_check:
+	usb_put_intf(iface);
+	usb_put_dev(cbaf->usb_dev);
+	kfree(cbaf->buffer);
+error_kmalloc_buffer:
+	kfree(cbaf);
+error_kzalloc:
+	return result;
+}
+
+static void cbaf_disconnect(struct usb_interface *iface)
+{
+	struct cbaf *cbaf = usb_get_intfdata(iface);
+	struct device *dev = &iface->dev;
+	sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
+	usb_set_intfdata(iface, NULL);
+	usb_put_intf(iface);
+	usb_put_dev(cbaf->usb_dev);
+	kfree(cbaf->buffer);
+	/* paranoia: clean up crypto keys */
+	memset(cbaf, 0, sizeof(*cbaf));
+	kfree(cbaf);
+}
+
+static struct usb_device_id cbaf_id_table[] = {
+	{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, cbaf_id_table);
+
+static struct usb_driver cbaf_driver = {
+	.name =		"wusb-cbaf",
+	.id_table =	cbaf_id_table,
+	.probe =	cbaf_probe,
+	.disconnect =	cbaf_disconnect,
+};
+
+static int __init cbaf_driver_init(void)
+{
+	return usb_register(&cbaf_driver);
+}
+module_init(cbaf_driver_init);
+
+static void __exit cbaf_driver_exit(void)
+{
+	usb_deregister(&cbaf_driver);
+}
+module_exit(cbaf_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB Cable Based Association");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
new file mode 100644
index 0000000..c36c438
--- /dev/null
+++ b/drivers/usb/wusbcore/crypto.c
@@ -0,0 +1,538 @@
+/*
+ * Ultra Wide Band
+ * AES-128 CCM Encryption
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We don't do any encryption here; we use the Linux Kernel's AES-128
+ * crypto modules to construct keys and payload blocks in a way
+ * defined by WUSB1.0[6]. Check the errata, as typos are patched
+ * there.
+ *
+ * Thanks a zillion to John Keys for his help and clarifications over
+ * the designed-by-a-committee text.
+ *
+ * So the idea is that there is this basic Pseudo-Random-Function
+ * defined in WUSB1.0[6.5] which is the core of everything. It works
+ * by tweaking some blocks, AES crypting them and then xoring
+ * something else with them (this seems to be called CBC(AES) -- can
+ * you tell I know jack about crypto?). So we just funnel it into the
+ * Linux Crypto API.
+ *
+ * We leave a crypto test module so we can verify that vectors match,
+ * every now and then.
+ *
+ * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
+ *             am learning a lot...
+ *
+ *             Conveniently, some data structures that need to be
+ *             funneled through AES are...16 bytes in size!
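+ *
+ * A rough sketch (reading aid, not normative) of how wusb_prf() below
+ * assembles its output: it concatenates successive 8 byte CCM MICs
+ * computed over the same data, bumping the SFN in the nonce each time:
+ *
+ *   out[ 0.. 7] = CCM-MAC(key, nonce{sfn=0}, a, b)
+ *   out[ 8..15] = CCM-MAC(key, nonce{sfn=1}, a, b)
+ *   ... and so on until 'len' bits of output have been produced.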
+ */
+
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/scatterlist.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
+/*
+ * Block of data, as understood by AES-CCM
+ *
+ * The code assumes this structure is nothing but a 16 byte array
+ * (packed in a struct to avoid common mess ups that I usually do with
+ * arrays and enforcing type checking).
+ */
+struct aes_ccm_block {
+	u8 data[16];
+} __attribute__((packed));
+
+/*
+ * Counter-mode Blocks (WUSB1.0[6.4])
+ *
+ * According to CCM (or so it seems), for the purpose of calculating
+ * the MIC, the message is broken in N counter-mode blocks, B0, B1,
+ * ... BN.
+ *
+ * B0 contains flags, the CCM nonce and l(m).
+ *
+ * B1 contains l(a), the MAC header, the encryption offset and padding.
+ *
+ * If EO is nonzero, additional blocks are built from payload bytes
+ * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
+ * padding is not xmitted.
+ */
+
+/* WUSB1.0[T6.4] */
+struct aes_ccm_b0 {
+	u8 flags;	/* 0x59, per CCM spec */
+	struct aes_ccm_nonce ccm_nonce;
+	__be16 lm;
+} __attribute__((packed));
+
+/* WUSB1.0[T6.5] */
+struct aes_ccm_b1 {
+	__be16 la;
+	u8 mac_header[10];
+	__le16 eo;
+	u8 security_reserved;	/* This is always zero */
+	u8 padding;		/* 0 */
+} __attribute__((packed));
+
+/*
+ * Encryption Blocks (WUSB1.0[6.4.4])
+ *
+ * CCM uses Ax blocks to generate a keystream with which the MIC and
+ * the message's payload are encoded. A0 always encrypts/decrypts the
+ * MIC. Ax (x>0) are used for the successive payload blocks.
+ *
+ * The x is the counter, and is increased for each block.
+ */
+struct aes_ccm_a {
+	u8 flags;	/* 0x01, per CCM spec */
+	struct aes_ccm_nonce ccm_nonce;
+	__be16 counter;	/* Value of x */
+} __attribute__((packed));
+
+static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
+			 size_t size)
+{
+	u8 *bo = _bo;
+	const u8 *bi1 = _bi1, *bi2 = _bi2;
+	size_t itr;
+	for (itr = 0; itr < size; itr++)
+		bo[itr] = bi1[itr] ^ bi2[itr];
+}
+
+/*
+ * CC-MAC function WUSB1.0[6.5]
+ *
+ * Take a data string and produce the encrypted CBC Counter-mode MIC
+ *
+ * Note the names for most function arguments are made to (more or
+ * less) match those used in the pseudo-function definition given in
+ * WUSB1.0[6.5].
+ *
+ * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
+ *
+ * @tfm_aes: AES cipher handle (initialized)
+ *
+ * @mic: buffer for placing the computed MIC (Message Integrity
+ *       Code). This is exactly 8 bytes, and we expect the buffer to
+ *       be at least eight bytes in length.
+ *
+ * @key: 128 bit symmetric key
+ *
+ * @n: CCM nonce
+ *
+ * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
+ *     we use exactly 14 bytes).
+ *
+ * @b: data stream to be processed; cannot be a global or const local
+ *     (will confuse the scatterlists)
+ *
+ * @blen: size of b...
+ *
+ * Still not very clear how this is done, but looks like this: we
+ * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
+ * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
+ * take the payload and divide it in blocks (16 bytes), xor them with
+ * the previous crypto result (16 bytes) and crypt it, repeat the next
+ * block with the output of the previous one, rinse wash (I guess this
+ * is what AES CBC mode means...but I truly have no idea). So we use
+ * the CBC(AES) blkcipher, which does precisely that. The IV (Initial
+ * Vector) is 16 bytes and is set to zero, so each invocation starts
+ * the chain from a known state.
+ *
+ * See rfc3610. Linux crypto has a CBC implementation, but the
+ * documentation is scarce, to say the least, and the example code is
+ * so intricate that it is difficult to understand how things work.
+ * Most of this is guess work -- bite me.
+ *
+ * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
+ *     using the 14 bytes of @a to fill up
+ *     b1.{mac_header,e0,security_reserved,padding}.
+ *
+ * NOTE: The definition of l(a) in WUSB1.0[6.5] and the definition of
+ *       l(m) are orthogonal; they bear no relationship, so it is not
+ *       in conflict with the parameter relation that
+ *       WUSB1.0[6.4.2] defines.
+ *
+ * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
+ *       first errata released on 2005/07.
+ *
+ * NOTE: we need to clean IV to zero at each invocation to make sure
+ *       we start with a fresh empty Initial Vector, so that the CBC
+ *       works ok.
+ *
+ * NOTE: if blen is not aligned to a block size, we'll pad zeros;
+ *       that's what the fourth sg entry (sg[3]) is for. Maybe there
+ *       is a smarter way to do this.
+ */
+static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
+			struct crypto_cipher *tfm_aes, void *mic,
+			const struct aes_ccm_nonce *n,
+			const struct aes_ccm_label *a, const void *b,
+			size_t blen)
+{
+	int result = 0;
+	struct blkcipher_desc desc;
+	struct aes_ccm_b0 b0;
+	struct aes_ccm_b1 b1;
+	struct aes_ccm_a ax;
+	struct scatterlist sg[4], sg_dst;
+	void *iv, *dst_buf;
+	size_t ivsize, dst_size;
+	const u8 bzero[16] = { 0 };
+	size_t zero_padding;
+
+	d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
+		  "n %p, a %p, b %p, blen %zu)\n",
+		  tfm_cbc, tfm_aes, mic, n, a, b, blen);
+	/*
+	 * These checks should be compile time optimized out;
+	 * they ensure @a fills b1's mac_header and following fields.
+	 */
+	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
+	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
+	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
+	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+
+	result = -ENOMEM;
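+	/* Pad the payload up to a whole number of 16 byte AES blocks;
+	 * e.g. a 30 byte payload gets 2 bytes of zero padding while a
+	 * 32 byte payload gets none. */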
+	zero_padding = blen % sizeof(struct aes_ccm_block);
+	if (zero_padding)
+		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
+	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+	dst_buf = kzalloc(dst_size, GFP_KERNEL);
+	if (dst_buf == NULL) {
+		printk(KERN_ERR "E: can't alloc destination buffer\n");
+		goto error_dst_buf;
+	}
+
+	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
+	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
+	memset(iv, 0, ivsize);
+
+	/* Setup B0 */
+	b0.flags = 0x59;	/* Format B0 */
+	b0.ccm_nonce = *n;
+	b0.lm = cpu_to_be16(0);	/* WUSB1.0[6.5] sez l(m) is 0 */
+
+	/* Setup B1
+	 *
+	 * The WUSB spec is anything but clear! WUSB1.0[6.5]
+	 * says that to initialize B1 from A with 'l(a) = blen +
+	 * 14'--after clarification, it means to use A's contents
+	 * for MAC Header, EO, sec reserved and padding.
+	 */
+	b1.la = cpu_to_be16(blen + 14);
+	memcpy(&b1.mac_header, a, sizeof(*a));
+
+	d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0));
+	d_dump(4, NULL, &b0, sizeof(b0));
+	d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1));
+	d_dump(4, NULL, &b1, sizeof(b1));
+	d_printf(4, NULL, "I: B (%zu bytes)\n", blen);
+	d_dump(4, NULL, b, blen);
+	d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding);
+	d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize);
+	d_dump(4, NULL, iv, ivsize);
+
+	sg_init_table(sg, ARRAY_SIZE(sg));
+	sg_set_buf(&sg[0], &b0, sizeof(b0));
+	sg_set_buf(&sg[1], &b1, sizeof(b1));
+	sg_set_buf(&sg[2], b, blen);
+	/* 0 if well behaved :) */
+	sg_set_buf(&sg[3], bzero, zero_padding);
+	sg_init_one(&sg_dst, dst_buf, dst_size);
+
+	desc.tfm = tfm_cbc;
+	desc.flags = 0;
+	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
+	if (result < 0) {
+		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
+		       result);
+		goto error_cbc_crypt;
+	}
+	d_printf(4, NULL, "D: MIC tag\n");
+	d_dump(4, NULL, iv, ivsize);
+
+	/* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
+	 * The procedure is to AES crypt the A0 block and XOR the MIC
+	 * Tag against it; we only do the first 8 bytes and place it
+	 * directly in the destination buffer.
+	 *
+	 * POS Crypto API: size is assumed to be AES's block size.
+	 * Thanks for documenting it -- tip taken from airo.c
+	 */
+	ax.flags = 0x01;		/* as per WUSB 1.0 spec */
+	ax.ccm_nonce = *n;
+	ax.counter = 0;
+	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
+	bytewise_xor(mic, &ax, iv, 8);
+	d_printf(4, NULL, "D: CTR[MIC]\n");
+	d_dump(4, NULL, &ax, 8);
+	d_printf(4, NULL, "D: CCM-MIC tag\n");
+	d_dump(4, NULL, mic, 8);
+	result = 8;
+error_cbc_crypt:
+	kfree(dst_buf);
+error_dst_buf:
+	d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
+		"n %p, a %p, b %p, blen %zu)\n",
+		tfm_cbc, tfm_aes, mic, n, a, b, blen);
+	return result;
+}
+
+/*
+ * WUSB Pseudo Random Function (WUSB1.0[6.5])
+ *
+ * @b: buffer to the source data; cannot be a global or const local
+ *     (will confuse the scatterlists)
+ */
+ssize_t wusb_prf(void *out, size_t out_size,
+		 const u8 key[16], const struct aes_ccm_nonce *_n,
+		 const struct aes_ccm_label *a,
+		 const void *b, size_t blen, size_t len)
+{
+	ssize_t result, bytes = 0, bitr;
+	struct aes_ccm_nonce n = *_n;
+	struct crypto_blkcipher *tfm_cbc;
+	struct crypto_cipher *tfm_aes;
+	u64 sfn = 0;
+	__le64 sfn_le;
+
+	d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
+		  "a %p, b %p, blen %zu, len %zu)\n", out, out_size,
+		  key, _n, a, b, blen, len);
+
+	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm_cbc)) {
+		result = PTR_ERR(tfm_cbc);
+		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
+		goto error_alloc_cbc;
+	}
+	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
+	if (result < 0) {
+		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
+		goto error_setkey_cbc;
+	}
+
+	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm_aes)) {
+		result = PTR_ERR(tfm_aes);
+		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
+		goto error_alloc_aes;
+	}
+	result = crypto_cipher_setkey(tfm_aes, key, 16);
+	if (result < 0) {
+		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
+		goto error_setkey_aes;
+	}
+
+	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
+		sfn_le = cpu_to_le64(sfn++);
+		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
+		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+				      &n, a, b, blen);
+		if (result < 0)
+			goto error_ccm_mac;
+		bytes += result;
+	}
+	result = bytes;
+error_ccm_mac:
+error_setkey_aes:
+	crypto_free_cipher(tfm_aes);
+error_alloc_aes:
+error_setkey_cbc:
+	crypto_free_blkcipher(tfm_cbc);
+error_alloc_cbc:
+	d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
+		"a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size,
+		key, _n, a, b, blen, len, (int)bytes);
+	return result;
+}
+
+/* WUSB1.0[A.2] test vectors */
+static const u8 stv_hsmic_key[16] = {
+	0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
+	0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
+};
+
+static const struct aes_ccm_nonce stv_hsmic_n = {
+	.sfn = { 0 },
+	.tkid = { 0x76, 0x98, 0x01,  },
+	.dest_addr = { .data = { 0xbe, 0x00 } },
+	.src_addr = { .data = { 0x76, 0x98 } },
+};
+
+/*
+ * Out-of-band MIC Generation verification code
+ *
+ */
+static int wusb_oob_mic_verify(void)
+{
+	int result;
+	u8 mic[8];
+	/* WUSB1.0[A.2] test vectors
+	 *
+	 * Need to keep it in the local stack as GCC 4.1.3something
+	 * messes up and generates noise.
+	 */
+	struct usb_handshake stv_hsmic_hs = {
+		.bMessageNumber = 2,
+		.bStatus 	= 00,
+		.tTKID 		= { 0x76, 0x98, 0x01 },
+		.bReserved 	= 00,
+		.CDID 		= { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+				    0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
+				    0x3c, 0x3d, 0x3e, 0x3f },
+		.nonce	 	= { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+				    0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+				    0x2c, 0x2d, 0x2e, 0x2f },
+		.MIC	 	= { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
+				    0x14, 0x7b } ,
+	};
+	size_t hs_size;
+
+	result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
+	if (result < 0)
+		printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
+	else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
+		printk(KERN_ERR "E: OOB MIC test: "
+		       "mismatch between MIC result and WUSB1.0[A2]\n");
+		hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
+		printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
+		dump_bytes(NULL, &stv_hsmic_hs, hs_size);
+		printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
+		       sizeof(stv_hsmic_n));
+		dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n));
+		printk(KERN_ERR "E: MIC out:\n");
+		dump_bytes(NULL, mic, sizeof(mic));
+		printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
+		dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
+		result = -EINVAL;
+	} else
+		result = 0;
+	return result;
+}
+
+/*
+ * Test vectors for Key derivation
+ *
+ * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
+ * (errata corrected in 2005/07).
+ */
+static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
+	0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
+	0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
+};
+
+static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
+	.sfn = { 0 },
+	.tkid = { 0x76, 0x98, 0x01,  },
+	.dest_addr = { .data = { 0xbe, 0x00 } },
+	.src_addr = { .data = { 0x76, 0x98 } },
+};
+
+static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
+	.kck = {
+		0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
+		0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
+	},
+	.ptk = {
+		0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
+		0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
+	}
+};
+
+/*
+ * Perform a test to make sure we match the vectors defined in
+ * WUSB1.0[A.1] (Errata 2006/12)
+ */
+static int wusb_key_derive_verify(void)
+{
+	int result = 0;
+	struct wusb_keydvt_out keydvt_out;
+	/* These come from WUSB1.0[A.1] + 2006/12 errata
+	 * NOTE: can't make this const or global -- somehow it seems
+	 *       the scatterlists for crypto get confused and we get
+	 *       bad data. There is no doc on this... */
+	struct wusb_keydvt_in stv_keydvt_in_a1 = {
+		.hnonce = {
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+		},
+		.dnonce = {
+			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
+		}
+	};
+
+	result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
+				 &stv_keydvt_in_a1);
+	if (result < 0)
+		printk(KERN_ERR "E: WUSB key derivation test: "
+		       "derivation failed: %d\n", result);
+	else if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
+		printk(KERN_ERR "E: WUSB key derivation test: "
+		       "mismatch between key derivation result "
+		       "and WUSB1.0[A1] Errata 2006/12\n");
+		printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n",
+		       sizeof(stv_key_a1));
+		dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1));
+		printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n",
+		       sizeof(stv_keydvt_n_a1));
+		dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
+		printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n",
+		       sizeof(stv_keydvt_in_a1));
+		dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
+		printk(KERN_ERR "E: keydvt out: KCK\n");
+		dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck));
+		printk(KERN_ERR "E: keydvt out: PTK\n");
+		dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk));
+		result = -EINVAL;
+	} else
+		result = 0;
+	return result;
+}
+
+/*
+ * Initialize crypto system
+ *
+ * FIXME: we do nothing now, other than verifying. Later on we'll
+ * cache the encryption stuff, so that's why we have a separate init.
+ */
+int wusb_crypto_init(void)
+{
+	int result;
+
+	result = wusb_key_derive_verify();
+	if (result < 0)
+		return result;
+	return wusb_oob_mic_verify();
+}
+
+void wusb_crypto_exit(void)
+{
+	/* FIXME: free cached crypto transforms */
+}
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
new file mode 100644
index 0000000..7897a19
--- /dev/null
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -0,0 +1,143 @@
+/*
+ * WUSB devices
+ * sysfs bindings
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Get them out of the way...
+ */
+
+#include <linux/jiffies.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include "wusbhc.h"
+
+#undef D_LOCAL
+#define D_LOCAL 4
+#include <linux/uwb/debug.h>
+
+static ssize_t wusb_disconnect_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct usb_device *usb_dev;
+	struct wusbhc *wusbhc;
+	unsigned command;
+	u8 port_idx;
+
+	if (sscanf(buf, "%u", &command) != 1)
+		return -EINVAL;
+	if (command == 0)
+		return size;
+	usb_dev = to_usb_device(dev);
+	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+	if (wusbhc == NULL)
+		return -ENODEV;
+
+	mutex_lock(&wusbhc->mutex);
+	port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+	__wusbhc_dev_disable(wusbhc, port_idx);
+	mutex_unlock(&wusbhc->mutex);
+	wusbhc_put(wusbhc);
+	return size;
+}
+static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
+
+static ssize_t wusb_cdid_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	ssize_t result;
+	struct wusb_dev *wusb_dev;
+
+	wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
+	if (wusb_dev == NULL)
+		return -ENODEV;
+	result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid);
+	strcat(buf, "\n");
+	wusb_dev_put(wusb_dev);
+	return result + 1;
+}
+static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
+
+static ssize_t wusb_ck_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	int result;
+	struct usb_device *usb_dev;
+	struct wusbhc *wusbhc;
+	struct wusb_ckhdid ck;
+
+	result = sscanf(buf,
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx\n",
+			&ck.data[0] , &ck.data[1],
+			&ck.data[2] , &ck.data[3],
+			&ck.data[4] , &ck.data[5],
+			&ck.data[6] , &ck.data[7],
+			&ck.data[8] , &ck.data[9],
+			&ck.data[10], &ck.data[11],
+			&ck.data[12], &ck.data[13],
+			&ck.data[14], &ck.data[15]);
+	if (result != 16)
+		return -EINVAL;
+
+	usb_dev = to_usb_device(dev);
+	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+	if (wusbhc == NULL)
+		return -ENODEV;
+	result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
+	memset(&ck, 0, sizeof(ck));
+	wusbhc_put(wusbhc);
+	return result < 0 ? result : size;
+}
+static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
+
+static struct attribute *wusb_dev_attrs[] = {
+		&dev_attr_wusb_disconnect.attr,
+		&dev_attr_wusb_cdid.attr,
+		&dev_attr_wusb_ck.attr,
+		NULL,
+};
+
+static struct attribute_group wusb_dev_attr_group = {
+	.name = NULL,	/* we want them in the same directory */
+	.attrs = wusb_dev_attrs,
+};
+
+int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
+		       struct wusb_dev *wusb_dev)
+{
+	int result = sysfs_create_group(&usb_dev->dev.kobj,
+					&wusb_dev_attr_group);
+	struct device *dev = &usb_dev->dev;
+	if (result < 0)
+		dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
+			result);
+	return result;
+}
+
+void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
+{
+	struct usb_device *usb_dev = wusb_dev->usb_dev;
+	if (usb_dev)
+		sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
+}
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
new file mode 100644
index 0000000..f45d777
--- /dev/null
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -0,0 +1,1297 @@
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * Device Connect handling
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: this file needs to be broken up, it's grown too big
+ *
+ *
+ * WUSB1.0[7.1, 7.5.1]
+ *
+ * WUSB device connection is kind of messy. Some background:
+ *
+ *     When a device wants to connect it scans the UWB radio channels
+ *     looking for a WUSB Channel; a WUSB channel is defined by MMCs
+ *     (Micro Managed Commands or something like that) [see
+ *     Design-overview for more on this] .
+ *
+ * So, the device scans the radio, finds MMCs and thus a host and
+ * checks when the next DNTS is. It sends a Device Notification Connect
+ * (DN_Connect); the host picks it up (through nep.c and notif.c, ends
+ * up in wusb_devconnect_ack(), which creates a wusb_dev structure in
+ * wusbhc->port[port_number].wusb_dev), assigns an unauth address
+ * to the device (this means from 0x80 to 0xfe) and sends, in the MMC,
+ * a Connect Ack Information Element (ConnAck IE).
+ *
+ * So now the device has a WUSB address. From now on, we use
+ * that to talk to it in the RPipes.
+ *
+ * ASSUMPTIONS:
+ *
+ *  - We use as the device address the port number where it is
+ *    connected (port 0 doesn't exist). For unauth devices, it is
+ *    128 + that.
+ *
+ * ROADMAP:
+ *
+ *   This file contains the logic for doing that--entry points:
+ *
+ *   wusb_devconnect_ack()      Ack a device until _acked() called.
+ *                              Called by notif.c:wusb_handle_dn_connect()
+ *                              when a DN_Connect is received.
+ *
+ *   wusbhc_devconnect_auth()   Called by rh.c:wusbhc_rh_port_reset() when
+ *                              doing the device connect sequence.
+ *
+ *     wusb_devconnect_acked()  Ack done, release resources.
+ *
+ *   wusb_handle_dn_alive()     Called by notif.c:wusb_handle_dn()
+ *                              for processing a DN_Alive pong from a device.
+ *
+ *   wusb_handle_dn_disconnect() Called by notif.c:wusb_handle_dn() to
+ *                              process a disconnect request from a
+ *                              device.
+ *
+ *   wusb_dev_reset()           Called by rh.c:wusbhc_rh_port_reset() when
+ *                              resetting a device.
+ *
+ *   __wusb_dev_disable()       Called by rh.c:wusbhc_rh_clear_port_feat() when
+ *                              disabling a port.
+ *
+ *   wusb_devconnect_create()   Called when creating the host by
+ *                              lc.c:wusbhc_create().
+ *
+ *   wusb_devconnect_destroy()  Cleanup called removing the host. Called
+ *                              by lc.c:wusbhc_destroy().
+ *
+ *   Each Wireless USB host maintains a list of DN_Connect requests
+ *   (actually we maintain a list of pending Connect Acks, the
+ *   wusbhc->cack_list).
+ *
+ * LIFE CYCLE OF port->wusb_dev
+ *
+ *   Before the @wusbhc structure put()s the reference it owns for
+ *   port->wusb_dev [and clean the wusb_dev pointer], it needs to
+ *   lock @wusbhc->mutex.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include "wusbhc.h"
+
+#undef D_LOCAL
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+static void wusbhc_devconnect_acked_work(struct work_struct *work);
+
+static void wusb_dev_free(struct wusb_dev *wusb_dev)
+{
+	if (wusb_dev) {
+		kfree(wusb_dev->set_gtk_req);
+		usb_free_urb(wusb_dev->set_gtk_urb);
+		kfree(wusb_dev);
+	}
+}
+
+static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
+{
+	struct wusb_dev *wusb_dev;
+	struct urb *urb;
+	struct usb_ctrlrequest *req;
+
+	wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
+	if (wusb_dev == NULL)
+		goto err;
+
+	wusb_dev->wusbhc = wusbhc;
+
+	INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (urb == NULL)
+		goto err;
+
+	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+	if (req == NULL)
+		goto err;
+
+	req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
+	req->bRequest = USB_REQ_SET_DESCRIPTOR;
+	req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
+	req->wIndex = 0;
+	req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
+
+	wusb_dev->set_gtk_urb = urb;
+	wusb_dev->set_gtk_req = req;
+
+	return wusb_dev;
+err:
+	wusb_dev_free(wusb_dev);
+	return NULL;
+}
+
+
+/*
+ * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE
+ * properly so that it can be added to the MMC.
+ *
+ * We just get the @wusbhc->cack_list and fill out the first four ones
+ * or fewer (per spec, WUSB1.0[7.5, before T7-38]). If the ConnectAck
+ * WUSB IE is not allocated, we alloc it.
+ *
+ * @wusbhc->mutex must be taken
+ */
+static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc)
+{
+	unsigned cnt;
+	struct wusb_dev *dev_itr;
+	struct wuie_connect_ack *cack_ie;
+
+	cack_ie = &wusbhc->cack_ie;
+	cnt = 0;
+	list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) {
+		cack_ie->blk[cnt].CDID = dev_itr->cdid;
+		cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr;
+		if (++cnt >= WUIE_ELT_MAX)
+			break;
+	}
+	cack_ie->hdr.bLength = sizeof(cack_ie->hdr)
+		+ cnt * sizeof(cack_ie->blk[0]);
+}
+
+/*
+ * Register a new device that wants to connect
+ *
+ * A new device wants to connect, so we add it to the Connect-Ack
+ * list. We give it an address in the unauthorized range (bit 7 set);
+ * user space will have to drive authorization further on.
+ *
+ * @dev_addr: address to use for the device (which is also the port
+ *            number).
+ *
+ * @wusbhc->mutex must be taken
+ */
+static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
+					struct wusb_dn_connect *dnc,
+					const char *pr_cdid, u8 port_idx)
+{
+	struct device *dev = wusbhc->dev;
+	struct wusb_dev *wusb_dev;
+	int new_connection = wusb_dn_connect_new_connection(dnc);
+	u8 dev_addr;
+	int result;
+
+	/* Is it registered already? */
+	list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
+		if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
+			    sizeof(wusb_dev->cdid)))
+			return wusb_dev;
+	/* We don't have it, create an entry, register it */
+	wusb_dev = wusb_dev_alloc(wusbhc);
+	if (wusb_dev == NULL)
+		return NULL;
+	wusb_dev_init(wusb_dev);
+	wusb_dev->cdid = dnc->CDID;
+	wusb_dev->port_idx = port_idx;
+
+	/*
+	 * Devices are always available within the cluster reservation
+	 * and since the hardware will take the intersection of the
+	 * per-device availability and the cluster reservation, the
+	 * per-device availability can simply be set to always
+	 * available.
+	 */
+	bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);
+
+	/* FIXME: handle reconnects instead of assuming connects are
+	   always new. */
+	if (new_connection == 0)
+		new_connection = 1;
+	if (new_connection) {
+		dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
+
+		dev_info(dev, "Connecting new WUSB device to address %u, "
+			"port %u\n", dev_addr, port_idx);
+
+		result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
+		if (result < 0)
+			return NULL;
+	}
+	wusb_dev->entry_ts = jiffies;
+	list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
+	wusbhc->cack_count++;
+	wusbhc_fill_cack_ie(wusbhc);
+	return wusb_dev;
+}
+
+/*
+ * Remove a Connect-Ack context entry from the HCs view
+ *
+ * @wusbhc->mutex must be taken
+ */
+static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct device *dev = wusbhc->dev;
+	d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
+	list_del_init(&wusb_dev->cack_node);
+	wusbhc->cack_count--;
+	wusbhc_fill_cack_ie(wusbhc);
+	d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
+}
+
+/*
+ * @wusbhc->mutex must be taken */
+static
+void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct device *dev = wusbhc->dev;
+	d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
+	wusbhc_cack_rm(wusbhc, wusb_dev);
+	if (wusbhc->cack_count)
+		wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
+	else
+		wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
+	d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
+}
+
+static void wusbhc_devconnect_acked_work(struct work_struct *work)
+{
+	struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
+						 devconnect_acked_work);
+	struct wusbhc *wusbhc = wusb_dev->wusbhc;
+
+	mutex_lock(&wusbhc->mutex);
+	wusbhc_devconnect_acked(wusbhc, wusb_dev);
+	mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Ack a device for connection
+ *
+ * FIXME: docs
+ *
+ * @pr_cdid:	Printable CDID (hex). Use @dnc->CDID for the real deal.
+ *
+ * So we get the connect ack IE (may have been allocated already),
+ * find an empty connect block, an empty virtual port, create an
+ * address with it (see below), make it an unauth addr [bit 7 set] and
+ * set the MMC.
+ *
+ * Addresses: because WUSB hosts have no downstream hubs, we can do a
+ *            1:1 mapping between 'port number' and device
+ *            address. This simplifies many things, as during this
+ *            initial connect phase the USB stack has no knowledge of
+ *            the device and hasn't assigned an address yet--we know
+ *            USB's choose_address() will use the same heuristics we
+ *            use here, so we can assume which address will be assigned.
+ *
+ *            USB stack always assigns address 1 to the root hub, so
+ *            to the port number we add 2 (thus virtual port #0 is
+ *            addr #2).
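+ *
+ *            For example (illustrative): the device on virtual port
+ *            index 0 is first given the unauthenticated address 0x82
+ *            (that is, 2 | WUSB_DEV_ADDR_UNAUTH) and ends up at
+ *            address 2 once authenticated.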
+ *
+ * @wusbhc shall be referenced
+ */
+static
+void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
+			   const char *pr_cdid)
+{
+	int result;
+	struct device *dev = wusbhc->dev;
+	struct wusb_dev *wusb_dev;
+	struct wusb_port *port;
+	unsigned idx, devnum;
+
+	d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid);
+	mutex_lock(&wusbhc->mutex);
+
+	/* Check we are not handling it already */
+	for (idx = 0; idx < wusbhc->ports_max; idx++) {
+		port = wusb_port_by_idx(wusbhc, idx);
+		if (port->wusb_dev
+		    && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0)
+			goto error_unlock;
+	}
+	/* Look up those fake ports we have for a free one */
+	for (idx = 0; idx < wusbhc->ports_max; idx++) {
+		port = wusb_port_by_idx(wusbhc, idx);
+		if ((port->status & USB_PORT_STAT_POWER)
+		    && !(port->status & USB_PORT_STAT_CONNECTION))
+			break;
+	}
+	if (idx >= wusbhc->ports_max) {
+		dev_err(dev, "Host controller can't connect more devices "
+			"(%u already connected); device %s rejected\n",
+			wusbhc->ports_max, pr_cdid);
+		/* NOTE: we could send a WUIE_Disconnect here, but we haven't
+		 *       even acked, so the device will eventually timeout the
+		 *       connection, right? */
+		goto error_unlock;
+	}
+
+	devnum = idx + 2;
+
+	/* Make sure we are using no crypto on that "virtual port" */
+	wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
+
+	/* Grab a filled in Connect-Ack context, fill out the
+	 * Connect-Ack Wireless USB IE, set the MMC */
+	wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx);
+	if (wusb_dev == NULL)
+		goto error_unlock;
+	result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
+	if (result < 0)
+		goto error_unlock;
+	/* Give the device at least 2ms (WUSB1.0[7.5.1p3]); let's do
+	 * three for good measure */
+	msleep(3);
+	port->wusb_dev = wusb_dev;
+	port->status |= USB_PORT_STAT_CONNECTION;
+	port->change |= USB_PORT_STAT_C_CONNECTION;
+	port->reset_count = 0;
+	/* Now the port status changed to connected; khubd will
+	 * pick the change up and try to reset the port to bring it to
+	 * the enabled state--so this process returns up to the stack
+	 * and it calls back into wusbhc_rh_port_reset() which will call
+	 * devconnect_auth().
+	 */
+error_unlock:
+	mutex_unlock(&wusbhc->mutex);
+	d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid);
+	return;
+
+}
+
+/*
+ * Disconnect a Wireless USB device from its fake port
+ *
+ * Marks the port as disconnected so that khubd can pick up the change
+ * and drops our knowledge about the device.
+ *
+ * Assumes there is a device connected
+ *
+ * @port_index: zero based port number
+ *
+ * NOTE: @wusbhc->mutex is locked
+ *
+ * WARNING: From here it is not very safe to access anything hanging off
+ *	    wusb_dev
+ */
+static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
+				    struct wusb_port *port)
+{
+	struct device *dev = wusbhc->dev;
+	struct wusb_dev *wusb_dev = port->wusb_dev;
+
+	d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port);
+	port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
+			  | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
+			  | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
+	port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
+	if (wusb_dev) {
+		if (!list_empty(&wusb_dev->cack_node))
+			list_del_init(&wusb_dev->cack_node);
+		/* For the one in cack_add() */
+		wusb_dev_put(wusb_dev);
+	}
+	port->wusb_dev = NULL;
+	/* don't reset the reset_count to zero or wusbhc_rh_port_reset will get
+	 * confused! We only reset to zero when we connect a new device.
+	 */
+
+	/* After a device disconnects, change the GTK (see [WUSB]
+	 * section 6.2.11.2). */
+	wusbhc_gtk_rekey(wusbhc);
+
+	d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port);
+	/* The Wireless USB part has forgotten about the device already; now
+	 * khubd's timer will pick up the disconnection and remove the USB
+	 * device from the system
+	 */
+}
+
+/*
+ * Authenticate a device into the WUSB Cluster
+ *
+ * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when
+ * asking for a reset on a port that is not enabled (ie: first connect
+ * on the port).
+ *
+ * Performs the 4-way handshake to allow the device to communicate with the
+ * WUSB Cluster securely; once done, issue a request to the device for
+ * it to change to address 0.
+ *
+ * This mimics the reset step of Wired USB that once resetting a
+ * device, leaves the port in enabled state and the dev with the
+ * default address (0).
+ *
+ * WUSB1.0[7.1.2]
+ *
+ * @port_idx: port where the change happened--This is the index into
+ *            the wusbhc port array, not the USB port number.
+ */
+int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx)
+{
+	struct device *dev = wusbhc->dev;
+	struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
+
+	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
+	port->status &= ~USB_PORT_STAT_RESET;
+	port->status |= USB_PORT_STAT_ENABLE;
+	port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
+	d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx);
+	return 0;
+}
+
+/*
+ * Refresh the list of keep alives to emit in the MMC
+ *
+ * Some devices don't respond to keep alives unless they've been
+ * authenticated, so skip unauthenticated devices.
+ *
+ * We only publish the first four devices that have a coming timeout
+ * condition. Then when we are done processing those, we go for the
+ * next ones. We ignore the ones that have timed out already (they'll
+ * be purged).
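+ *
+ * For example (numbers purely illustrative): with a trust timeout of
+ * 4000 ms, a device that has been silent for more than 2000 ms gets a
+ * keep alive, and one silent for more than 4000 ms is disconnected.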
+ *
+ * This might cause the first devices to timeout the last devices in
+ * the port array...FIXME: come up with a better algorithm?
+ *
+ * Note we can't do much about MMC's ops errors; we hope next refresh
+ * will kind of handle it.
+ *
+ * NOTE: @wusbhc->mutex is locked
+ */
+static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
+{
+	struct device *dev = wusbhc->dev;
+	unsigned cnt;
+	struct wusb_dev *wusb_dev;
+	struct wusb_port *wusb_port;
+	struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie;
+	unsigned keep_alives, old_keep_alives;
+
+	old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
+	keep_alives = 0;
+	for (cnt = 0;
+	     keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max;
+	     cnt++) {
+		unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
+
+		wusb_port = wusb_port_by_idx(wusbhc, cnt);
+		wusb_dev = wusb_port->wusb_dev;
+
+		if (wusb_dev == NULL)
+			continue;
+		if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated)
+			continue;
+
+		if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
+			dev_err(dev, "KEEPALIVE: device %u timed out\n",
+				wusb_dev->addr);
+			__wusbhc_dev_disconnect(wusbhc, wusb_port);
+		} else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) {
+			/* Approaching the timeout cutoff, need to refresh */
+			ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
+		}
+	}
+	if (keep_alives & 0x1)	/* pad to even number ([WUSB] section 7.5.9) */
+		ie->bDeviceAddress[keep_alives++] = 0x7f;
+	ie->hdr.bLength = sizeof(ie->hdr) +
+		keep_alives*sizeof(ie->bDeviceAddress[0]);
+	if (keep_alives > 0)
+		wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr);
+	else if (old_keep_alives != 0)
+		wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+}
+
+/*
+ * Do a run through all devices checking for timeouts
+ */
+static void wusbhc_keep_alive_run(struct work_struct *ws)
+{
+	struct delayed_work *dw =
+		container_of(ws, struct delayed_work, work);
+	struct wusbhc *wusbhc =
+		container_of(dw, struct wusbhc, keep_alive_timer);
+
+	d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+	if (wusbhc->active) {
+		mutex_lock(&wusbhc->mutex);
+		__wusbhc_keep_alive(wusbhc);
+		mutex_unlock(&wusbhc->mutex);
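+		/* Re-arm at half the trust timeout (ms converted to jiffies). */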
+		queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+				   (wusbhc->trust_timeout * CONFIG_HZ)/1000/2);
+	}
+	d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
+	return;
+}
+
+/*
+ * Find the wusb_dev from its device address.
+ *
+ * The device can be found directly from the address (see
+ * wusb_cack_add() for where the device address is set to port_idx
+ * +2), except when the address is zero.
+ */
+static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
+{
+	int p;
+
+	if (addr == 0xff) /* unconnected */
+		return NULL;
+
+	if (addr > 0) {
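+		/* Mask off the unauthenticated-address bit and undo the
+		 * "port_idx + 2" mapping described above. */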
+		int port = (addr & ~0x80) - 2;
+		if (port < 0 || port >= wusbhc->ports_max)
+			return NULL;
+		return wusb_port_by_idx(wusbhc, port)->wusb_dev;
+	}
+
+	/* Look for the device with address 0. */
+	for (p = 0; p < wusbhc->ports_max; p++) {
+		struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
+		if (wusb_dev && wusb_dev->addr == addr)
+			return wusb_dev;
+	}
+	return NULL;
+}
+
+/*
+ * Handle a DN_Alive notification (WUSB1.0[7.6.1])
+ *
+ * This just updates the device activity timestamp and then refreshes
+ * the keep alive IE.
+ *
+ * @wusbhc shall be referenced and unlocked
+ */
+static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct device *dev = wusbhc->dev;
+
+	d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr);
+
+	mutex_lock(&wusbhc->mutex);
+	wusb_dev->entry_ts = jiffies;
+	__wusbhc_keep_alive(wusbhc);
+	mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Handle a DN_Connect notification (WUSB1.0[7.6.1])
+ *
+ * @wusbhc
+ * @pkt_hdr
+ * @size:    Size of the buffer where the notification resides; if the
+ *           notification data suggests there should be more data than
+ *           available, an error will be signaled and the whole buffer
+ *           consumed.
+ *
+ * @wusbhc->mutex shall be held
+ */
+static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
+				     struct wusb_dn_hdr *dn_hdr,
+				     size_t size)
+{
+	struct device *dev = wusbhc->dev;
+	struct wusb_dn_connect *dnc;
+	char pr_cdid[WUSB_CKHDID_STRSIZE];
+	static const char *beacon_behaviour[] = {
+		"reserved",
+		"self-beacon",
+		"directed-beacon",
+		"no-beacon"
+	};
+
+	d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size);
+	if (size < sizeof(*dnc)) {
+		dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
+			size, sizeof(*dnc));
+		goto out;
+	}
+
+	dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
+	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID);
+	dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
+		 pr_cdid,
+		 wusb_dn_connect_prev_dev_addr(dnc),
+		 beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
+		 wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
+	/* ACK the connect */
+	wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
+out:
+	d_fnend(3, dev, "(%p, %p, %zu) = void\n",
+		wusbhc, dn_hdr, size);
+	return;
+}
+
+/*
+ * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
+ *
+ * Device is going down -- do the disconnect.
+ *
+ * @wusbhc shall be referenced and unlocked
+ */
+static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct device *dev = wusbhc->dev;
+
+	dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
+
+	mutex_lock(&wusbhc->mutex);
+	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
+	mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Reset a WUSB device on a HWA
+ *
+ * @wusbhc
+ * @port_idx   Index of the port where the device is
+ *
+ * In Wireless USB, a reset is more or less equivalent to a full
+ * disconnect; so we just do a full disconnect and send the device a
+ * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs).
+ *
+ * @wusbhc should be refcounted and unlocked
+ */
+int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx)
+{
+	int result;
+	struct device *dev = wusbhc->dev;
+	struct wusb_dev *wusb_dev;
+	struct wuie_reset *ie;
+
+	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
+	mutex_lock(&wusbhc->mutex);
+	result = 0;
+	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+	if (wusb_dev == NULL) {
+		/* reset no device? ignore */
+		dev_dbg(dev, "RESET: no device at port %u, ignoring\n",
+			port_idx);
+		goto error_unlock;
+	}
+	result = -ENOMEM;
+	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+	if (ie == NULL)
+		goto error_unlock;
+	ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID);
+	ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE;
+	ie->CDID = wusb_dev->cdid;
+	result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr);
+	if (result < 0) {
+		dev_err(dev, "RESET: cant's set MMC: %d\n", result);
+		goto error_kfree;
+	}
+	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+
+	/* 120ms, hopefully 6 MMCs (FIXME) */
+	msleep(120);
+	wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+error_kfree:
+	kfree(ie);
+error_unlock:
+	mutex_unlock(&wusbhc->mutex);
+	d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
+	return result;
+}
+
+/*
+ * Handle a Device Notification coming from a host
+ *
+ * The Device Notification comes from a host (HWA, DWA or WHCI)
+ * wrapped in a set of headers. Somebody else has peeled off those
+ * headers for us and we just get the Device Notification.
+ *
+ * Invalid DNs (e.g., too short) are discarded.
+ *
+ * @wusbhc shall be referenced
+ *
+ * FIXMES:
+ *  - implement priorities as in WUSB1.0[Table 7-55]?
+ */
+void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
+		      struct wusb_dn_hdr *dn_hdr, size_t size)
+{
+	struct device *dev = wusbhc->dev;
+	struct wusb_dev *wusb_dev;
+
+	d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr);
+
+	if (size < sizeof(struct wusb_dn_hdr)) {
+		dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
+			(int)size, (int)sizeof(struct wusb_dn_hdr));
+		goto out;
+	}
+
+	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+	if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
+		dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
+			dn_hdr->bType, srcaddr);
+		goto out;
+	}
+
+	switch (dn_hdr->bType) {
+	case WUSB_DN_CONNECT:
+		wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
+		break;
+	case WUSB_DN_ALIVE:
+		wusbhc_handle_dn_alive(wusbhc, wusb_dev);
+		break;
+	case WUSB_DN_DISCONNECT:
+		wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
+		break;
+	case WUSB_DN_MASAVAILCHANGED:
+	case WUSB_DN_RWAKE:
+	case WUSB_DN_SLEEP:
+		/* FIXME: handle these DNs. */
+		break;
+	case WUSB_DN_EPRDY:
+		/* The hardware handles these. */
+		break;
+	default:
+		dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
+			 dn_hdr->bType, (int)size, srcaddr);
+	}
+out:
+	d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr);
+	return;
+}
+EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
+
+/*
+ * Disconnect a WUSB device from the cluster
+ *
+ * @wusbhc
+ * @port     Fake port where the device is (wusbhc index, not USB port number).
+ *
+ * In Wireless USB, a disconnect is basically telling the device it is
+ * being disconnected and then forgetting about it.
+ *
+ * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
+ * ms and then keep going.
+ *
+ * We don't do much in case of error; we always pretend we disabled
+ * the port and disconnected the device. If physically the request
+ * didn't get there (many things can fail in the way there), the stack
+ * will reject the device's communication attempts.
+ *
+ * @wusbhc should be refcounted and locked
+ */
+void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
+{
+	int result;
+	struct device *dev = wusbhc->dev;
+	struct wusb_dev *wusb_dev;
+	struct wuie_disconnect *ie;
+
+	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
+	result = 0;
+	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+	if (wusb_dev == NULL) {
+		/* disconnect no device? ignore */
+		dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
+			port_idx);
+		goto error;
+	}
+	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+
+	result = -ENOMEM;
+	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+	if (ie == NULL)
+		goto error;
+	ie->hdr.bLength = sizeof(*ie);
+	ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
+	ie->bDeviceAddress = wusb_dev->addr;
+	result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
+	if (result < 0) {
+		dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
+		goto error_kfree;
+	}
+
+	/* 100ms, hopefully a few MMCs */
+	msleep(100);
+	wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+error_kfree:
+	kfree(ie);
+error:
+	d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
+	return;
+}
+
+static void wusb_cap_descr_printf(const unsigned level, struct device *dev,
+				  const struct usb_wireless_cap_descriptor *wcd)
+{
+	d_printf(level, dev,
+		 "WUSB Capability Descriptor\n"
+		 "  bDevCapabilityType          0x%02x\n"
+		 "  bmAttributes                0x%02x\n"
+		 "  wPhyRates                   0x%04x\n"
+		 "  bmTFITXPowerInfo            0x%02x\n"
+		 "  bmFFITXPowerInfo            0x%02x\n"
+		 "  bmBandGroup                 0x%04x\n"
+		 "  bReserved                   0x%02x\n",
+		 wcd->bDevCapabilityType,
+		 wcd->bmAttributes,
+		 le16_to_cpu(wcd->wPHYRates),
+		 wcd->bmTFITXPowerInfo,
+		 wcd->bmFFITXPowerInfo,
+		 wcd->bmBandGroup,
+		 wcd->bReserved);
+}
+
+/*
+ * Walk over the BOS descriptor, verify and grok it
+ *
+ * @usb_dev: referenced
+ * @wusb_dev: referenced and unlocked
+ *
+ * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
+ * "flexible" way to wrap all kinds of descriptors inside an standard
+ * descriptor (wonder why they didn't use normal descriptors,
+ * btw). Not like they lack code.
+ *
+ * At the end we go to look for the WUSB Device Capabilities
+ * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
+ * that is part of the BOS descriptor set. That tells us what the
+ * device supports (dual role, beacon type, UWB PHY rates).
+ */
+static int wusb_dev_bos_grok(struct usb_device *usb_dev,
+			     struct wusb_dev *wusb_dev,
+			     struct usb_bos_descriptor *bos, size_t desc_size)
+{
+	ssize_t result;
+	struct device *dev = &usb_dev->dev;
+	void *itr, *top;
+
+	/* Walk over BOS capabilities, verify them */
+	itr = (void *)bos + sizeof(*bos);
+	top = itr + desc_size - sizeof(*bos);
+	while (itr < top) {
+		struct usb_dev_cap_header *cap_hdr = itr;
+		size_t cap_size;
+		u8 cap_type;
+		if (top - itr < sizeof(*cap_hdr)) {
+			dev_err(dev, "Device BUG? premature end of BOS header "
+				"data [offset 0x%02x]: only %zu bytes left\n",
+				(int)(itr - (void *)bos), top - itr);
+			result = -ENOSPC;
+			goto error_bad_cap;
+		}
+		cap_size = cap_hdr->bLength;
+		cap_type = cap_hdr->bDevCapabilityType;
+		d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n",
+			 cap_type, cap_size);
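+		/* A zero-length capability would never advance; stop walking. */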
+		if (cap_size == 0)
+			break;
+		if (cap_size > top - itr) {
+			dev_err(dev, "Device BUG? premature end of BOS data "
+				"[offset 0x%02x cap %02x %zu bytes]: "
+				"only %zu bytes left\n",
+				(int)(itr - (void *)bos),
+				cap_type, cap_size, top - itr);
+			result = -EBADF;
+			goto error_bad_cap;
+		}
+		d_dump(3, dev, itr, cap_size);
+		switch (cap_type) {
+		case USB_CAP_TYPE_WIRELESS_USB:
+			if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
+				dev_err(dev, "Device BUG? WUSB Capability "
+					"descriptor is %zu bytes vs %zu "
+					"needed\n", cap_size,
+					sizeof(*wusb_dev->wusb_cap_descr));
+			else {
+				wusb_dev->wusb_cap_descr = itr;
+				wusb_cap_descr_printf(3, dev, itr);
+			}
+			break;
+		default:
+			dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
+				"(%zu bytes) at offset 0x%02x\n", cap_type,
+				cap_size, (int)(itr - (void *)bos));
+		}
+		itr += cap_size;
+	}
+	result = 0;
+error_bad_cap:
+	return result;
+}
+
+/*
+ * Add information from the BOS descriptors to the device
+ *
+ * @usb_dev: referenced
+ * @wusb_dev: referenced and unlocked
+ *
+ * So what we do is we allocate a 32 byte buffer for the BOS
+ * descriptor; read the first four bytes, which include the
+ * wTotalLength field (WUSB1.0[T7-26]), and if the whole descriptor
+ * fits in that buffer, read it all. If not, we realloc to the
+ * reported size and read it again.
+ *
+ * Then we call the groking function, that will fill up
+ * wusb_dev->wusb_cap_descr, which is what we'll need later on.
+ */
+static int wusb_dev_bos_add(struct usb_device *usb_dev,
+			    struct wusb_dev *wusb_dev)
+{
+	ssize_t result;
+	struct device *dev = &usb_dev->dev;
+	struct usb_bos_descriptor *bos;
+	size_t alloc_size = 32, desc_size = 4;
+
+	bos = kmalloc(alloc_size, GFP_KERNEL);
+	if (bos == NULL)
+		return -ENOMEM;
+	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
+	if (result < 4) {
+		dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
+			result);
+		goto error_get_descriptor;
+	}
+	desc_size = le16_to_cpu(bos->wTotalLength);
+	if (desc_size >= alloc_size) {
+		kfree(bos);
+		alloc_size = desc_size;
+		bos = kmalloc(alloc_size, GFP_KERNEL);
+		if (bos == NULL)
+			return -ENOMEM;
+	}
+	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
+	if (result < 0 || result != desc_size) {
+		dev_err(dev, "Can't get  BOS descriptor or too short (need "
+			"%zu bytes): %zd\n", desc_size, result);
+		goto error_get_descriptor;
+	}
+	if (result < sizeof(*bos)
+	    || le16_to_cpu(bos->wTotalLength) != desc_size) {
+		dev_err(dev, "Can't get  BOS descriptor or too short (need "
+			"%zu bytes): %zd\n", desc_size, result);
+		goto error_get_descriptor;
+	}
+	d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n",
+		 result, bos->bNumDeviceCaps);
+	d_dump(2, dev, bos, result);
+	result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
+	if (result < 0)
+		goto error_bad_bos;
+	wusb_dev->bos = bos;
+	return 0;
+
+error_bad_bos:
+error_get_descriptor:
+	kfree(bos);
+	wusb_dev->wusb_cap_descr = NULL;
+	return result;
+}
+
+static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
+{
+	kfree(wusb_dev->bos);
+	wusb_dev->wusb_cap_descr = NULL;
+}
+
+static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
+	.bLength = sizeof(wusb_cap_descr_default),
+	.bDescriptorType = USB_DT_DEVICE_CAPABILITY,
+	.bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,
+
+	.bmAttributes = USB_WIRELESS_BEACON_NONE,
+	.wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
+	.bmTFITXPowerInfo = 0,
+	.bmFFITXPowerInfo = 0,
+	.bmBandGroup = cpu_to_le16(0x0001),	/* WUSB1.0[7.4.1] bottom */
+	.bReserved = 0
+};
+
+/*
+ * USB stack's device addition Notifier Callback
+ *
+ * Called from drivers/usb/core/hub.c when a new device is added; we
+ * use this hook to perform certain WUSB specific setup work on the
+ * new device. As well, it is the first time we can connect the
+ * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
+ * reference that we'll drop.
+ *
+ * First we need to determine if the device is a WUSB device (else we
+ * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
+ * [FIXME: maybe we'd need something more definitive]. If so, we track
+ * its usb_bus and, from there, the WUSB HC.
+ *
+ * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
+ * get the wusbhc for the device.
+ *
+ * We have a reference on @usb_dev (as we are called at the end of its
+ * enumeration).
+ *
+ * NOTE: @usb_dev locked
+ */
+static void wusb_dev_add_ncb(struct usb_device *usb_dev)
+{
+	int result = 0;
+	struct wusb_dev *wusb_dev;
+	struct wusbhc *wusbhc;
+	struct device *dev = &usb_dev->dev;
+	u8 port_idx;
+
+	if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
+		return;		/* skip non wusb and wusb RHs */
+
+	d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev);
+
+	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+	if (wusbhc == NULL)
+		goto error_nodev;
+	mutex_lock(&wusbhc->mutex);
+	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
+	port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+	mutex_unlock(&wusbhc->mutex);
+	if (wusb_dev == NULL)
+		goto error_nodev;
+	wusb_dev->usb_dev = usb_get_dev(usb_dev);
+	usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
+	result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
+	if (result < 0) {
+		dev_err(dev, "Cannot enable security: %d\n", result);
+		goto error_sec_add;
+	}
+	/* Now query the device for its BOS and attach it to wusb_dev */
+	result = wusb_dev_bos_add(usb_dev, wusb_dev);
+	if (result < 0) {
+		dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
+		goto error_bos_add;
+	}
+	result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
+	if (result < 0)
+		goto error_add_sysfs;
+out:
+	wusb_dev_put(wusb_dev);
+	wusbhc_put(wusbhc);
+error_nodev:
+	d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev);
+	return;
+
+	wusb_dev_sysfs_rm(wusb_dev);
+error_add_sysfs:
+	wusb_dev_bos_rm(wusb_dev);
+error_bos_add:
+	wusb_dev_sec_rm(wusb_dev);
+error_sec_add:
+	mutex_lock(&wusbhc->mutex);
+	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+	mutex_unlock(&wusbhc->mutex);
+	goto out;
+}
+
+/*
+ * Undo all the steps done at connection by the notifier callback
+ *
+ * NOTE: @usb_dev locked
+ */
+static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
+{
+	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+
+	if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
+		return;		/* skip non wusb and wusb RHs */
+
+	wusb_dev_sysfs_rm(wusb_dev);
+	wusb_dev_bos_rm(wusb_dev);
+	wusb_dev_sec_rm(wusb_dev);
+	wusb_dev->usb_dev = NULL;
+	usb_dev->wusb_dev = NULL;
+	wusb_dev_put(wusb_dev);
+	usb_put_dev(usb_dev);
+}
+
+/*
+ * Handle notifications from the USB stack (notifier call back)
+ *
+ * This is called when the USB stack does a
+ * usb_{bus,device}_{add,remove}() so we can do WUSB specific
+ * handling. It is called with [for the case of
+ * USB_DEVICE_{ADD,REMOVE} with the usb_dev locked.
+ */
+int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
+		 void *priv)
+{
+	int result = NOTIFY_OK;
+
+	switch (val) {
+	case USB_DEVICE_ADD:
+		wusb_dev_add_ncb(priv);
+		break;
+	case USB_DEVICE_REMOVE:
+		wusb_dev_rm_ncb(priv);
+		break;
+	case USB_BUS_ADD:
+		/* ignore (for now) */
+	case USB_BUS_REMOVE:
+		break;
+	default:
+		WARN_ON(1);
+		result = NOTIFY_BAD;
+	}
+	return result;
+}
+
+/*
+ * Return a referenced wusb_dev given a @wusbhc and @usb_dev
+ */
+struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
+					   struct usb_device *usb_dev)
+{
+	struct wusb_dev *wusb_dev;
+	u8 port_idx;
+
+	port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+	BUG_ON(port_idx > wusbhc->ports_max);
+	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+	if (wusb_dev != NULL)		/* NULL if the device is gone */
+		wusb_dev_get(wusb_dev);
+	return wusb_dev;
+}
+EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);
+
+void wusb_dev_destroy(struct kref *_wusb_dev)
+{
+	struct wusb_dev *wusb_dev
+		= container_of(_wusb_dev, struct wusb_dev, refcnt);
+	list_del_init(&wusb_dev->cack_node);
+	wusb_dev_free(wusb_dev);
+	d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev);
+}
+EXPORT_SYMBOL_GPL(wusb_dev_destroy);
+
+/*
+ * Create all the device connect handling infrastructure
+ *
+ * This is basically the device info array, Connect Acknowledgement
+ * (cack) lists, keep-alive timers (and delayed work thread).
+ */
+int wusbhc_devconnect_create(struct wusbhc *wusbhc)
+{
+	d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+
+	wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
+	wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
+	INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
+
+	wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
+	wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
+	INIT_LIST_HEAD(&wusbhc->cack_list);
+
+	d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
+	return 0;
+}
+
+/*
+ * Release all resources taken by the devconnect stuff
+ */
+void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
+{
+	d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+	d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
+}
+
+/*
+ * wusbhc_devconnect_start - start accepting device connections
+ * @wusbhc: the WUSB HC
+ *
+ * Sets the Host Info IE to accept all new connections.
+ *
+ * FIXME: This also enables the keep alives but this is not necessary
+ * until there are connected and authenticated devices.
+ */
+int wusbhc_devconnect_start(struct wusbhc *wusbhc,
+			    const struct wusb_ckhdid *chid)
+{
+	struct device *dev = wusbhc->dev;
+	struct wuie_host_info *hi;
+	int result;
+
+	hi = kzalloc(sizeof(*hi), GFP_KERNEL);
+	if (hi == NULL)
+		return -ENOMEM;
+
+	hi->hdr.bLength       = sizeof(*hi);
+	hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
+	hi->attributes        = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
+	hi->CHID              = *chid;
+	result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
+	if (result < 0) {
+		dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
+		goto error_mmcie_set;
+	}
+	wusbhc->wuie_host_info = hi;
+
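+	/* Kick off keep-alive scanning at half the trust timeout. */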
+	queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+			   (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);
+
+	return 0;
+
+error_mmcie_set:
+	kfree(hi);
+	return result;
+}
+
+/*
+ * wusbhc_devconnect_stop - stop managing connected devices
+ * @wusbhc: the WUSB HC
+ *
+ * Removes the Host Info IE and stops the keep alives.
+ *
+ * FIXME: should this disconnect all devices?
+ */
+void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
+{
+	cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
+	WARN_ON(!list_empty(&wusbhc->cack_list));
+
+	wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
+	kfree(wusbhc->wuie_host_info);
+	wusbhc->wuie_host_info = NULL;
+}
+
+/*
+ * wusb_set_dev_addr - set the WUSB device address used by the host
+ * @wusbhc: the WUSB HC the device is connect to
+ * @wusb_dev: the WUSB device
+ * @addr: new device address
+ */
+int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr)
+{
+	int result;
+
+	wusb_dev->addr = addr;
+	result = wusbhc->dev_info_set(wusbhc, wusb_dev);
+	if (result < 0)
+		dev_err(wusbhc->dev, "device %d: failed to set device "
+			"address\n", wusb_dev->port_idx);
+	else
+		dev_info(wusbhc->dev, "device %d: %s addr %u\n",
+			 wusb_dev->port_idx,
+			 (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
+			 wusb_dev->addr);
+
+	return result;
+}
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
new file mode 100644
index 0000000..cfa77a0
--- /dev/null
+++ b/drivers/usb/wusbcore/mmc.c
@@ -0,0 +1,321 @@
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * MMC (Microscheduled Management Command) handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * WUIEs and MMC IEs...well, they are almost the same at the end. MMC
+ * IEs are Wireless USB IEs that go into the MMC period...[what is
+ * that? look in Design-overview.txt].
+ *
+ *
+ * This is a simple subsystem to keep track of which IEs are being
+ * sent by the host in the MMC period.
+ *
+ * For each WUIE we ask to send, we keep it in an array, so we can
+ * request its removal later, or replace the content. They are tracked
+ * by pointer, so be sure to use the same pointer if you want to
+ * remove it or update the contents.
+ *
+ * FIXME:
+ *  - add timers that autoremove intervalled IEs?
+ */
+#include <linux/usb/wusb.h>
+#include "wusbhc.h"
+
+/* Initialize the MMCIEs handling mechanism */
+int wusbhc_mmcie_create(struct wusbhc *wusbhc)
+{
+	u8 mmcies = wusbhc->mmcies_max;
+	wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
+	if (wusbhc->mmcie == NULL)
+		return -ENOMEM;
+	mutex_init(&wusbhc->mmcie_mutex);
+	return 0;
+}
+
+/* Release resources used by the MMCIEs handling mechanism */
+void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
+{
+	kfree(wusbhc->mmcie);
+}
+
+/*
+ * Add or replace an MMC Wireless USB IE.
+ *
+ * @interval:    See WUSB1.0[8.5.3.1]
+ * @repeat_cnt:  See WUSB1.0[8.5.3.1]
+ * @handle:      See WUSB1.0[8.5.3.1]
+ * @wuie:        Pointer to the header of the WUSB IE data to add.
+ *               MUST BE allocated in a kmalloc buffer (no stack or
+ *               vmalloc).
+ *               THE CALLER ALWAYS OWNS THE POINTER (we don't free it
+ *               on remove, we just forget about it).
+ * @returns:     0 if ok, < 0 errno code on error.
+ *
+ * Goes over the *whole* @wusbhc->mmcie array looking for (a) the
+ * first free spot and (b) if @wuie is already in the array (aka:
+ * transmitted in the MMCs) the spot where it is.
+ *
+ * If present, we "overwrite it" (update).
+ *
+ *
+ * NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38.
+ *       The host uses the handle as the 'sort' index. We always
+ *       allocate the last one for the WUIE_ID_HOST_INFO IE; the
+ *       rest are first come, first served, in inverse order.
+ *
+ *       Host software must make sure that it adds the other IEs in
+ *       the right order... the host hardware is responsible for
+ *       placing the WCTA IEs in the right place with the other IEs
+ *       set by host software.
+ *
+ * NOTE: we can access wusbhc->wa_descr without locking because it is
+ *       read only.
+ */
+int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+		     struct wuie_hdr *wuie)
+{
+	int result = -ENOBUFS;
+	unsigned handle, itr;
+
+	/* Search a handle, taking into account the ordering */
+	mutex_lock(&wusbhc->mmcie_mutex);
+	switch (wuie->bIEIdentifier) {
+	case WUIE_ID_HOST_INFO:
+		/* Always last */
+		handle = wusbhc->mmcies_max - 1;
+		break;
+	case WUIE_ID_ISOCH_DISCARD:
+		dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
+			"unimplemented\n", wuie->bIEIdentifier);
+		result = -ENOSYS;
+		goto error_unlock;
+	default:
+		/* search for it or find the last empty slot */
+		handle = ~0;
+		for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
+			if (wusbhc->mmcie[itr] == wuie) {
+				handle = itr;
+				break;
+			}
+			if (wusbhc->mmcie[itr] == NULL)
+				handle = itr;
+		}
+		if (handle == ~0)
+			goto error_unlock;
+	}
+	result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
+				     wuie);
+	if (result >= 0)
+		wusbhc->mmcie[handle] = wuie;
+error_unlock:
+	mutex_unlock(&wusbhc->mmcie_mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
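+
+/*
+ * Typical usage sketch (for illustration only; see devconnect.c for
+ * real callers): allocate the IE, fill in its header, and keep the
+ * same pointer around to update or remove it later:
+ *
+ *	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+ *	ie->hdr.bLength = sizeof(*ie);
+ *	ie->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
+ *	result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
+ *	...
+ *	wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+ *	kfree(ie);
+ */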
+
+/*
+ * Remove an MMC IE previously added with wusbhc_mmcie_set()
+ *
+ * @wuie	Pointer used to add the WUIE
+ */
+void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
+{
+	int result;
+	unsigned handle, itr;
+
+	mutex_lock(&wusbhc->mmcie_mutex);
+	for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
+		if (wusbhc->mmcie[itr] == wuie) {
+			handle = itr;
+			goto found;
+		}
+	}
+	mutex_unlock(&wusbhc->mmcie_mutex);
+	return;
+
+found:
+	result = (wusbhc->mmcie_rm)(wusbhc, handle);
+	if (result == 0)
+		wusbhc->mmcie[itr] = NULL;
+	mutex_unlock(&wusbhc->mmcie_mutex);
+}
+EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
+
+/*
+ * wusbhc_start - start transmitting MMCs and accepting connections
+ * @wusbhc: the HC to start
+ * @chid: the CHID to use for this host
+ *
+ * Establishes a cluster reservation, enables device connections, and
+ * starts MMCs with appropriate DNTS parameters.
+ */
+int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
+{
+	int result;
+	struct device *dev = wusbhc->dev;
+
+	WARN_ON(wusbhc->wuie_host_info != NULL);
+
+	result = wusbhc_rsv_establish(wusbhc);
+	if (result < 0) {
+		dev_err(dev, "cannot establish cluster reservation: %d\n",
+			result);
+		goto error_rsv_establish;
+	}
+
+	result = wusbhc_devconnect_start(wusbhc, chid);
+	if (result < 0) {
+		dev_err(dev, "error enabling device connections: %d\n", result);
+		goto error_devconnect_start;
+	}
+
+	result = wusbhc_sec_start(wusbhc);
+	if (result < 0) {
+		dev_err(dev, "error starting security in the HC: %d\n", result);
+		goto error_sec_start;
+	}
+	/* FIXME: the choice of the DNTS parameters is somewhat
+	 * arbitrary */
+	result = wusbhc->set_num_dnts(wusbhc, 0, 15);
+	if (result < 0) {
+		dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
+		goto error_set_num_dnts;
+	}
+	result = wusbhc->start(wusbhc);
+	if (result < 0) {
+		dev_err(dev, "error starting wusbch: %d\n", result);
+		goto error_wusbhc_start;
+	}
+	wusbhc->active = 1;
+	return 0;
+
+error_wusbhc_start:
+	wusbhc_sec_stop(wusbhc);
+error_set_num_dnts:
+error_sec_start:
+	wusbhc_devconnect_stop(wusbhc);
+error_devconnect_start:
+	wusbhc_rsv_terminate(wusbhc);
+error_rsv_establish:
+	return result;
+}
+
+/*
+ * Disconnect all from the WUSB Channel
+ *
+ * Send a Host Disconnect IE in the MMC, wait, don't send it any more
+ */
+static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc)
+{
+	int result = -ENOMEM;
+	struct wuie_host_disconnect *host_disconnect_ie;
+	might_sleep();
+	host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL);
+	if (host_disconnect_ie == NULL)
+		goto error_alloc;
+	host_disconnect_ie->hdr.bLength       = sizeof(*host_disconnect_ie);
+	host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT;
+	result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr);
+	if (result < 0)
+		goto error_mmcie_set;
+
+	/* WUSB1.0[8.5.3.1 & 7.5.2] */
+	msleep(100);
+	wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr);
+error_mmcie_set:
+	kfree(host_disconnect_ie);
+error_alloc:
+	return result;
+}
+
+/*
+ * wusbhc_stop - stop transmitting MMCs
+ * @wusbhc: the HC to stop
+ *
+ * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs).
+ *
+ * If we can't allocate a Host Disconnect IE, screw it, we don't
+ * notify the devices we are disconnecting...
+ */
+void wusbhc_stop(struct wusbhc *wusbhc)
+{
+	if (wusbhc->active) {
+		wusbhc->active = 0;
+		wusbhc->stop(wusbhc);
+		wusbhc_sec_stop(wusbhc);
+		__wusbhc_host_disconnect_ie(wusbhc);
+		wusbhc_devconnect_stop(wusbhc);
+		wusbhc_rsv_terminate(wusbhc);
+	}
+}
+EXPORT_SYMBOL_GPL(wusbhc_stop);
+
+/*
+ * Change the CHID in a WUSB Channel
+ *
+ * If it is just a new CHID, send a Host Disconnect IE and then change
+ * the CHID IE.
+ */
+static int __wusbhc_chid_change(struct wusbhc *wusbhc,
+				const struct wusb_ckhdid *chid)
+{
+	int result = -ENOSYS;
+	struct device *dev = wusbhc->dev;
+	dev_err(dev, "%s() not implemented yet\n", __func__);
+	return result;
+
+	BUG_ON(wusbhc->wuie_host_info == NULL);
+	__wusbhc_host_disconnect_ie(wusbhc);
+	wusbhc->wuie_host_info->CHID = *chid;
+	result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr);
+	if (result < 0)
+		dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result);
+	return result;
+}
+
+/*
+ * Set/reset/update a new CHID
+ *
+ * Depending on the previous state of the MMCs, start, stop or change
+ * the sent MMC. This effectively switches the host controller on and
+ * off (radio wise).
+ */
+int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
+{
+	int result = 0;
+
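+	/* An all-zero CHID means "switch the host off" (stop sending MMCs). */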
+	if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
+		chid = NULL;
+
+	mutex_lock(&wusbhc->mutex);
+	if (wusbhc->active) {
+		if (chid)
+			result = __wusbhc_chid_change(wusbhc, chid);
+		else
+			wusbhc_stop(wusbhc);
+	} else {
+		if (chid)
+			wusbhc_start(wusbhc, chid);
+	}
+	mutex_unlock(&wusbhc->mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
new file mode 100644
index 0000000..7cc51e9
--- /dev/null
+++ b/drivers/usb/wusbcore/pal.c
@@ -0,0 +1,42 @@
+/*
+ * Wireless USB Host Controller
+ * UWB Protocol Adaptation Layer (PAL) glue.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "wusbhc.h"
+
+/**
+ * wusbhc_pal_register - register the WUSB HC as a UWB PAL
+ * @wusbhc: the WUSB HC
+ */
+int wusbhc_pal_register(struct wusbhc *wusbhc)
+{
+	uwb_pal_init(&wusbhc->pal);
+
+	wusbhc->pal.name   = "wusbhc";
+	wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
+
+	return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal);
+}
+
+/**
+ * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
+ * @wusbhc: the WUSB HC
+ */
+void wusbhc_pal_unregister(struct wusbhc *wusbhc)
+{
+	uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal);
+}
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
new file mode 100644
index 0000000..fc63e77
--- /dev/null
+++ b/drivers/usb/wusbcore/reservation.c
@@ -0,0 +1,115 @@
+/*
+ * WUSB cluster reservation management
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "wusbhc.h"
+
+/*
+ * WUSB cluster reservations are multicast reservations with the
+ * broadcast cluster ID (BCID) as the target DevAddr.
+ *
+ * FIXME: consider adjusting the reservation depending on what devices
+ * are attached.
+ */
+
+static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
+	const struct uwb_mas_bm *mas)
+{
+	if (mas == NULL)
+		mas = &uwb_mas_bm_zero;
+	return wusbhc->bwa_set(wusbhc, stream, mas);
+}
+
+/**
+ * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
+ * @rsv:    the reservation
+ *
+ * Either set or clear the HC's view of the reservation.
+ *
+ * FIXME: when a reservation is denied the HC should be stopped.
+ */
+static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
+{
+	struct wusbhc *wusbhc = rsv->pal_priv;
+	struct device *dev = wusbhc->dev;
+	char buf[72];
+
+	switch (rsv->state) {
+	case UWB_RSV_STATE_O_ESTABLISHED:
+		bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
+		dev_dbg(dev, "established reservation: %s\n", buf);
+		wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas);
+		break;
+	case UWB_RSV_STATE_NONE:
+		dev_dbg(dev, "removed reservation\n");
+		wusbhc_bwa_set(wusbhc, 0, NULL);
+		wusbhc->rsv = NULL;
+		break;
+	default:
+		dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
+		break;
+	}
+}
+
+
+/**
+ * wusbhc_rsv_establish - establish a reservation for the cluster
+ * @wusbhc: the WUSB HC requesting a bandwidth reservation
+ */
+int wusbhc_rsv_establish(struct wusbhc *wusbhc)
+{
+	struct uwb_rc *rc = wusbhc->uwb_rc;
+	struct uwb_rsv *rsv;
+	struct uwb_dev_addr bcid;
+	int ret;
+
+	rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
+	if (rsv == NULL)
+		return -ENOMEM;
+
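+	/* Target the broadcast cluster ID (BCID): cluster_id in the low byte. */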
+	bcid.data[0] = wusbhc->cluster_id;
+	bcid.data[1] = 0;
+
+	rsv->owner = &rc->uwb_dev;
+	rsv->target.type = UWB_RSV_TARGET_DEVADDR;
+	rsv->target.devaddr = bcid;
+	rsv->type = UWB_DRP_TYPE_PRIVATE;
+	rsv->max_mas = 256;
+	rsv->min_mas = 16;  /* one MAS per zone? */
+	rsv->sparsity = 16; /* at least one MAS in each zone? */
+	rsv->is_multicast = true;
+
+	ret = uwb_rsv_establish(rsv);
+	if (ret == 0)
+		wusbhc->rsv = rsv;
+	else
+		uwb_rsv_destroy(rsv);
+	return ret;
+}
+
+
+/**
+ * wusbhc_rsv_terminate - terminate any cluster reservation
+ * @wusbhc: the WUSB host whose reservation is to be terminated
+ */
+void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
+{
+	if (wusbhc->rsv)
+		uwb_rsv_terminate(wusbhc->rsv);
+}
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
new file mode 100644
index 0000000..267a643
--- /dev/null
+++ b/drivers/usb/wusbcore/rh.c
@@ -0,0 +1,477 @@
+/*
+ * Wireless USB Host Controller
+ * Root Hub operations
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We fake a root hub that has fake ports (as many as simultaneous
+ * devices the Wireless USB Host Controller can deal with). For each
+ * port we keep a state in @wusbhc->port[index] identical to the one
+ * specified in the USB2.0[ch11] spec and some extra device
+ * information that complements the one in 'struct usb_device' (as
+ * this lacks a hcpriv pointer).
+ *
+ * Note this is common to WHCI and HWA host controllers.
+ *
+ * Through here we enable most of the state changes that the USB stack
+ * will use to connect or disconnect devices. We need to do some
+ * forced adaptation of Wireless USB device states vs. wired:
+ *
+ *        USB:                 WUSB:
+ *
+ * Port   Powered-off          port slot n/a
+ *        Powered-on           port slot available
+ *        Disconnected         port slot available
+ *        Connected            port slot assigned device
+ *        		       device sent DN_Connect
+ *                             device was authenticated
+ *        Enabled              device is authenticated, transitioned
+ *                             from unauth -> auth -> default address
+ *                             -> enabled
+ *        Reset                disconnect
+ *        Disable              disconnect
+ *
+ * This maps the standard USB port states with the WUSB device states
+ * so we can fake ports without having to modify the USB stack.
+ *
+ * FIXME: this process will change in the future
+ *
+ *
+ * ENTRY POINTS
+ *
+ * Our entry points into here are, as in hcd.c, the USB stack root hub
+ * ops defined in the usb_hcd struct:
+ *
+ * wusbhc_rh_status_data()	Provide hub and port status data bitmap
+ *
+ * wusbhc_rh_control()          Execution of all the major requests
+ *                              you can do to a hub (Set|Clear
+ *                              features, get descriptors, status, etc).
+ *
+ * wusbhc_rh_[suspend|resume]() That
+ *
+ * wusbhc_rh_start_port_reset() ??? unimplemented
+ */
+#include "wusbhc.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/*
+ * Reset a fake port
+ *
+ * This can be called to reset a port from any other state or to reset
+ * it when connecting. In Wireless USB the two cases differ: a new
+ * connect involves going through authentication, while just
+ * resetting is a different story.
+ *
+ * The Linux USB stack resets a port twice before it considers it
+ * enabled, so we have to detect and ignore that.
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Supposedly we are the only thread accessing @wusbhc->port; in any
+ * case, maybe we should move the mutex locking from
+ * wusbhc_devconnect_auth() to here.
+ *
+ * @port_idx refers to the wusbhc's port index, not the USB port number
+ */
+static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
+{
+	int result = 0;
+	struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
+
+	d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n",
+		  wusbhc, port_idx);
+	if (port->reset_count == 0) {
+		wusbhc_devconnect_auth(wusbhc, port_idx);
+		port->reset_count++;
+	} else if (port->reset_count == 1)
+		/* see header */
+		d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx "
+			"%u\n", port_idx);
+	else
+		result = wusbhc_dev_reset(wusbhc, port_idx);
+	d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n",
+		wusbhc, port_idx, result);
+	return result;
+}
+
+/*
+ * Return the hub change status bitmap
+ *
+ * The bits in the change status bitmap are cleared when a
+ * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4].
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * WARNING!! This gets called from atomic context; we cannot get the
+ *           mutex--the only race condition we can find is some bit
+ *           changing just after we copy it, which shouldn't be too
+ *           big of a problem [and we can't make it a spinlock
+ *           because other parts need to take it and sleep] .
+ *
+ *           @usb_hcd is refcounted, so it won't disappear under us
+ *           and before killing a host, the polling of the root hub
+ *           would be stopped anyway.
+ */
+int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	size_t cnt, size;
+	unsigned long *buf = (unsigned long *) _buf;
+
+	d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+	/* WE DON'T LOCK, see comment */
+	size = wusbhc->ports_max + 1 /* hub bit */;
+	size = (size + 8 - 1) / 8;	/* round to bytes */
+	for (cnt = 0; cnt < wusbhc->ports_max; cnt++)
+		if (wusb_port_by_idx(wusbhc, cnt)->change)
+			set_bit(cnt + 1, buf);
+		else
+			clear_bit(cnt + 1, buf);
+	d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size);
+	d_dump(1, wusbhc->dev, _buf, size);
+	return size;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
+
+/*
+ * Return the hub's descriptor
+ *
+ * NOTE: almost cut and paste from ehci-hub.c
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked
+ */
+static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
+				   u16 wIndex,
+				   struct usb_hub_descriptor *descr,
+				   u16 wLength)
+{
+	u16 temp = 1 + (wusbhc->ports_max / 8);
+	u8 length = 7 + 2 * temp;
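+	/* 7 fixed bytes plus two variable-length bitmaps (DeviceRemovable
+	 * and PortPwrCtrlMask), each 1 + ports_max/8 bytes long. */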
+
+	if (wLength < length)
+		return -ENOSPC;
+	descr->bDescLength = 7 + 2 * temp;
+	descr->bDescriptorType = 0x29;	/* HUB type */
+	descr->bNbrPorts = wusbhc->ports_max;
+	descr->wHubCharacteristics = cpu_to_le16(
+		0x00			/* All ports power at once */
+		| 0x00			/* not part of compound device */
+		| 0x10			/* No overcurrent protection */
+		| 0x00			/* 8 FS think time FIXME ?? */
+		| 0x00);		/* No port indicators */
+	descr->bPwrOn2PwrGood = 0;
+	descr->bHubContrCurrent = 0;
+	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	memset(&descr->bitmap[0], 0, temp);
+	memset(&descr->bitmap[temp], 0xff, temp);
+	return 0;
+}
+
+/*
+ * Clear a hub feature
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Nothing to do, so no locking needed ;)
+ */
+static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
+{
+	int result;
+	struct device *dev = wusbhc->dev;
+
+	d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature);
+	switch (feature) {
+	case C_HUB_LOCAL_POWER:
+		/* FIXME: maybe plug bit 0 to the power input status,
+		 * if any?
+		 * see wusbhc_rh_get_hub_status() */
+	case C_HUB_OVER_CURRENT:
+		result = 0;
+		break;
+	default:
+		result = -EPIPE;
+	}
+	d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result);
+	return result;
+}
+
+/*
+ * Return hub status (it is always zero...)
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Nothing to do, so no locking needed ;)
+ */
+static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
+				    u16 wLength)
+{
+	/* FIXME: maybe plug bit 0 to the power input status (if any)? */
+	*buf = 0;
+	return 0;
+}
+
+/*
+ * Set a port feature
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
+				   u8 selector, u8 port_idx)
+{
+	int result = -EINVAL;
+	struct device *dev = wusbhc->dev;
+
+	d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n",
+		  feature, selector, port_idx);
+
+	if (port_idx > wusbhc->ports_max)
+		goto error;
+
+	switch (feature) {
+		/* According to USB2.0[11.24.2.13]p2, these features
+		 * are not required to be implemented. */
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+	case USB_PORT_FEAT_C_ENABLE:
+	case USB_PORT_FEAT_C_SUSPEND:
+	case USB_PORT_FEAT_C_CONNECTION:
+	case USB_PORT_FEAT_C_RESET:
+		result = 0;
+		break;
+
+	case USB_PORT_FEAT_POWER:
+		/* No such thing, but we fake it works */
+		mutex_lock(&wusbhc->mutex);
+		wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
+		mutex_unlock(&wusbhc->mutex);
+		result = 0;
+		break;
+	case USB_PORT_FEAT_RESET:
+		result = wusbhc_rh_port_reset(wusbhc, port_idx);
+		break;
+	case USB_PORT_FEAT_ENABLE:
+	case USB_PORT_FEAT_SUSPEND:
+		dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
+			port_idx, feature, selector);
+		result = -ENOSYS;
+		break;
+	default:
+		dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
+			port_idx, feature, selector);
+		result = -EPIPE;
+		break;
+	}
+error:
+	d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n",
+		feature, selector, port_idx, result);
+	return result;
+}
+
+/*
+ * Clear a port feature...
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
+				     u8 selector, u8 port_idx)
+{
+	int result = -EINVAL;
+	struct device *dev = wusbhc->dev;
+
+	d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n",
+		  wusbhc, feature, selector, port_idx);
+
+	if (port_idx > wusbhc->ports_max)
+		goto error;
+
+	mutex_lock(&wusbhc->mutex);
+	result = 0;
+	switch (feature) {
+	case USB_PORT_FEAT_POWER:	/* fake port always on */
+		/* According to USB2.0[11.24.2.7.1.4], no need to implement? */
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+		break;
+	case USB_PORT_FEAT_C_RESET:
+		wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
+		break;
+	case USB_PORT_FEAT_C_CONNECTION:
+		wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
+		break;
+	case USB_PORT_FEAT_ENABLE:
+		__wusbhc_dev_disable(wusbhc, port_idx);
+		break;
+	case USB_PORT_FEAT_C_ENABLE:
+		wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
+		break;
+	case USB_PORT_FEAT_SUSPEND:
+	case USB_PORT_FEAT_C_SUSPEND:
+	case 0xffff:		/* ??? FIXME */
+		dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
+			port_idx, feature, selector);
+		/* dump_stack(); */
+		result = -ENOSYS;
+		break;
+	default:
+		dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
+			port_idx, feature, selector);
+		result = -EPIPE;
+		break;
+	}
+	mutex_unlock(&wusbhc->mutex);
+error:
+	d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = "
+		"%d\n", wusbhc, feature, selector, port_idx, result);
+	return result;
+}
+
+/*
+ * Return the port's status
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
+				     u32 *_buf, u16 wLength)
+{
+	int result = -EINVAL;
+	u16 *buf = (u16 *) _buf;
+
+	d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n",
+		  wusbhc, port_idx, wLength);
+	if (port_idx > wusbhc->ports_max)
+		goto error;
+	mutex_lock(&wusbhc->mutex);
+	buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
+	buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
+	result = 0;
+	mutex_unlock(&wusbhc->mutex);
+error:
+	d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result);
+	d_dump(1, wusbhc->dev, _buf, wLength);
+	return result;
+}
+
+/*
+ * Entry point for Root Hub operations
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
+		      u16 wIndex, char *buf, u16 wLength)
+{
+	int result = -ENOSYS;
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+
+	switch (reqntype) {
+	case GetHubDescriptor:
+		result = wusbhc_rh_get_hub_descr(
+			wusbhc, wValue, wIndex,
+			(struct usb_hub_descriptor *) buf, wLength);
+		break;
+	case ClearHubFeature:
+		result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
+		break;
+	case GetHubStatus:
+		result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
+		break;
+
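+	/* For port requests, wIndex carries the selector in the high byte
+	 * and the 1-based port number in the low byte; our port array is
+	 * 0-based, hence the -1. */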
+	case SetPortFeature:
+		result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
+						 (wIndex & 0xff) - 1);
+		break;
+	case ClearPortFeature:
+		result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
+						   (wIndex & 0xff) - 1);
+		break;
+	case GetPortStatus:
+		result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
+						   (u32 *)buf, wLength);
+		break;
+
+	case SetHubFeature:
+	default:
+		dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
+			"UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
+			wValue, wIndex, buf, wLength);
+		/* dump_stack(); */
+		result = -ENOSYS;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_control);
+
+int wusbhc_rh_suspend(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+		usb_hcd, wusbhc);
+	/* dump_stack(); */
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_suspend);
+
+int wusbhc_rh_resume(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+		usb_hcd, wusbhc);
+	/* dump_stack(); */
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_resume);
+
+int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
+		__func__, usb_hcd, wusbhc, port_idx);
+	WARN_ON(1);
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
+
+static void wusb_port_init(struct wusb_port *port)
+{
+	port->status |= USB_PORT_STAT_HIGH_SPEED;
+}
+
+/*
+ * Alloc fake port specific fields and status.
+ */
+int wusbhc_rh_create(struct wusbhc *wusbhc)
+{
+	int result = -ENOMEM;
+	size_t port_size, itr;
+	port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
+	wusbhc->port = kzalloc(port_size, GFP_KERNEL);
+	if (wusbhc->port == NULL)
+		goto error_port_alloc;
+	for (itr = 0; itr < wusbhc->ports_max; itr++)
+		wusb_port_init(&wusbhc->port[itr]);
+	result = 0;
+error_port_alloc:
+	return result;
+}
+
+void wusbhc_rh_destroy(struct wusbhc *wusbhc)
+{
+	kfree(wusbhc->port);
+}
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
new file mode 100644
index 0000000..a101cad
--- /dev/null
+++ b/drivers/usb/wusbcore/security.c
@@ -0,0 +1,642 @@
+/*
+ * Wireless USB Host Controller
+ * Security support: encryption enablement, etc
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+#include <linux/random.h>
+#include "wusbhc.h"
+
+/*
+ * DEBUG & SECURITY WARNING!!!!
+ *
+ * If you enable this past 1, the debug code will weaken the
+ * cryptographic safety of the system (on purpose, for debugging).
+ *
+ * Weaken means:
+ *   we print secret keys and intermediate values all the way,
+ */
+#undef D_LOCAL
+#define D_LOCAL 2
+#include <linux/uwb/debug.h>
+
+static void wusbhc_set_gtk_callback(struct urb *urb);
+static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
+
+int wusbhc_sec_create(struct wusbhc *wusbhc)
+{
+	wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
+	wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
+	wusbhc->gtk.descr.bReserved = 0;
+
+	wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+					   WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+	INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
+
+	return 0;
+}
+
+
+/* Called when the HC is destroyed */
+void wusbhc_sec_destroy(struct wusbhc *wusbhc)
+{
+}
+
+
+/**
+ * wusbhc_next_tkid - generate a new, currently unused, TKID
+ * @wusbhc:   the WUSB host controller
+ * @wusb_dev: the device whose PTK the TKID is for
+ *            (or NULL for a TKID for a GTK)
+ *
+ * The generated TKID consists of two parts: the device's authenticated
+ * address (or 0 for a GTK) and an incrementing number.  This ensures
+ * that TKIDs cannot be shared between devices and by the time the
+ * incrementing number wraps around the older TKIDs will no longer be
+ * in use (a maximum of two keys may be active at any one time).
+ */
+static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	u32 *tkid;
+	u32 addr;
+
+	if (wusb_dev == NULL) {
+		tkid = &wusbhc->gtk_tkid;
+		addr = 0;
+	} else {
+		tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
+		addr = wusb_dev->addr & 0x7f;
+	}
+
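+	/* TKID layout: device address in the upper bits, 8-bit rolling
+	 * counter in the low byte. */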
+	*tkid = (addr << 8) | ((*tkid + 1) & 0xff);
+
+	return *tkid;
+}
+
+static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
+{
+	const size_t key_size = sizeof(wusbhc->gtk.data);
+	u32 tkid;
+
+	tkid = wusbhc_next_tkid(wusbhc, NULL);
+
+	wusbhc->gtk.descr.tTKID[0] = (tkid >>  0) & 0xff;
+	wusbhc->gtk.descr.tTKID[1] = (tkid >>  8) & 0xff;
+	wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
+
+	get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
+}
+
+/**
+ * wusbhc_sec_start - start the security management process
+ * @wusbhc: the WUSB host controller
+ *
+ * Generate and set an initial GTK on the host controller.
+ *
+ * Called when the HC is started.
+ */
+int wusbhc_sec_start(struct wusbhc *wusbhc)
+{
+	const size_t key_size = sizeof(wusbhc->gtk.data);
+	int result;
+
+	wusbhc_generate_gtk(wusbhc);
+
+	result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
+				 &wusbhc->gtk.descr.bKeyData, key_size);
+	if (result < 0)
+		dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
+			result);
+
+	return result;
+}
+
+/**
+ * wusbhc_sec_stop - stop the security management process
+ * @wusbhc: the WUSB host controller
+ *
+ * Wait for any pending GTK rekeys to stop.
+ */
+void wusbhc_sec_stop(struct wusbhc *wusbhc)
+{
+	cancel_work_sync(&wusbhc->gtk_rekey_done_work);
+}
+
+
+/** @returns encryption type name */
+const char *wusb_et_name(u8 x)
+{
+	switch (x) {
+	case USB_ENC_TYPE_UNSECURE:	return "unsecure";
+	case USB_ENC_TYPE_WIRED:	return "wired";
+	case USB_ENC_TYPE_CCM_1:	return "CCM-1";
+	case USB_ENC_TYPE_RSA_1:	return "RSA-1";
+	default: 			return "unknown";
+	}
+}
+EXPORT_SYMBOL_GPL(wusb_et_name);
+
+/*
+ * Set the device encryption method
+ *
+ * We tell the device which encryption method to use; we do this when
+ * setting up the device's security.
+ */
+static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
+{
+	int result;
+	struct device *dev = &usb_dev->dev;
+	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+
+	if (value) {
+		value = wusb_dev->ccm1_etd.bEncryptionValue;
+	} else {
+		/* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
+		value = 0;
+	}
+	/* Set the device's encryption type */
+	result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+			USB_REQ_SET_ENCRYPTION,
+			USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+			value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+	if (result < 0)
+		dev_err(dev, "Can't set device's WUSB encryption to "
+			"%s (value %d): %d\n",
+			wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
+			wusb_dev->ccm1_etd.bEncryptionValue,  result);
+	return result;
+}
+
+/*
+ * Set the GTK to be used by a device.
+ *
+ * The device must be authenticated.
+ */
+static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct usb_device *usb_dev = wusb_dev->usb_dev;
+
+	return usb_control_msg(
+		usb_dev, usb_sndctrlpipe(usb_dev, 0),
+		USB_REQ_SET_DESCRIPTOR,
+		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+		USB_DT_KEY << 8 | wusbhc->gtk_index, 0,
+		&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
+		1000);
+}
+
+
+/* FIXME: prototype for adding security */
+int wusb_dev_sec_add(struct wusbhc *wusbhc,
+		     struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
+{
+	int result, bytes, secd_size;
+	struct device *dev = &usb_dev->dev;
+	struct usb_security_descriptor secd;
+	const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
+	void *secd_buf;
+	const void *itr, *top;
+	char buf[64];
+
+	d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev);
+	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
+				    0, &secd, sizeof(secd));
+	if (result < (int)sizeof(secd)) {
+		dev_err(dev, "Can't read security descriptor or "
+			"not enough data: %d\n", result);
+		if (result >= 0)
+			result = -EIO;
+		goto error_secd;
+	}
+	secd_size = le16_to_cpu(secd.wTotalLength);
+	d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n",
+		 result, secd_size);
+	secd_buf = kmalloc(secd_size, GFP_KERNEL);
+	if (secd_buf == NULL) {
+		dev_err(dev, "Can't allocate space for security descriptors\n");
+		result = -ENOMEM;
+		goto error_secd_alloc;
+	}
+	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
+				    0, secd_buf, secd_size);
+	if (result < secd_size) {
+		dev_err(dev, "Can't read security descriptor or "
+			"not enough data: %d\n", result);
+		if (result >= 0)
+			result = -EIO;
+		goto error_secd_all;
+	}
+	d_printf(5, dev, "got %d bytes of sec descriptors\n", result);
+	bytes = 0;
+	itr = secd_buf + sizeof(secd);
+	top = secd_buf + result;
+	while (itr < top) {
+		etd = itr;
+		if (top - itr < sizeof(*etd)) {
+			dev_err(dev, "BUG: bad device security descriptor; "
+				"not enough data (%zu vs %zu bytes left)\n",
+				(size_t)(top - itr), sizeof(*etd));
+			break;
+		}
+		if (etd->bLength < sizeof(*etd)) {
+			dev_err(dev, "BUG: bad device encryption descriptor; "
+				"descriptor is too short "
+				"(%u vs %zu needed)\n",
+				etd->bLength, sizeof(*etd));
+			break;
+		}
+		itr += etd->bLength;
+		bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
+				  "%s (0x%02x/%02x) ",
+				  wusb_et_name(etd->bEncryptionType),
+				  etd->bEncryptionValue, etd->bAuthKeyIndex);
+		if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
+			ccm1_etd = etd;
+	}
+	/* This code only supports CCM1 as of now. In theory we want
+	 * CCM, but perhaps the user should be able to choose which
+	 * security mode to use (FIXME). */
+	if (ccm1_etd == NULL) {
+		dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
+			"can't use!\n");
+		result = -EINVAL;
+		goto error_no_ccm1;
+	}
+	wusb_dev->ccm1_etd = *ccm1_etd;
+	dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
+		 buf, wusb_et_name(ccm1_etd->bEncryptionType),
+		 ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
+	result = 0;
+	kfree(secd_buf);
+out:
+	d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n",
+		usb_dev, wusb_dev, result);
+	return result;
+
+
+error_no_ccm1:
+error_secd_all:
+	kfree(secd_buf);
+error_secd_alloc:
+error_secd:
+	goto out;
+}
+
+void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
+{
+	/* Nothing so far */
+}
+
+static void hs_printk(unsigned level, struct device *dev,
+		      struct usb_handshake *hs)
+{
+	d_printf(level, dev,
+		 "  bMessageNumber: %u\n"
+		 "  bStatus:        %u\n"
+		 "  tTKID:          %02x %02x %02x\n"
+		 "  CDID:           %02x %02x %02x %02x %02x %02x %02x %02x\n"
+		 "                  %02x %02x %02x %02x %02x %02x %02x %02x\n"
+		 "  nonce:          %02x %02x %02x %02x %02x %02x %02x %02x\n"
+		 "                  %02x %02x %02x %02x %02x %02x %02x %02x\n"
+		 "  MIC:            %02x %02x %02x %02x %02x %02x %02x %02x\n",
+		 hs->bMessageNumber, hs->bStatus,
+		 hs->tTKID[2], hs->tTKID[1], hs->tTKID[0],
+		 hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3],
+		 hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7],
+		 hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11],
+		 hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15],
+		 hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3],
+		 hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7],
+		 hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11],
+		 hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15],
+		 hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3],
+		 hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]);
+}
+
+/**
+ * Update the address of an unauthenticated WUSB device
+ *
+ * Once we have successfully authenticated the device, we take it
+ * through the addr0 state and then to a normal address.
+ *
+ * Before this, the device's address (as known by the device) was
+ * usb_dev->devnum | 0x80 (the unauthenticated address). Here we update
+ * it to usb_dev->devnum.
+ */
+static int wusb_dev_update_address(struct wusbhc *wusbhc,
+				   struct wusb_dev *wusb_dev)
+{
+	int result = -ENOMEM;
+	struct usb_device *usb_dev = wusb_dev->usb_dev;
+	struct device *dev = &usb_dev->dev;
+	u8 new_address = wusb_dev->addr & 0x7F;
+
+	/* Set address 0 */
+	result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+				 USB_REQ_SET_ADDRESS, 0,
+				 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "auth failed: can't set address 0: %d\n",
+			result);
+		goto error_addr0;
+	}
+	result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
+	if (result < 0)
+		goto error_addr0;
+	usb_ep0_reinit(usb_dev);
+
+	/* Set new (authenticated) address. */
+	result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+				 USB_REQ_SET_ADDRESS, 0,
+				 new_address, 0, NULL, 0,
+				 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "auth failed: can't set address %u: %d\n",
+			new_address, result);
+		goto error_addr;
+	}
+	result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
+	if (result < 0)
+		goto error_addr;
+	usb_ep0_reinit(usb_dev);
+	usb_dev->authenticated = 1;
+error_addr:
+error_addr0:
+	return result;
+}
+
+/*
+ * Perform the WUSB 4-way handshake with a device and install the
+ * resulting keys (the PTK on the host side, the GTK on the device).
+ *
+ * FIXME: split and cleanup
+ */
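+/*
+ * Summary of the steps below:
+ *
+ *  1. Enable CCM1 encryption on the device and pick a fresh TKID.
+ *  2. HS1 (host -> device): send TKID, CDID and a random host nonce.
+ *  3. HS2 (device -> host): verify message number, status, TKID and
+ *     CDID; derive the KCK/PTK from the CK and both nonces and check
+ *     the handshake MIC.
+ *  4. HS3 (host -> device): echo the host nonce with a MIC computed
+ *     with the KCK.
+ *  5. Program the PTK into the host, hand the GTK to the device and,
+ *     if needed, move the device to its authenticated address.
+ */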
+int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
+			    struct wusb_ckhdid *ck)
+{
+	int result = -ENOMEM;
+	struct usb_device *usb_dev = wusb_dev->usb_dev;
+	struct device *dev = &usb_dev->dev;
+	u32 tkid;
+	__le32 tkid_le;
+	struct usb_handshake *hs;
+	struct aes_ccm_nonce ccm_n;
+	u8 mic[8];
+	struct wusb_keydvt_in keydvt_in;
+	struct wusb_keydvt_out keydvt_out;
+
+	hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
+	if (hs == NULL) {
+		dev_err(dev, "can't allocate handshake data\n");
+		goto error_kzalloc;
+	}
+
+	/* We need to turn on encryption before beginning the 4-way
+	 * handshake (WUSB1.0[.3.2.2]) */
+	result = wusb_dev_set_encryption(usb_dev, 1);
+	if (result < 0)
+		goto error_dev_set_encryption;
+
+	tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
+	tkid_le = cpu_to_le32(tkid);
+
+	hs[0].bMessageNumber = 1;
+	hs[0].bStatus = 0;
+	memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
+	hs[0].bReserved = 0;
+	memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
+	get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
+	memset(hs[0].MIC, 0, sizeof(hs[0].MIC));	/* Per WUSB1.0[T7-22] */
+
+	d_printf(1, dev, "I: sending hs1:\n");
+	hs_printk(2, dev, &hs[0]);
+
+	result = usb_control_msg(
+		usb_dev, usb_sndctrlpipe(usb_dev, 0),
+		USB_REQ_SET_HANDSHAKE,
+		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+		1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "Handshake1: request failed: %d\n", result);
+		goto error_hs1;
+	}
+
+	/* Handshake 2, from the device -- need to verify fields */
+	result = usb_control_msg(
+		usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+		USB_REQ_GET_HANDSHAKE,
+		USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+		2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "Handshake2: request failed: %d\n", result);
+		goto error_hs2;
+	}
+	d_printf(1, dev, "got HS2:\n");
+	hs_printk(2, dev, &hs[1]);
+
+	result = -EINVAL;
+	if (hs[1].bMessageNumber != 2) {
+		dev_err(dev, "Handshake2 failed: bad message number %u\n",
+			hs[1].bMessageNumber);
+		goto error_hs2;
+	}
+	if (hs[1].bStatus != 0) {
+		dev_err(dev, "Handshake2 failed: bad status %u\n",
+			hs[1].bStatus);
+		goto error_hs2;
+	}
+	if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
+		dev_err(dev, "Handshake2 failed: TKID mismatch "
+			"(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
+			hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
+			hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
+		goto error_hs2;
+	}
+	if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
+		dev_err(dev, "Handshake2 failed: CDID mismatch\n");
+		goto error_hs2;
+	}
+
+	/* Setup the CCM nonce */
+	memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn));	/* Per WUSB1.0[6.5.2] */
+	memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
+	ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
+	ccm_n.dest_addr.data[0] = wusb_dev->addr;
+	ccm_n.dest_addr.data[1] = 0;
+
+	/* Derive the KCK and PTK from CK, the CCM, H and D nonces */
+	memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
+	memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
+	result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
+	if (result < 0) {
+		dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
+			result);
+		goto error_hs2;
+	}
+	d_printf(2, dev, "KCK:\n");
+	d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck));
+	d_printf(2, dev, "PTK:\n");
+	d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
+
+	/* Compute MIC and verify it */
+	result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
+	if (result < 0) {
+		dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
+			result);
+		goto error_hs2;
+	}
+
+	d_printf(2, dev, "MIC:\n");
+	d_dump(2, dev, mic, sizeof(mic));
+	if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
+		dev_err(dev, "Handshake2 failed: MIC mismatch\n");
+		goto error_hs2;
+	}
+
+	/* Send Handshake3 */
+	hs[2].bMessageNumber = 3;
+	hs[2].bStatus = 0;
+	memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
+	hs[2].bReserved = 0;
+	memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
+	memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
+	result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
+	if (result < 0) {
+		dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
+			result);
+		goto error_hs2;
+	}
+
+	d_printf(1, dev, "I: sending hs3:\n");
+	hs_printk(2, dev, &hs[2]);
+
+	result = usb_control_msg(
+		usb_dev, usb_sndctrlpipe(usb_dev, 0),
+		USB_REQ_SET_HANDSHAKE,
+		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+		3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "Handshake3: request failed: %d\n", result);
+		goto error_hs3;
+	}
+
+	d_printf(1, dev, "I: turning on encryption on host for device\n");
+	d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
+	result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
+				 keydvt_out.ptk, sizeof(keydvt_out.ptk));
+	if (result < 0)
+		goto error_wusbhc_set_ptk;
+
+	d_printf(1, dev, "I: setting a GTK\n");
+	result = wusb_dev_set_gtk(wusbhc, wusb_dev);
+	if (result < 0) {
+		dev_err(dev, "Set GTK for device: request failed: %d\n",
+			result);
+		goto error_wusbhc_set_gtk;
+	}
+
+	/* Update the device's address from unauth to auth */
+	if (usb_dev->authenticated == 0) {
+		d_printf(1, dev, "I: updating address from non-auth to auth\n");
+		result = wusb_dev_update_address(wusbhc, wusb_dev);
+		if (result < 0)
+			goto error_dev_update_address;
+	}
+	result = 0;
+	d_printf(1, dev, "I: 4-way handshake done, device authenticated\n");
+
+error_dev_update_address:
+error_wusbhc_set_gtk:
+error_wusbhc_set_ptk:
+error_hs3:
+error_hs2:
+error_hs1:
+	memset(hs, 0, 3*sizeof(hs[0]));
+	memset(&keydvt_out, 0, sizeof(keydvt_out));
+	memset(&keydvt_in, 0, sizeof(keydvt_in));
+	memset(&ccm_n, 0, sizeof(ccm_n));
+	memset(mic, 0, sizeof(mic));
+	if (result < 0) {
+		/* error path */
+		wusb_dev_set_encryption(usb_dev, 0);
+	}
+error_dev_set_encryption:
+	kfree(hs);
+error_kzalloc:
+	return result;
+}
+
+/*
+ * Once all connected and authenticated devices have received the new
+ * GTK, switch the host to using it.
+ */
+static void wusbhc_gtk_rekey_done_work(struct work_struct *work)
+{
+	struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work);
+	size_t key_size = sizeof(wusbhc->gtk.data);
+
+	mutex_lock(&wusbhc->mutex);
+
+	if (--wusbhc->pending_set_gtks == 0)
+		wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
+
+	mutex_unlock(&wusbhc->mutex);
+}
+
+static void wusbhc_set_gtk_callback(struct urb *urb)
+{
+	struct wusbhc *wusbhc = urb->context;
+
+	queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
+}
+
+/**
+ * wusbhc_gtk_rekey - generate and distribute a new GTK
+ * @wusbhc: the WUSB host controller
+ *
+ * Generate a new GTK and distribute it to all connected and
+ * authenticated devices.  When all devices have the new GTK, the host
+ * starts using it.
+ *
+ * This must be called after every device disconnect (see [WUSB]
+ * section 6.2.11.2).
+ */
+void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
+{
+	static const size_t key_size = sizeof(wusbhc->gtk.data);
+	int p;
+
+	wusbhc_generate_gtk(wusbhc);
+
+	for (p = 0; p < wusbhc->ports_max; p++) {
+		struct wusb_dev *wusb_dev;
+
+		wusb_dev = wusbhc->port[p].wusb_dev;
+		if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
+			continue;
+
+		usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
+				     usb_sndctrlpipe(wusb_dev->usb_dev, 0),
+				     (void *)wusb_dev->set_gtk_req,
+				     &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
+				     wusbhc_set_gtk_callback, wusbhc);
+		if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
+			wusbhc->pending_set_gtks++;
+	}
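+	/* No SET_DESCRIPTOR URBs in flight (no authenticated devices, or
+	 * all submissions failed): switch the host to the new GTK now. */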
+	if (wusbhc->pending_set_gtks == 0)
+		wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
+}
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
new file mode 100644
index 0000000..9d04722
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -0,0 +1,95 @@
+/*
+ * Wire Adapter Host Controller Driver
+ * Common items to HWA and DWA based HCDs
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+#include "wusbhc.h"
+#include "wa-hc.h"
+
+/**
+ * Assumes
+ *
+ * wa->usb_dev and wa->usb_iface initialized and refcounted,
+ * wa->wa_descr initialized.
+ */
+int wa_create(struct wahc *wa, struct usb_interface *iface)
+{
+	int result;
+	struct device *dev = &iface->dev;
+
+	result = wa_rpipes_create(wa);
+	if (result < 0)
+		goto error_rpipes_create;
+	/* Fill up Data Transfer EP pointers: endpoint[1] is expected to
+	 * be the Data Transfer In (DTI) EP and endpoint[2] the Data
+	 * Transfer Out (DTO) EP. */
+	wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
+	wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
+	wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
+	wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
+	if (wa->xfer_result == NULL) {
+		result = -ENOMEM;
+		goto error_xfer_result_alloc;
+	}
+	result = wa_nep_create(wa, iface);
+	if (result < 0) {
+		dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
+			result);
+		goto error_nep_create;
+	}
+	return 0;
+
+error_nep_create:
+	kfree(wa->xfer_result);
+error_xfer_result_alloc:
+	wa_rpipes_destroy(wa);
+error_rpipes_create:
+	return result;
+}
+EXPORT_SYMBOL_GPL(wa_create);
+
+
+void __wa_destroy(struct wahc *wa)
+{
+	if (wa->dti_urb) {
+		usb_kill_urb(wa->dti_urb);
+		usb_put_urb(wa->dti_urb);
+		usb_kill_urb(wa->buf_in_urb);
+		usb_put_urb(wa->buf_in_urb);
+	}
+	kfree(wa->xfer_result);
+	wa_nep_destroy(wa);
+	wa_rpipes_destroy(wa);
+}
+EXPORT_SYMBOL_GPL(__wa_destroy);
+
+/**
+ * wa_reset_all - reset the WA device
+ * @wa: the WA to be reset
+ *
+ * For HWAs the radio controller and all other PALs are also reset.
+ */
+void wa_reset_all(struct wahc *wa)
+{
+	/* FIXME: assuming HWA. */
+	wusbhc_reset_all(wa->wusb);
+}
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
new file mode 100644
index 0000000..586d350
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -0,0 +1,417 @@
+/*
+ * HWA Host Controller Driver
+ * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver implements a USB Host Controller (struct usb_hcd) for a
+ * Wireless USB Host Controller based on the Wireless USB 1.0
+ * Host-Wire-Adapter specification (in layman terms, a USB-dongle that
+ * implements a Wireless USB host).
+ *
+ * Check out the Design-overview.txt file in the source documentation
+ * for other details on the implementation.
+ *
+ * Main blocks:
+ *
+ *  driver     glue with the driver API, workqueue daemon
+ *
+ *  lc         RC instance life cycle management (create, destroy...)
+ *
+ *  hcd        glue with the USB stack's Host Controller Interface (HCD) API.
+ *
+ *  nep        Notification EndPoint management: collect notifications
+ *             and queue them with the workqueue daemon.
+ *
+ *             Handle notifications as coming from the NEP. Sends them
+ *             off to their respective modules (eg: connect,
+ *             disconnect and reset go to devconnect).
+ *
+ *  rpipe      Remote Pipe management; rpipe is what we use to write
+ *             to an endpoint on a WUSB device that is connected to a
+ *             HWA RC.
+ *
+ *  xfer       Transfer management -- this is all the code that gets a
+ *             buffer and pushes it to a device (or vice versa).
+ *
+ * Some day a lot of this code will be shared between this driver and
+ * the drivers for DWA (xfer, rpipe).
+ *
+ * Everything starts at driver.c:hwahc_probe(), when one of these
+ * devices is connected. hwahc_disconnect() stops it.
+ *
+ * During operation, the main activity is devices connecting or
+ * disconnecting. They cause the HWA RC to send notifications into
+ * nep.c:hwahc_nep_cb() that will dispatch them to
+ * notif.c:wa_notif_dispatch(). From there they will fan to cause
+ * device connects, disconnects, etc.
+ *
+ * Note much of the activity is difficult to follow. For example a
+ * device connect goes to devconnect, which will cause the "fake" root
+ * hub port to show a connect and stop there. Then khubd will notice
+ * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
+ * the device (and this might require user intervention) and enable
+ * the port.
+ *
+ * We also have a timer workqueue going from devconnect.c that
+ * schedules in hwahc_devconnect_create().
+ *
+ * The rest of the traffic is in the usual entry points of a USB HCD,
+ * which are hooked up in driver.c:hwahc_rc_driver, and defined in
+ * hcd.c.
+ */
+
+#ifndef __HWAHC_INTERNAL_H__
+#define __HWAHC_INTERNAL_H__
+
+#include <linux/completion.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/wusb-wa.h>
+
+struct wusbhc;
+struct wahc;
+extern void wa_urb_enqueue_run(struct work_struct *ws);
+
+/**
+ * RPipe instance
+ *
+ * @descr's fields are kept in LE, as we need to send it back and
+ * forth.
+ *
+ * @wa is referenced when set
+ *
+ * @segs_available is the number of request segments that can still
+ *                 be submitted to the controller without overloading
+ *                 it. It is initialized to descr->wRequests when
+ *                 aiming.
+ *
+ * An rpipe supports a max of descr->wRequests at the same time; before
+ * submitting, seg_lock has to be taken. If segs_available > 0, then we
+ * can submit; if not, we have to queue them.
+ */
+struct wa_rpipe {
+	struct kref refcnt;
+	struct usb_rpipe_descriptor descr;
+	struct usb_host_endpoint *ep;
+	struct wahc *wa;
+	spinlock_t seg_lock;
+	struct list_head seg_list;
+	atomic_t segs_available;
+	u8 buffer[1];	/* For reads/writes on USB */
+};
+
+
+/**
+ * Instance of a HWA Host Controller
+ *
+ * Except where a more specific lock/mutex applies or the field is
+ * atomic, fields are protected by the locks noted next to them.
+ *
+ * @wa_descr  Can be accessed without locking because it is in
+ *            the same area where the device descriptors were
+ *            read, so it is guaranteed to exist unmodified while
+ *            the device exists.
+ *
+ *            Endianness has been converted to the CPU's.
+ *
+ * @nep_* can be accessed without locking as its processing is
+ *        serialized; we submit a NEP URB and it comes to
+ *        hwahc_nep_cb(), which won't issue another URB until it is
+ *        done processing it.
+ *
+ * @xfer_list:
+ *
+ *   List of active transfers to verify existence from a xfer id
+ *   gotten from the xfer result message. Can't use urb->list because
+ *   it goes by endpoint, and we don't know the endpoint at the time
+ *   when we get the xfer result message. We can't really rely on the
+ *   pointer (will have to change for 64 bits) as the xfer id is 32 bits.
+ *
+ * @xfer_delayed_list:   List of transfers that need to be started
+ *                       (with a workqueue, because they were
+ *                       submitted from an atomic context).
+ *
+ * FIXME: this needs to be layered up: a wusbhc layer (for sharing
+ *        commonalities with WHCI), a wa layer (for sharing
+ *        commonalities with DWA-RC).
+ */
+struct wahc {
+	struct usb_device *usb_dev;
+	struct usb_interface *usb_iface;
+
+	/* HC to deliver notifications */
+	union {
+		struct wusbhc *wusb;
+		struct dwahc *dwa;
+	};
+
+	const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
+	const struct usb_wa_descriptor *wa_descr;
+
+	struct urb *nep_urb;		/* Notification EndPoint [lockless] */
+	struct edc nep_edc;
+	void *nep_buffer;
+	size_t nep_buffer_size;
+
+	atomic_t notifs_queued;
+
+	u16 rpipes;
+	unsigned long *rpipe_bm;	/* rpipe usage bitmap */
+	spinlock_t rpipe_bm_lock;	/* protect rpipe_bm */
+	struct mutex rpipe_mutex;	/* assigning resources to endpoints */
+
+	struct urb *dti_urb;		/* URB for reading xfer results */
+	struct urb *buf_in_urb;		/* URB for reading data in */
+	struct edc dti_edc;		/* DTI error density counter */
+	struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
+	size_t xfer_result_size;
+
+	s32 status;			/* For reading status */
+
+	struct list_head xfer_list;
+	struct list_head xfer_delayed_list;
+	spinlock_t xfer_list_lock;
+	struct work_struct xfer_work;
+	atomic_t xfer_id_count;
+};
+
+
+extern int wa_create(struct wahc *wa, struct usb_interface *iface);
+extern void __wa_destroy(struct wahc *wa);
+void wa_reset_all(struct wahc *wa);
+
+
+/* Miscellaneous constants */
+enum {
+	/** Max number of EPROTO errors we tolerate on the NEP in a
+	 * period of time */
+	HWAHC_EPROTO_MAX = 16,
+	/** Period of time for EPROTO errors (in jiffies) */
+	HWAHC_EPROTO_PERIOD = 4 * HZ,
+};
+
+
+/* Notification endpoint handling */
+extern int wa_nep_create(struct wahc *, struct usb_interface *);
+extern void wa_nep_destroy(struct wahc *);
+
+static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
+{
+	struct urb *urb = wa->nep_urb;
+	urb->transfer_buffer = wa->nep_buffer;
+	urb->transfer_buffer_length = wa->nep_buffer_size;
+	return usb_submit_urb(urb, gfp_mask);
+}
+
+static inline void wa_nep_disarm(struct wahc *wa)
+{
+	usb_kill_urb(wa->nep_urb);
+}
+
+
+/* RPipes */
+static inline void wa_rpipe_init(struct wahc *wa)
+{
+	spin_lock_init(&wa->rpipe_bm_lock);
+	mutex_init(&wa->rpipe_mutex);
+}
+
+static inline void wa_init(struct wahc *wa)
+{
+	edc_init(&wa->nep_edc);
+	atomic_set(&wa->notifs_queued, 0);
+	wa_rpipe_init(wa);
+	edc_init(&wa->dti_edc);
+	INIT_LIST_HEAD(&wa->xfer_list);
+	INIT_LIST_HEAD(&wa->xfer_delayed_list);
+	spin_lock_init(&wa->xfer_list_lock);
+	INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+	atomic_set(&wa->xfer_id_count, 1);
+}
+
+/**
+ * Destroy a pipe (when refcount drops to zero)
+ *
+ * Assumes it has been moved to the "QUIESCING" state.
+ */
+struct wa_xfer;
+extern void rpipe_destroy(struct kref *_rpipe);
+static inline
+void __rpipe_get(struct wa_rpipe *rpipe)
+{
+	kref_get(&rpipe->refcnt);
+}
+extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
+			   struct urb *, gfp_t);
+static inline void rpipe_put(struct wa_rpipe *rpipe)
+{
+	kref_put(&rpipe->refcnt, rpipe_destroy);
+
+}
+extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern int wa_rpipes_create(struct wahc *);
+extern void wa_rpipes_destroy(struct wahc *);
+static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
+{
+	atomic_dec(&rpipe->segs_available);
+}
+
+/**
+ * Returns true if the rpipe has room for more segments and there are
+ * delayed segments queued waiting to be submitted.
+ */
+static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
+{
+	return atomic_inc_return(&rpipe->segs_available) > 0
+		&& !list_empty(&rpipe->seg_list);
+}
+
+
+/* Transferring data */
+extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
+			  struct urb *, gfp_t);
+extern int wa_urb_dequeue(struct wahc *, struct urb *);
+extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
+
+
+/* Misc
+ *
+ * FIXME: Refcounting for the actual @hwahc object is not correct; I
+ *        mean, this should be refcounting on the HCD underneath, but
+ *        it is not. In any case, the semantics for HCD refcounting
+ *        are *weird*...on refcount reaching zero it just frees
+ *        it...no RC specific function is called...unless I miss
+ *        something.
+ *
+ * FIXME: has to go away in favour of a 'struct hcd'-based solution
+ */
+static inline struct wahc *wa_get(struct wahc *wa)
+{
+	usb_get_intf(wa->usb_iface);
+	return wa;
+}
+
+static inline void wa_put(struct wahc *wa)
+{
+	usb_put_intf(wa->usb_iface);
+}
+
+
+static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
+{
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			feature,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 1000 /* FIXME: arbitrary */);
+}
+
+
+static inline int __wa_set_feature(struct wahc *wa, u16 feature)
+{
+	return  __wa_feature(wa, 1, feature);
+}
+
+
+static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
+{
+	return __wa_feature(wa, 0, feature);
+}
+
+
+/**
+ * Return the status of a Wire Adapter
+ *
+ * @wa:		Wire Adapter instance
+ * @returns     < 0 errno code on error, or status bitmap as described
+ *              in WUSB1.0[8.3.1.6].
+ *
+ * NOTE: the status is read into wa->status (heap memory); some arches
+ *       can't do USB transfers from stack buffers.
+ */
+static inline
+s32 __wa_get_status(struct wahc *wa)
+{
+	s32 result;
+	result = usb_control_msg(
+		wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+		USB_REQ_GET_STATUS,
+		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+		0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+		&wa->status, sizeof(wa->status),
+		1000 /* FIXME: arbitrary */);
+	if (result >= 0)
+		result = wa->status;
+	return result;
+}
+
+
+/**
+ * Waits until the Wire Adapter's status matches @mask/@value
+ *
+ * @wa:		Wire Adapter instance.
+ * @returns     < 0 errno code on error, otherwise status.
+ *
+ * Loop until the WA's status matches the mask and value (status & mask
+ * == value). Time out if it doesn't happen.
+ *
+ * FIXME: is there an official specification on how long status
+ *        changes can take?
+ */
+static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
+{
+	s32 result;
+	unsigned loops = 10;
+	do {
+		msleep(50);
+		result = __wa_get_status(wa);
+		if ((result & mask) == value)
+			break;
+		if (loops-- == 0) {
+			result = -ETIMEDOUT;
+			break;
+		}
+	} while (result >= 0);
+	return result;
+}
+
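+/*
+ * Illustrative sketch (not necessarily the drivers' actual start
+ * path): enabling a WA would mirror __wa_stop() below, e.g.
+ *
+ *	__wa_set_feature(wa, WA_ENABLE);
+ *	__wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
+ */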
+
+/** Command @wa to stop; errors are logged and 0 is always returned */
+static inline int __wa_stop(struct wahc *wa)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+
+	result = __wa_clear_feature(wa, WA_ENABLE);
+	if (result < 0 && result != -ENODEV) {
+		dev_err(dev, "error commanding HC to stop: %d\n", result);
+		goto out;
+	}
+	result = __wa_wait_status(wa, WA_ENABLE, 0);
+	if (result < 0 && result != -ENODEV)
+		dev_err(dev, "error waiting for HC to stop: %d\n", result);
+out:
+	return 0;
+}
+
+
+#endif /* #ifndef __HWAHC_INTERNAL_H__ */
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
new file mode 100644
index 0000000..3f54299
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -0,0 +1,310 @@
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * Notification EndPoint support
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This part only takes care of getting the notification from the hw
+ * and dispatching it through the wusbd workqueue into
+ * wa_notif_dispatch(). Handling is done there.
+ *
+ * WA notifications are limited in size; most of them are three or
+ * four bytes long, and the longest is the HWA Device Notification,
+ * which would not exceed 38 bytes (DNs are limited in payload to 32
+ * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA
+ * header (WUSB1.0[8.5.4.2]).
+ *
+ * It is not clear if more than one Device Notification can be packed
+ * in a HWA Notification, I assume no because of the wording in
+ * WUSB1.0[8.5.4.2]. In any case, the biggest any notification could
+ * get is 255 bytes (as the bLength field is a single byte).
+ *
+ * So what we do is we have this buffer and read into it; when a
+ * notification arrives we schedule work to a specific, single thread
+ * workqueue (so notifications are serialized) and copy the
+ * notification data. After scheduling the work, we rearm the read from
+ * the notification endpoint.
+ *
+ * Entry points here are:
+ *
+ * wa_nep_[create|destroy]()   To initialize/release this subsystem
+ *
+ * wa_nep_cb()                 Callback for the notification
+ *                                endpoint; when data is ready, this
+ *                                does the dispatching.
+ */
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
+#include <linux/uwb/debug.h>
+#include "wa-hc.h"
+#include "wusbhc.h"
+
+/* Structure for queueing notifications to the workqueue */
+struct wa_notif_work {
+	struct work_struct work;
+	struct wahc *wa;
+	size_t size;
+	u8 data[];
+};
+
+/*
+ * Process incoming notifications from the WA's Notification EndPoint
+ * [runs from the wusbd workqueue, basically]
+ *
+ * @_nw:	Pointer to a descriptor which has the pointer to the
+ * 		@wa, the size of the buffer and the work queue
+ * 		structure (so we can free all when done).
+ *
+ * All notifications follow the same format; they need to start with a
+ * 'struct wa_notif_hdr' header, so it is easy to parse through
+ * them. We just break the buffer in individual notifications (the
+ * standard doesn't say if it can be done or is forbidden, so we are
+ * cautious) and dispatch each.
+ *
+ * So the handling layers are:
+ *
+ *   WA specific notification (from NEP)
+ *      Device Notification Received -> wa_handle_notif_dn()
+ *        WUSB Device notification generic handling
+ *      BPST Adjustment -> wa_handle_notif_bpst_adj()
+ *      ... -> ...
+ *
+ * @wa has to be referenced
+ */
+static void wa_notif_dispatch(struct work_struct *ws)
+{
+	void *itr;
+	u8 missing = 0;
+	struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
+	struct wahc *wa = nw->wa;
+	struct wa_notif_hdr *notif_hdr;
+	size_t size;
+
+	struct device *dev = &wa->usb_iface->dev;
+
+#if 0
+	/* FIXME: need to check for this??? */
+	if (usb_hcd->state == HC_STATE_QUIESCING)	/* Going down? */
+		goto out;				/* screw it */
+#endif
+	atomic_dec(&wa->notifs_queued);		/* Throttling ctl */
+	dev = &wa->usb_iface->dev;
+	size = nw->size;
+	itr = nw->data;
+
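+	/* The buffer may contain several notifications back to back;
+	 * peel them off one 'struct wa_notif_hdr' at a time. */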
+	while (size) {
+		if (size < sizeof(*notif_hdr)) {
+			missing = sizeof(*notif_hdr) - size;
+			goto exhausted_buffer;
+		}
+		notif_hdr = itr;
+		if (size < notif_hdr->bLength)
+			goto exhausted_buffer;
+		itr += notif_hdr->bLength;
+		size -= notif_hdr->bLength;
+		/* Dispatch the notification [don't use itr or size!] */
+		switch (notif_hdr->bNotifyType) {
+		case HWA_NOTIF_DN: {
+			struct hwa_notif_dn *hwa_dn;
+			hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
+					      hdr);
+			wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
+					 hwa_dn->dndata,
+					 notif_hdr->bLength - sizeof(*hwa_dn));
+			break;
+		}
+		case WA_NOTIF_TRANSFER:
+			wa_handle_notif_xfer(wa, notif_hdr);
+			break;
+		case DWA_NOTIF_RWAKE:
+		case DWA_NOTIF_PORTSTATUS:
+		case HWA_NOTIF_BPST_ADJ:
+			/* FIXME: unimplemented WA NOTIFs */
+			/* fallthru */
+		default:
+			if (printk_ratelimit()) {
+				dev_err(dev, "HWA: unknown notification 0x%x, "
+					"%zu bytes; discarding\n",
+					notif_hdr->bNotifyType,
+					(size_t)notif_hdr->bLength);
+				dump_bytes(dev, notif_hdr, 16);
+			}
+			break;
+		}
+	}
+out:
+	wa_put(wa);
+	kfree(nw);
+	return;
+
+	/* THIS SHOULD NOT HAPPEN
+	 *
+	 * Buffer exhausted with partial data remaining; just warn and
+	 * discard the data, as this should not happen.
+	 */
+exhausted_buffer:
+	if (!printk_ratelimit())
+		goto out;
+	dev_warn(dev, "HWA: device sent short notification, "
+		 "%d bytes missing; discarding %d bytes.\n",
+		 missing, (int)size);
+	dump_bytes(dev, itr, size);
+	goto out;
+}
+
+/*
+ * Deliver incoming WA notifications to the wusbwa workqueue
+ *
+ * @wa:	Pointer to the Wire Adapter Controller Data Streaming
+ *              instance (part of a 'struct usb_hcd').
+ * @size:       Size of the received buffer
+ * @returns     0 if ok, < 0 errno code on error.
+ *
+ * The input buffer is @wa->nep_buffer, with @size bytes
+ * (guaranteed to fit in the allocated space,
+ * @wa->nep_buffer_size).
+ */
+static int wa_nep_queue(struct wahc *wa, size_t size)
+{
+	int result = 0;
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_notif_work *nw;
+
+	/* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
+	BUG_ON(size > wa->nep_buffer_size);
+	if (size == 0)
+		goto out;
+	if (atomic_read(&wa->notifs_queued) > 200) {
+		if (printk_ratelimit())
+			dev_err(dev, "Too many notifications queued, "
+				"throttling back\n");
+		goto out;
+	}
+	nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
+	if (nw == NULL) {
+		if (printk_ratelimit())
+			dev_err(dev, "No memory to queue notification\n");
+		goto out;
+	}
+	INIT_WORK(&nw->work, wa_notif_dispatch);
+	nw->wa = wa_get(wa);
+	nw->size = size;
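+	/* Copy the data out of nep_buffer so the notification EP URB can
+	 * be re-armed right away. */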
+	memcpy(nw->data, wa->nep_buffer, size);
+	atomic_inc(&wa->notifs_queued);		/* Throttling ctl */
+	queue_work(wusbd, &nw->work);
+out:
+	/* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
+	return result;
+}
+
+/*
+ * Callback for the notification event endpoint
+ *
+ * Checks that everything is fine and then passes the data to be
+ * queued to the workqueue.
+ */
+static void wa_nep_cb(struct urb *urb)
+{
+	int result;
+	struct wahc *wa = urb->context;
+	struct device *dev = &wa->usb_iface->dev;
+
+	switch (result = urb->status) {
+	case 0:
+		result = wa_nep_queue(wa, urb->actual_length);
+		if (result < 0)
+			dev_err(dev, "NEP: unable to process notification(s): "
+				"%d\n", result);
+		break;
+	case -ECONNRESET:	/* Not an error, but a controlled situation; */
+	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
+	case -ESHUTDOWN:
+		dev_dbg(dev, "NEP: going down %d\n", urb->status);
+		goto out;
+	default:	/* On general errors, we retry unless it gets ugly */
+		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "NEP: URB max acceptable errors "
+				"exceeded, resetting device\n");
+			wa_reset_all(wa);
+			goto out;
+		}
+		dev_err(dev, "NEP: URB error %d\n", urb->status);
+	}
+	result = wa_nep_arm(wa, GFP_ATOMIC);
+	if (result < 0) {
+		dev_err(dev, "NEP: cannot submit URB: %d\n", result);
+		wa_reset_all(wa);
+	}
+out:
+	return;
+}
+
+/*
+ * Initialize @wa's notification and event's endpoint stuff
+ *
+ * This includes the allocating the read buffer, the context ID
+ * allocation bitmap, the URB and submitting the URB.
+ */
+int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
+{
+	int result;
+	struct usb_endpoint_descriptor *epd;
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+	struct device *dev = &iface->dev;
+
+	edc_init(&wa->nep_edc);
+	epd = &iface->cur_altsetting->endpoint[0].desc;
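+	/* The notification endpoint is the interface's first endpoint,
+	 * an interrupt IN endpoint (see usb_rcvintpipe() below). */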
+	wa->nep_buffer_size = 1024;
+	wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
+	if (wa->nep_buffer == NULL) {
+		dev_err(dev, "Unable to allocate notification's read buffer\n");
+		result = -ENOMEM;
+		goto error_nep_buffer;
+	}
+	wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (wa->nep_urb == NULL) {
+		dev_err(dev, "Unable to allocate notification URB\n");
+		result = -ENOMEM;
+		goto error_urb_alloc;
+	}
+	usb_fill_int_urb(wa->nep_urb, usb_dev,
+			 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+			 wa->nep_buffer, wa->nep_buffer_size,
+			 wa_nep_cb, wa, epd->bInterval);
+	result = wa_nep_arm(wa, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "Cannot submit notification URB: %d\n", result);
+		goto error_nep_arm;
+	}
+	return 0;
+
+error_nep_arm:
+	usb_free_urb(wa->nep_urb);
+error_urb_alloc:
+	kfree(wa->nep_buffer);
+error_nep_buffer:
+	return result;
+}
+
+void wa_nep_destroy(struct wahc *wa)
+{
+	wa_nep_disarm(wa);
+	usb_free_urb(wa->nep_urb);
+	kfree(wa->nep_buffer);
+}
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
new file mode 100644
index 0000000..f18e4aa
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -0,0 +1,562 @@
+/*
+ * WUSB Wire Adapter
+ * rpipe management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * RPIPE
+ *
+ *   Targeted at different downstream endpoints
+ *
+ *   Descriptor: use to config the remote pipe.
+ *
+ *   The number of blocks could be dynamic (wBlocks in the descriptor
+ *   is 0); in that case they need to be scheduled.
+ *
+ * Each bit in wa->rpipe_bm represents if an rpipe is being used or
+ * not. Rpipes are represented with a 'struct wa_rpipe' that is
+ * attached to the hcpriv member of a 'struct usb_host_endpoint'.
+ *
+ * When you need to xfer data to an endpoint, you get an rpipe for it
+ * with rpipe_get_by_ep(), which gives you a reference to the rpipe
+ * and keeps a single one (the first one) with the endpoint. When you
+ * are done transferring, you drop that reference. At the end the
+ * rpipe is always allocated and bound to the endpoint. There it might
+ * be recycled when not used.
+ *
+ * Addresses:
+ *
+ *  We use a 1:1 mapping mechanism between the port index (0 based)
+ *  and the USB device address. The USB stack knows about this.
+ *
+ *  USB Stack port number    4 (1 based)
+ *  WUSB code port index     3 (0 based)
+ *  USB Address              5 (2 based -- 0 is for default, 1 for root hub)
+ *
+ *  Now, because we don't use the concept of a default address exactly
+ *  like the (wired) USB code does, we need to kind of skip it. So we
+ *  never take addresses from the urb->pipe, but from the
+ *  urb->dev->devnum, to make sure that we always have the right
+ *  destination address.
+ */
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include <linux/bitmap.h>
+#include "wusbhc.h"
+#include "wa-hc.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
+static int __rpipe_get_descr(struct wahc *wa,
+			     struct usb_rpipe_descriptor *descr, u16 index)
+{
+	ssize_t result;
+	struct device *dev = &wa->usb_iface->dev;
+
+	/* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
+	 * function because the arguments are different.
+	 */
+	d_printf(1, dev, "rpipe %u: get descr\n", index);
+	result = usb_control_msg(
+		wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+		USB_REQ_GET_DESCRIPTOR,
+		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+		USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
+		1000 /* FIXME: arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
+			index, (int)result);
+		goto error;
+	}
+	if (result < sizeof(*descr)) {
+		dev_err(dev, "rpipe %u: got short descriptor "
+			"(%zd vs %zd bytes needed)\n",
+			index, result, sizeof(*descr));
+		result = -EINVAL;
+		goto error;
+	}
+	result = 0;
+
+error:
+	return result;
+}
+
+/*
+ * Program an RPIPE descriptor into the wire adapter
+ *
+ * The descriptor is assumed to be properly initialized (ie: you got
+ * it through __rpipe_get_descr()).
+ */
+static int __rpipe_set_descr(struct wahc *wa,
+			     struct usb_rpipe_descriptor *descr, u16 index)
+{
+	ssize_t result;
+	struct device *dev = &wa->usb_iface->dev;
+
+	/* we cannot use the usb_get_descriptor() function because the
+	 * arguments are different.
+	 */
+	d_printf(1, dev, "rpipe %u: set descr\n", index);
+	result = usb_control_msg(
+		wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+		USB_REQ_SET_DESCRIPTOR,
+		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+		USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
+		HZ / 10);
+	if (result < 0) {
+		dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
+			index, (int)result);
+		goto error;
+	}
+	if (result < sizeof(*descr)) {
+		dev_err(dev, "rpipe %u: sent short descriptor "
+			"(%zd vs %zd bytes required)\n",
+			index, result, sizeof(*descr));
+		result = -EINVAL;
+		goto error;
+	}
+	result = 0;
+
+error:
+	return result;
+
+}
+
+static void rpipe_init(struct wa_rpipe *rpipe)
+{
+	kref_init(&rpipe->refcnt);
+	spin_lock_init(&rpipe->seg_lock);
+	INIT_LIST_HEAD(&rpipe->seg_list);
+}
+
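+/*
+ * Claim the first unused rpipe index at or after @rpipe_idx in the
+ * usage bitmap; returns wa->rpipes if none is free.
+ */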
+static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+	rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
+	if (rpipe_idx < wa->rpipes)
+		set_bit(rpipe_idx, wa->rpipe_bm);
+	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+
+	return rpipe_idx;
+}
+
+static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+	clear_bit(rpipe_idx, wa->rpipe_bm);
+	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+}
+
+void rpipe_destroy(struct kref *_rpipe)
+{
+	struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
+	u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+	d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index);
+	if (rpipe->ep)
+		rpipe->ep->hcpriv = NULL;
+	rpipe_put_idx(rpipe->wa, index);
+	wa_put(rpipe->wa);
+	kfree(rpipe);
+	d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index);
+}
+EXPORT_SYMBOL_GPL(rpipe_destroy);
+
+/*
+ * Locate an idle rpipe, create a structure for it and return it
+ *
+ * @wa 	  is referenced and unlocked
+ * @crs   enum rpipe_attr, required endpoint characteristics
+ *
+ * The rpipe can be used only sequentially (not in parallel).
+ *
+ * The rpipe is moved into the "ready" state.
+ */
+static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
+			  gfp_t gfp)
+{
+	int result;
+	unsigned rpipe_idx;
+	struct wa_rpipe *rpipe;
+	struct device *dev = &wa->usb_iface->dev;
+
+	d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs);
+	rpipe = kzalloc(sizeof(*rpipe), gfp);
+	if (rpipe == NULL)
+		return -ENOMEM;
+	rpipe_init(rpipe);
+
+	/* Look for an idle pipe */
+	for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
+		rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
+		if (rpipe_idx >= wa->rpipes)	/* no more pipes :( */
+			break;
+		result =  __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
+		if (result < 0)
+			dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
+				rpipe_idx, result);
+		else if ((rpipe->descr.bmCharacteristics & crs) != 0)
+			goto found;
+		rpipe_put_idx(wa, rpipe_idx);
+	}
+	*prpipe = NULL;
+	kfree(rpipe);
+	d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs);
+	return -ENXIO;
+
+found:
+	set_bit(rpipe_idx, wa->rpipe_bm);
+	rpipe->wa = wa_get(wa);
+	*prpipe = rpipe;
+	d_fnend(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs);
+	return 0;
+}
+
+static int __rpipe_reset(struct wahc *wa, unsigned index)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+
+	d_printf(1, dev, "rpipe %u: reset\n", index);
+	result = usb_control_msg(
+		wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+		USB_REQ_RPIPE_RESET,
+		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+		0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+	if (result < 0)
+		dev_err(dev, "rpipe %u: reset failed: %d\n",
+			index, result);
+	return result;
+}
+
+/*
+ * Fake companion descriptor for ep0
+ *
+ * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
+ */
+static struct usb_wireless_ep_comp_descriptor epc0 = {
+	.bLength = sizeof(epc0),
+	.bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
+/*	.bMaxBurst = 1, */
+	.bMaxSequence = 31,
+};
+
+/*
+ * Look for EP companion descriptor
+ *
+ * Walk the endpoint's extra descriptors looking for the wireless EP
+ * companion descriptor.
+ */
+static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
+		struct device *dev, struct usb_host_endpoint *ep)
+{
+	void *itr;
+	size_t itr_size;
+	struct usb_descriptor_header *hdr;
+	struct usb_wireless_ep_comp_descriptor *epcd;
+
+	d_fnstart(3, dev, "(ep %p)\n", ep);
+	if (ep->desc.bEndpointAddress == 0) {
+		epcd = &epc0;
+		goto out;
+	}
+	itr = ep->extra;
+	itr_size = ep->extralen;
+	epcd = NULL;
+	while (itr_size > 0) {
+		if (itr_size < sizeof(*hdr)) {
+			dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
+				"at offset %zu: only %zu bytes left\n",
+				ep->desc.bEndpointAddress,
+				itr - (void *) ep->extra, itr_size);
+			break;
+		}
+		hdr = itr;
+		if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
+			epcd = itr;
+			break;
+		}
+		if (hdr->bLength > itr_size) {
+			dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
+				"at offset %zu (type 0x%02x) "
+				"length %d but only %zu bytes left\n",
+				ep->desc.bEndpointAddress,
+				itr - (void *) ep->extra, hdr->bDescriptorType,
+				hdr->bLength, itr_size);
+			break;
+		}
+		itr += hdr->bLength;
+		itr_size -= hdr->bLength;
+	}
+out:
+	d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd);
+	return epcd;
+}
+
+/*
+ * Aim an rpipe to its device & endpoint destination
+ *
+ * Make sure we change the address to unauthenticated if the device
+ * is WUSB and it is not authenticated.
+ */
+static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
+		     struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
+{
+	int result = -ENOMSG;	/* better code for lack of companion? */
+	struct device *dev = &wa->usb_iface->dev;
+	struct usb_device *usb_dev = urb->dev;
+	struct usb_wireless_ep_comp_descriptor *epcd;
+	u8 unauth;
+
+	d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
+		    rpipe, wa, ep, urb);
+	epcd = rpipe_epc_find(dev, ep);
+	if (epcd == NULL) {
+		dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
+			ep->desc.bEndpointAddress);
+		goto error;
+	}
+	unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
+	__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
+	atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
+	/* FIXME: block allocation system; request with queuing and timeout */
+	/* FIXME: compute so seg_size > ep->maxpktsize */
+	rpipe->descr.wBlocks = cpu_to_le16(16);		/* given */
+	/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
+	rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;	/* already LE */
+	rpipe->descr.bHSHubAddress = 0;			/* reserved: zero */
+	rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
+	/* FIXME: use maximum speed as supported or recommended by device */
+	rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
+		UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
+	d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
+		 urb->dev->devnum, urb->dev->devnum | unauth,
+		 le16_to_cpu(rpipe->descr.wRPipeIndex),
+		 usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
+	/* see security.c:wusb_update_address() */
+	if (unlikely(urb->dev->devnum == 0x80))
+		rpipe->descr.bDeviceAddress = 0;
+	else
+		rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
+	rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
+	/* FIXME: bDataSequence */
+	rpipe->descr.bDataSequence = 0;
+	/* FIXME: dwCurrentWindow */
+	rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
+	/* FIXME: bMaxDataSequence */
+	rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
+	rpipe->descr.bInterval = ep->desc.bInterval;
+	/* FIXME: bOverTheAirInterval */
+	rpipe->descr.bOverTheAirInterval = 0;	/* 0 if not isoc */
+	/* FIXME: xmit power & preamble blah blah */
+	rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
+	/* rpipe->descr.bmCharacteristics RO */
+	/* FIXME: bmRetryOptions */
+	rpipe->descr.bmRetryOptions = 15;
+	/* FIXME: use for assessing link quality? */
+	rpipe->descr.wNumTransactionErrors = 0;
+	result = __rpipe_set_descr(wa, &rpipe->descr,
+				   le16_to_cpu(rpipe->descr.wRPipeIndex));
+	if (result < 0) {
+		dev_err(dev, "Cannot aim rpipe: %d\n", result);
+		goto error;
+	}
+	result = 0;
+error:
+	d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n",
+		  rpipe, wa, ep, urb, result);
+	return result;
+}
+
+/*
+ * Check an aimed rpipe to make sure it points to where we want
+ *
+ * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
+ * space; when it is like that, we or 0x80 to make an unauth address.
+ */
+static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
+			   const struct usb_host_endpoint *ep,
+			   const struct urb *urb, gfp_t gfp)
+{
+	int result = 0;
+	struct device *dev = &wa->usb_iface->dev;
+	struct usb_device *usb_dev = urb->dev;
+	u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
+	u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
+
+	d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
+		    rpipe, wa, ep, urb);
+#define AIM_CHECK(rdf, val, text)					\
+	do {								\
+		if (rpipe->descr.rdf != (val)) {			\
+			dev_err(dev,					\
+				"rpipe aim discrepancy: " #rdf " " text "\n", \
+				rpipe->descr.rdf, (val));		\
+			result = -EINVAL;				\
+			WARN_ON(1);					\
+		}							\
+	} while (0)
+	AIM_CHECK(wMaxPacketSize, ep->desc.wMaxPacketSize,
+		  "(%u vs %u)");
+	AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
+	AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
+			UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
+		  "(%u vs %u)");
+	AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
+	AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
+	AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
+	AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
+#undef AIM_CHECK
+	return result;
+}
+
+#ifndef CONFIG_BUG
+#define CONFIG_BUG 0
+#endif
+
+/*
+ * Make sure there is an rpipe allocated for an endpoint
+ *
+ * If already allocated, we just refcount it; if not, we get an
+ * idle one, aim it to the right location and take it.
+ *
+ * Attaches to ep->hcpriv and rpipe->ep to ep.
+ */
+int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
+		    struct urb *urb, gfp_t gfp)
+{
+	int result = 0;
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_rpipe *rpipe;
+	u8 eptype;
+
+	d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb,
+		  gfp);
+	mutex_lock(&wa->rpipe_mutex);
+	rpipe = ep->hcpriv;
+	if (rpipe != NULL) {
+		if (CONFIG_BUG == 1) {
+			result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
+			if (result < 0)
+				goto error;
+		}
+		__rpipe_get(rpipe);
+		d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n",
+			 ep->desc.bEndpointAddress,
+			 le16_to_cpu(rpipe->descr.wRPipeIndex));
+	} else {
+		/* hmm, assign idle rpipe, aim it */
+		result = -ENOBUFS;
+		eptype = ep->desc.bmAttributes & 0x03;
+		result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
+		if (result < 0)
+			goto error;
+		result = rpipe_aim(rpipe, wa, ep, urb, gfp);
+		if (result < 0) {
+			rpipe_put(rpipe);
+			goto error;
+		}
+		ep->hcpriv = rpipe;
+		rpipe->ep = ep;
+		__rpipe_get(rpipe);	/* for caching into ep->hcpriv */
+		d_printf(2, dev, "ep 0x%02x: using rpipe %u\n",
+			 ep->desc.bEndpointAddress,
+			 le16_to_cpu(rpipe->descr.wRPipeIndex));
+	}
+	d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr));
+error:
+	mutex_unlock(&wa->rpipe_mutex);
+	d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp);
+	return result;
+}
+
+/*
+ * Allocate the bitmap for each rpipe.
+ */
+int wa_rpipes_create(struct wahc *wa)
+{
+	wa->rpipes = wa->wa_descr->wNumRPipes;
+	wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
+			       GFP_KERNEL);
+	if (wa->rpipe_bm == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+void wa_rpipes_destroy(struct wahc *wa)
+{
+	struct device *dev = &wa->usb_iface->dev;
+	d_fnstart(3, dev, "(wa %p)\n", wa);
+	if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
+		char buf[256];
+		WARN_ON(1);
+		bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
+		dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
+	}
+	kfree(wa->rpipe_bm);
+	d_fnend(3, dev, "(wa %p)\n", wa);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If there is an rpipe associated with this endpoint, abort any pending
+ * transfers and put it. If the rpipe ends up being destroyed,
+ * __rpipe_destroy() will clean up ep->hcpriv.
+ *
+ * This is called before calling hcd->stop(), so you don't need to do
+ * anything else in there.
+ */
+void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_rpipe *rpipe;
+	d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep);
+	mutex_lock(&wa->rpipe_mutex);
+	rpipe = ep->hcpriv;
+	if (rpipe != NULL) {
+		unsigned rc = atomic_read(&rpipe->refcnt.refcount);
+		int result;
+		u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+		if (rc != 1)
+			d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n",
+				 wa, ep, rpipe, rc);
+
+		d_printf(1, dev, "rpipe %u: abort\n", index);
+		result = usb_control_msg(
+			wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+			USB_REQ_RPIPE_ABORT,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+			0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+		if (result < 0 && result != -ENODEV /* dev is gone */)
+			d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n",
+				 wa, index, result);
+		rpipe_put(rpipe);
+	}
+	mutex_unlock(&wa->rpipe_mutex);
+	d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep);
+	return;
+}
+EXPORT_SYMBOL_GPL(rpipe_ep_disable);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
new file mode 100644
index 0000000..c038635
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -0,0 +1,1709 @@
+/*
+ * WUSB Wire Adapter
+ * Data transfer and URB enqueueing
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * How transfers work: get a buffer, break it up in segments (segment
+ * size is a multiple of the maxpacket size). For each segment issue a
+ * segment request (struct wa_xfer_*), then send the data buffer if
+ * out or nothing if in (all over the DTO endpoint).
+ *
+ * For each submitted segment request, a notification will come over
+ * the NEP endpoint and a transfer result (struct xfer_result) will
+ * arrive in the DTI URB. Read it, get the xfer ID, see if there is
+ * data coming (inbound transfer), schedule a read and handle it.
+ *
+ * Sounds simple, it is a pain to implement.
+ *
+ *
+ * ENTRY POINTS
+ *
+ *   FIXME
+ *
+ * LIFE CYCLE / STATE DIAGRAM
+ *
+ *   FIXME
+ *
+ * THIS CODE IS DISGUSTING
+ *
+ *   Warned you are; it's my second try and still not happy with it.
+ *
+ * NOTES:
+ *
+ *   - No iso
+ *
+ *   - Supports DMA xfers, control, bulk and maybe interrupt
+ *
+ *   - Does not recycle unused rpipes
+ *
+ *     An rpipe is assigned to an endpoint the first time it is used,
+ *     and then it's there, assigned, until the endpoint is disabled
+ *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
+ *     rpipe to the endpoint is done under the wa->rpipe_mutex mutex.
+ *
+ *     Two ways it could be done:
+ *
+ *     (a) set up a timer every time an rpipe's use count drops to 1
+ *         (which means it is unused) or when a transfer ends. Reset
+ *         the timer when an xfer is queued. If the timer expires,
+ *         release the rpipe [see rpipe_ep_disable()]. A minimal
+ *         sketch of this follows this comment block.
+ *
+ *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
+ *         and none are found, go over the list, check their endpoint
+ *         and their activity record (no last-xfer-done timestamp in
+ *         the last X seconds) and take one.
+ *
+ *     However, due to the fact that we have a set of limited
+ *     resources (max-segments-at-the-same-time per xfer,
+ *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
+ *     we are going to have to rebuild all this based on a scheduler,
+ *     where we keep a list of transactions to do and, based on the
+ *     availability of the different required components (blocks,
+ *     rpipes, segment slots, etc), schedule them. Painful.
+ */
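+/*
+ * Illustrative sketch of option (a) above; NOT part of this driver.
+ * It assumes a hypothetical 'idle_timer' field in struct wa_rpipe and
+ * an RPIPE_IDLE_TIMEOUT constant, neither of which exist here:
+ *
+ *	// when a transfer completes or the use count drops to 1:
+ *	mod_timer(&rpipe->idle_timer, jiffies + RPIPE_IDLE_TIMEOUT);
+ *	// when a new xfer is queued on the rpipe:
+ *	del_timer(&rpipe->idle_timer);
+ *	// timer handler: if the refcount is still 1, put the rpipe so
+ *	// it returns to the idle pool (cf. rpipe_ep_disable()).
+ */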
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/hash.h>
+#include "wa-hc.h"
+#include "wusbhc.h"
+
+#undef D_LOCAL
+#define D_LOCAL 0 /* 0 disabled, > 0 different levels... */
+#include <linux/uwb/debug.h>
+
+enum {
+	WA_SEGS_MAX = 255,
+};
+
+enum wa_seg_status {
+	WA_SEG_NOTREADY,
+	WA_SEG_READY,
+	WA_SEG_DELAYED,
+	WA_SEG_SUBMITTED,
+	WA_SEG_PENDING,
+	WA_SEG_DTI_PENDING,
+	WA_SEG_DONE,
+	WA_SEG_ERROR,
+	WA_SEG_ABORTED,
+};
+
+static void wa_xfer_delayed_run(struct wa_rpipe *);
+
+/*
+ * Life cycle governed by 'struct urb' (the refcount of the struct is
+ * that of the 'struct urb' and usb_free_urb() would free the whole
+ * struct).
+ */
+struct wa_seg {
+	struct urb urb;
+	struct urb *dto_urb;		/* for data output? */
+	struct list_head list_node;	/* for rpipe->req_list */
+	struct wa_xfer *xfer;		/* out xfer */
+	u8 index;			/* which segment we are */
+	enum wa_seg_status status;
+	ssize_t result;			/* bytes xfered or error */
+	struct wa_xfer_hdr xfer_hdr;
+	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
+};
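+/*
+ * Note: xfer_extra[] leaves room past the basic header for the larger
+ * transfer-type-specific requests (e.g. struct wa_xfer_ctl);
+ * __wa_xfer_setup_segs() sizes each segment allocation accordingly.
+ */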
+
+static void wa_seg_init(struct wa_seg *seg)
+{
+	/* usb_init_urb() repeats a lot of work we don't need; just init the kref */
+	kref_init(&seg->urb.kref);
+}
+
+/*
+ * Protected by xfer->lock
+ *
+ */
+struct wa_xfer {
+	struct kref refcnt;
+	struct list_head list_node;
+	spinlock_t lock;
+	u32 id;
+
+	struct wahc *wa;		/* Wire adapter we are plugged to */
+	struct usb_host_endpoint *ep;
+	struct urb *urb;		/* URB we are transferring for */
+	struct wa_seg **seg;		/* transfer segments */
+	u8 segs, segs_submitted, segs_done;
+	unsigned is_inbound:1;
+	unsigned is_dma:1;
+	size_t seg_size;
+	int result;
+
+	gfp_t gfp;			/* allocation mask */
+
+	struct wusb_dev *wusb_dev;	/* for activity timestamps */
+};
+
+static inline void wa_xfer_init(struct wa_xfer *xfer)
+{
+	kref_init(&xfer->refcnt);
+	INIT_LIST_HEAD(&xfer->list_node);
+	spin_lock_init(&xfer->lock);
+}
+
+/*
+ * Destroy a transfer structure
+ *
+ * Note that the xfer->seg[index] thingies follow the URB life cycle,
+ * so we need to put them, not free them.
+ */
+static void wa_xfer_destroy(struct kref *_xfer)
+{
+	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
+	if (xfer->seg) {
+		unsigned cnt;
+		for (cnt = 0; cnt < xfer->segs; cnt++) {
+			/* the DTO URB only exists for outbound segments */
+			if (!xfer->is_inbound)
+				usb_put_urb(xfer->seg[cnt]->dto_urb);
+			usb_put_urb(&xfer->seg[cnt]->urb);
+		}
+	}
+	kfree(xfer);
+	d_printf(2, NULL, "xfer %p destroyed\n", xfer);
+}
+
+static void wa_xfer_get(struct wa_xfer *xfer)
+{
+	kref_get(&xfer->refcnt);
+}
+
+static void wa_xfer_put(struct wa_xfer *xfer)
+{
+	d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
+		    xfer, atomic_read(&xfer->refcnt.refcount));
+	kref_put(&xfer->refcnt, wa_xfer_destroy);
+	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+}
+
+/*
+ * xfer is referenced
+ *
+ * xfer->lock has to be unlocked
+ *
+ * We take xfer->lock for setting the result; this is a barrier
+ * against drivers/usb/core/hcd.c:unlink1() being called after we call
+ * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
+ * reference to the transfer.
+ */
+static void wa_xfer_giveback(struct wa_xfer *xfer)
+{
+	unsigned long flags;
+	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
+	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
+	list_del_init(&xfer->list_node);
+	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
+	/* FIXME: segmentation broken -- kills DWA */
+	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
+	wa_put(xfer->wa);
+	wa_xfer_put(xfer);
+	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+}
+
+/*
+ * xfer is referenced
+ *
+ * xfer->lock has to be unlocked
+ */
+static void wa_xfer_completion(struct wa_xfer *xfer)
+{
+	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
+	if (xfer->wusb_dev)
+		wusb_dev_put(xfer->wusb_dev);
+	rpipe_put(xfer->ep->hcpriv);
+	wa_xfer_giveback(xfer);
+	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+	return;
+}
+
+/*
+ * If transfer is done, wrap it up and return true
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
+{
+	unsigned result, cnt;
+	struct wa_seg *seg;
+	struct urb *urb = xfer->urb;
+	unsigned found_short = 0;
+
+	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
+	result = xfer->segs_done == xfer->segs_submitted;
+	if (result == 0)
+		goto out;
+	urb->actual_length = 0;
+	for (cnt = 0; cnt < xfer->segs; cnt++) {
+		seg = xfer->seg[cnt];
+		switch (seg->status) {
+		case WA_SEG_DONE:
+			if (found_short && seg->result > 0) {
+				if (printk_ratelimit())
+					printk(KERN_ERR "xfer %p#%u: bad short "
+					       "segments (%zu)\n", xfer, cnt,
+					       seg->result);
+				urb->status = -EINVAL;
+				goto out;
+			}
+			urb->actual_length += seg->result;
+			if (seg->result < xfer->seg_size
+			    && cnt != xfer->segs-1)
+				found_short = 1;
+			d_printf(2, NULL, "xfer %p#%u: DONE short %d "
+				 "result %zu urb->actual_length %d\n",
+				 xfer, seg->index, found_short, seg->result,
+				 urb->actual_length);
+			break;
+		case WA_SEG_ERROR:
+			xfer->result = seg->result;
+			d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
+				 xfer, seg->index, seg->result);
+			goto out;
+		case WA_SEG_ABORTED:
+			WARN_ON(urb->status != -ECONNRESET
+				&& urb->status != -ENOENT);
+			d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
+				 xfer, seg->index, urb->status);
+			xfer->result = urb->status;
+			goto out;
+		default:
+			/* if (printk_ratelimit()) */
+				printk(KERN_ERR "xfer %p#%u: "
+				       "is_done bad state %d\n",
+				       xfer, cnt, seg->status);
+			xfer->result = -EINVAL;
+			WARN_ON(1);
+			goto out;
+		}
+	}
+	xfer->result = 0;
+out:
+	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+	return result;
+}
+
+/*
+ * Initialize a transfer's ID
+ *
+ * We need to use a sequential number; if we use the pointer or the
+ * hash of the pointer, it can repeat over sequential transfers and
+ * then it will confuse the HWA....wonder why in hell they put a 32
+ * bit handle in there then.
+ */
+static void wa_xfer_id_init(struct wa_xfer *xfer)
+{
+	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+}
+
+/*
+ * Return the ID associated with xfer (see wa_xfer_id_init())
+ */
+static u32 wa_xfer_id(struct wa_xfer *xfer)
+{
+	return xfer->id;
+}
+
+/*
+ * Search the wire adapter's transfer list for a transfer with a given ID
+ *
+ * The IDs are the sequential numbers assigned in wa_xfer_id_init().
+ *
+ * @returns NULL if not found; otherwise the xfer is returned with an
+ *          extra reference taken.
+ */
+static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
+{
+	unsigned long flags;
+	struct wa_xfer *xfer_itr;
+	spin_lock_irqsave(&wa->xfer_list_lock, flags);
+	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
+		if (id == xfer_itr->id) {
+			wa_xfer_get(xfer_itr);
+			goto out;
+		}
+	}
+	xfer_itr = NULL;
+out:
+	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+	return xfer_itr;
+}
+
+struct wa_xfer_abort_buffer {
+	struct urb urb;
+	struct wa_xfer_abort cmd;
+};
+
+static void __wa_xfer_abort_cb(struct urb *urb)
+{
+	struct wa_xfer_abort_buffer *b = urb->context;
+	usb_put_urb(&b->urb);
+}
+
+/*
+ * Aborts an ongoing transaction
+ *
+ * Assumes the transfer is referenced and locked and in a submitted
+ * state (mainly that there is an endpoint/rpipe assigned).
+ *
+ * The callback (see above) does nothing but free up the data by
+ * putting the URB. Because the URB is allocated at the head of the
+ * struct, the whole space we allocated is kfree()d.
+ *
+ * We'll get an 'aborted transaction' xfer result on DTI, which we'll
+ * politely ignore because by then the transaction has already been
+ * marked as aborted.
+ */
+static void __wa_xfer_abort(struct wa_xfer *xfer)
+{
+	int result;
+	struct device *dev = &xfer->wa->usb_iface->dev;
+	struct wa_xfer_abort_buffer *b;
+	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+	b = kmalloc(sizeof(*b), GFP_ATOMIC);
+	if (b == NULL)
+		goto error_kmalloc;
+	b->cmd.bLength =  sizeof(b->cmd);
+	b->cmd.bRequestType = WA_XFER_ABORT;
+	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
+	b->cmd.dwTransferID = wa_xfer_id(xfer);
+
+	usb_init_urb(&b->urb);
+	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
+		usb_sndbulkpipe(xfer->wa->usb_dev,
+				xfer->wa->dto_epd->bEndpointAddress),
+		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
+	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
+	if (result < 0)
+		goto error_submit;
+	return;				/* callback frees! */
+
+
+error_submit:
+	if (printk_ratelimit())
+		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
+			xfer, result);
+	kfree(b);
+error_kmalloc:
+	return;
+
+}
+
+/*
+ * Figure out the transfer type and segment size for a transfer
+ *
+ * @returns < 0 on error, transfer segment request size if ok
+ */
+static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
+				     enum wa_xfer_type *pxfer_type)
+{
+	ssize_t result;
+	struct device *dev = &xfer->wa->usb_iface->dev;
+	size_t maxpktsize;
+	struct urb *urb = xfer->urb;
+	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+	d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
+		  xfer, rpipe, urb);
+	switch (rpipe->descr.bmAttribute & 0x3) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		*pxfer_type = WA_XFER_TYPE_CTL;
+		result = sizeof(struct wa_xfer_ctl);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+	case USB_ENDPOINT_XFER_BULK:
+		*pxfer_type = WA_XFER_TYPE_BI;
+		result = sizeof(struct wa_xfer_bi);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		dev_err(dev, "FIXME: ISOC not implemented\n");
+		result = -ENOSYS;
+		goto error;
+	default:
+		/* never happens */
+		BUG();
+		result = -EINVAL;	/* shut gcc up */
+	};
+	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
+	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
+	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+	/* Compute the segment size and make sure it is a multiple of
+	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+	 * a check (FIXME) */
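+	/* Example with illustrative numbers only: wBlocks = 8 and
+	 * bRPipeBlockSize = 10 (512-byte blocks) give a starting
+	 * seg_size of 8 << 9 = 4096 bytes, which is then rounded down
+	 * to a multiple of wMaxPacketSize right below. */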
+	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
+	if (xfer->seg_size < maxpktsize) {
+		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
+			"%zu\n", xfer->seg_size, maxpktsize);
+		result = -EINVAL;
+		goto error;
+	}
+	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
+		/ xfer->seg_size;
+	if (xfer->segs >= WA_SEGS_MAX) {
+		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
+			(int)(urb->transfer_buffer_length / xfer->seg_size),
+			WA_SEGS_MAX);
+		result = -EINVAL;
+		goto error;
+	}
+	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
+		xfer->segs = 1;
+error:
+	d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
+		xfer, rpipe, urb, (int)result);
+	return result;
+}
+
+/** Fill in the common request header and xfer-type specific data. */
+static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
+				 struct wa_xfer_hdr *xfer_hdr0,
+				 enum wa_xfer_type xfer_type,
+				 size_t xfer_hdr_size)
+{
+	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+	xfer_hdr0->bLength = xfer_hdr_size;
+	xfer_hdr0->bRequestType = xfer_type;
+	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
+	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
+	xfer_hdr0->bTransferSegment = 0;
+	switch (xfer_type) {
+	case WA_XFER_TYPE_CTL: {
+		struct wa_xfer_ctl *xfer_ctl =
+			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
+		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
+		BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
+		       && xfer->urb->setup_packet == NULL);
+		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
+		       sizeof(xfer_ctl->baSetupData));
+		break;
+	}
+	case WA_XFER_TYPE_BI:
+		break;
+	case WA_XFER_TYPE_ISO:
+		printk(KERN_ERR "FIXME: ISOC not implemented\n");
+	default:
+		BUG();
+	};
+}
+
+/*
+ * Callback for the OUT data phase of the segment request
+ *
+ * Check wa_seg_cb(); most comments also apply here because this
+ * function does almost the same thing and they work closely
+ * together.
+ *
+ * If the seg request has failed but this DTO phase has succeeded,
+ * wa_seg_cb() has already failed the segment and moved the
+ * status to WA_SEG_ERROR, so this will go through 'case 0' and
+ * effectively do nothing.
+ */
+static void wa_seg_dto_cb(struct urb *urb)
+{
+	struct wa_seg *seg = urb->context;
+	struct wa_xfer *xfer = seg->xfer;
+	struct wahc *wa;
+	struct device *dev;
+	struct wa_rpipe *rpipe;
+	unsigned long flags;
+	unsigned rpipe_ready = 0;
+	u8 done = 0;
+
+	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
+	switch (urb->status) {
+	case 0:
+		spin_lock_irqsave(&xfer->lock, flags);
+		wa = xfer->wa;
+		dev = &wa->usb_iface->dev;
+		d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
+			   xfer, seg->index, urb->actual_length);
+		if (seg->status < WA_SEG_PENDING)
+			seg->status = WA_SEG_PENDING;
+		seg->result = urb->actual_length;
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		break;
+	case -ECONNRESET:	/* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+		break;
+	default:		/* Other errors ... */
+		spin_lock_irqsave(&xfer->lock, flags);
+		wa = xfer->wa;
+		dev = &wa->usb_iface->dev;
+		rpipe = xfer->ep->hcpriv;
+		if (printk_ratelimit())
+			dev_err(dev, "xfer %p#%u: data out error %d\n",
+				xfer, seg->index, urb->status);
+		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME)){
+			dev_err(dev, "DTO: URB max acceptable errors "
+				"exceeded, resetting device\n");
+			wa_reset_all(wa);
+		}
+		if (seg->status != WA_SEG_ERROR) {
+			seg->status = WA_SEG_ERROR;
+			seg->result = urb->status;
+			xfer->segs_done++;
+			__wa_xfer_abort(xfer);
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			done = __wa_xfer_is_done(xfer);
+		}
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		if (done)
+			wa_xfer_completion(xfer);
+		if (rpipe_ready)
+			wa_xfer_delayed_run(rpipe);
+	}
+	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
+}
+
+/*
+ * Callback for the segment request
+ *
+ * If successful, transition state (unless already transitioned or an
+ * outbound transfer); otherwise, take note of the error, mark this
+ * segment done and try completion.
+ *
+ * Note we don't access until we are sure that the transfer hasn't
+ * been cancelled (ECONNRESET, ENOENT), which could mean that
+ * seg->xfer could be already gone.
+ *
+ * We have to check before setting the status to WA_SEG_PENDING
+ * because sometimes the xfer result callback arrives before this
+ * callback (geeeeeeze), so it might happen that we are already in
+ * another state. As well, we don't set it if the transfer is inbound,
+ * as in that case, wa_seg_dto_cb will do it when the OUT data phase
+ * finishes.
+ */
+static void wa_seg_cb(struct urb *urb)
+{
+	struct wa_seg *seg = urb->context;
+	struct wa_xfer *xfer = seg->xfer;
+	struct wahc *wa;
+	struct device *dev;
+	struct wa_rpipe *rpipe;
+	unsigned long flags;
+	unsigned rpipe_ready;
+	u8 done = 0;
+
+	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
+	switch (urb->status) {
+	case 0:
+		spin_lock_irqsave(&xfer->lock, flags);
+		wa = xfer->wa;
+		dev = &wa->usb_iface->dev;
+		d_printf(2, dev, "xfer %p#%u: request done\n",
+			   xfer, seg->index);
+		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
+			seg->status = WA_SEG_PENDING;
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		break;
+	case -ECONNRESET:	/* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+		break;
+	default:		/* Other errors ... */
+		spin_lock_irqsave(&xfer->lock, flags);
+		wa = xfer->wa;
+		dev = &wa->usb_iface->dev;
+		rpipe = xfer->ep->hcpriv;
+		if (printk_ratelimit())
+			dev_err(dev, "xfer %p#%u: request error %d\n",
+				xfer, seg->index, urb->status);
+		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME)){
+			dev_err(dev, "DTO: URB max acceptable errors "
+				"exceeded, resetting device\n");
+			wa_reset_all(wa);
+		}
+		usb_unlink_urb(seg->dto_urb);
+		seg->status = WA_SEG_ERROR;
+		seg->result = urb->status;
+		xfer->segs_done++;
+		__wa_xfer_abort(xfer);
+		rpipe_ready = rpipe_avail_inc(rpipe);
+		done = __wa_xfer_is_done(xfer);
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		if (done)
+			wa_xfer_completion(xfer);
+		if (rpipe_ready)
+			wa_xfer_delayed_run(rpipe);
+	}
+	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
+}
+
+/*
+ * Allocate the segs array and initialize each of them
+ *
+ * The segments are freed by wa_xfer_destroy() when the xfer use count
+ * drops to zero; however, because each segment is given the same life
+ * cycle as the USB URB it contains, it is actually freed by
+ * usb_put_urb() on the contained USB URB (twisted, eh?).
+ */
+static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
+{
+	int result, cnt;
+	size_t alloc_size = sizeof(*xfer->seg[0])
+		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
+	struct usb_device *usb_dev = xfer->wa->usb_dev;
+	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
+	struct wa_seg *seg;
+	size_t buf_itr, buf_size, buf_itr_size;
+
+	result = -ENOMEM;
+	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
+	if (xfer->seg == NULL)
+		goto error_segs_kzalloc;
+	buf_itr = 0;
+	buf_size = xfer->urb->transfer_buffer_length;
+	for (cnt = 0; cnt < xfer->segs; cnt++) {
+		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
+		if (seg == NULL)
+			goto error_seg_kzalloc;
+		wa_seg_init(seg);
+		seg->xfer = xfer;
+		seg->index = cnt;
+		usb_fill_bulk_urb(&seg->urb, usb_dev,
+				  usb_sndbulkpipe(usb_dev,
+						  dto_epd->bEndpointAddress),
+				  &seg->xfer_hdr, xfer_hdr_size,
+				  wa_seg_cb, seg);
+		buf_itr_size = buf_size > xfer->seg_size ?
+			xfer->seg_size : buf_size;
+		if (xfer->is_inbound == 0 && buf_size > 0) {
+			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
+			if (seg->dto_urb == NULL)
+				goto error_dto_alloc;
+			usb_fill_bulk_urb(
+				seg->dto_urb, usb_dev,
+				usb_sndbulkpipe(usb_dev,
+						dto_epd->bEndpointAddress),
+				NULL, 0, wa_seg_dto_cb, seg);
+			if (xfer->is_dma) {
+				seg->dto_urb->transfer_dma =
+					xfer->urb->transfer_dma + buf_itr;
+				seg->dto_urb->transfer_flags |=
+					URB_NO_TRANSFER_DMA_MAP;
+			} else
+				seg->dto_urb->transfer_buffer =
+					xfer->urb->transfer_buffer + buf_itr;
+			seg->dto_urb->transfer_buffer_length = buf_itr_size;
+		}
+		seg->status = WA_SEG_READY;
+		buf_itr += buf_itr_size;
+		buf_size -= buf_itr_size;
+	}
+	return 0;
+
+error_dto_alloc:
+	kfree(xfer->seg[cnt]);
+	xfer->seg[cnt] = NULL;
+error_seg_kzalloc:
+	/* use the fact that cnt is left at where it failed; segments
+	 * 0..cnt-1 were fully set up and have to be undone */
+	while (--cnt >= 0) {
+		if (xfer->is_inbound == 0)
+			usb_free_urb(xfer->seg[cnt]->dto_urb);
+		kfree(xfer->seg[cnt]);
+	}
+error_segs_kzalloc:
+	return result;
+}
+
+/*
+ * Allocates all the stuff needed to submit a transfer
+ *
+ * Breaks the whole data buffer in a list of segments, each one has a
+ * structure allocated to it and linked in xfer->seg[index]
+ *
+ * FIXME: merge setup_segs() and the last part of this function, no
+ *        need to do two for loops when we could run everything in a
+ *        single one
+ */
+static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
+{
+	int result;
+	struct device *dev = &xfer->wa->usb_iface->dev;
+	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
+	size_t xfer_hdr_size, cnt, transfer_size;
+	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
+
+	d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
+		  xfer, xfer->ep->hcpriv, urb);
+
+	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
+	if (result < 0)
+		goto error_setup_sizes;
+	xfer_hdr_size = result;
+	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
+	if (result < 0) {
+		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
+			xfer, xfer->segs, result);
+		goto error_setup_segs;
+	}
+	/* Fill the first header */
+	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+	wa_xfer_id_init(xfer);
+	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
+
+	/* Fill remaining headers */
+	xfer_hdr = xfer_hdr0;
+	transfer_size = urb->transfer_buffer_length;
+	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
+		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
+	transfer_size -=  xfer->seg_size;
+	for (cnt = 1; cnt < xfer->segs; cnt++) {
+		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
+		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+		xfer_hdr->bTransferSegment = cnt;
+		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
+			cpu_to_le32(xfer->seg_size)
+			: cpu_to_le32(transfer_size);
+		xfer->seg[cnt]->status = WA_SEG_READY;
+		transfer_size -=  xfer->seg_size;
+	}
+	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
+	result = 0;
+error_setup_segs:
+error_setup_sizes:
+	d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
+		xfer, xfer->ep->hcpriv, urb, result);
+	return result;
+}
+
+/*
+ * Submit a segment's request URB and, if there is one, its DTO URB
+ *
+ * rpipe->seg_lock is held!
+ */
+static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
+			   struct wa_seg *seg)
+{
+	int result;
+	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
+	if (result < 0) {
+		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
+		       xfer, seg->index, result);
+		goto error_seg_submit;
+	}
+	if (seg->dto_urb) {
+		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
+		if (result < 0) {
+			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
+			       xfer, seg->index, result);
+			goto error_dto_submit;
+		}
+	}
+	seg->status = WA_SEG_SUBMITTED;
+	rpipe_avail_dec(rpipe);
+	return 0;
+
+error_dto_submit:
+	usb_unlink_urb(&seg->urb);
+error_seg_submit:
+	seg->status = WA_SEG_ERROR;
+	seg->result = result;
+	return result;
+}
+
+/*
+ * Execute more queued request segments until the maximum concurrent allowed
+ *
+ * The ugly unlock/lock sequence on the error path is needed as the
+ * xfer->lock normally nests the seg_lock and not vice versa.
+ *
+ */
+static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+{
+	int result;
+	struct device *dev = &rpipe->wa->usb_iface->dev;
+	struct wa_seg *seg;
+	struct wa_xfer *xfer;
+	unsigned long flags;
+
+	d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
+		  le16_to_cpu(rpipe->descr.wRPipeIndex),
+		  atomic_read(&rpipe->segs_available));
+	spin_lock_irqsave(&rpipe->seg_lock, flags);
+	while (atomic_read(&rpipe->segs_available) > 0
+	      && !list_empty(&rpipe->seg_list)) {
+		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
+				 list_node);
+		list_del(&seg->list_node);
+		xfer = seg->xfer;
+		result = __wa_seg_submit(rpipe, xfer, seg);
+		d_printf(1, dev, "xfer %p#%u submitted from delayed "
+			 "[%d segments available] %d\n",
+			 xfer, seg->index,
+			 atomic_read(&rpipe->segs_available), result);
+		if (unlikely(result < 0)) {
+			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+			spin_lock_irqsave(&xfer->lock, flags);
+			__wa_xfer_abort(xfer);
+			xfer->segs_done++;
+			spin_unlock_irqrestore(&xfer->lock, flags);
+			spin_lock_irqsave(&rpipe->seg_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+	d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
+		le16_to_cpu(rpipe->descr.wRPipeIndex),
+		atomic_read(&rpipe->segs_available));
+
+}
+
+/*
+ * Submit a transfer's segments, delaying those the rpipe can't take yet
+ *
+ * xfer->lock is taken
+ *
+ * On failure submitting we just stop submitting and return error;
+ * wa_urb_enqueue_b() will execute the completion path
+ */
+static int __wa_xfer_submit(struct wa_xfer *xfer)
+{
+	int result;
+	struct wahc *wa = xfer->wa;
+	struct device *dev = &wa->usb_iface->dev;
+	unsigned cnt;
+	struct wa_seg *seg;
+	unsigned long flags;
+	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
+	u8 available;
+	u8 empty;
+
+	d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
+		  xfer, xfer->ep->hcpriv);
+
+	spin_lock_irqsave(&wa->xfer_list_lock, flags);
+	list_add_tail(&xfer->list_node, &wa->xfer_list);
+	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+
+	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
+	result = 0;
+	spin_lock_irqsave(&rpipe->seg_lock, flags);
+	for (cnt = 0; cnt < xfer->segs; cnt++) {
+		available = atomic_read(&rpipe->segs_available);
+		empty = list_empty(&rpipe->seg_list);
+		seg = xfer->seg[cnt];
+		d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
+			 xfer, cnt, available, empty,
+			 available == 0 || !empty ? "delayed" : "submitted");
+		if (available == 0 || !empty) {
+			d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
+			seg->status = WA_SEG_DELAYED;
+			list_add_tail(&seg->list_node, &rpipe->seg_list);
+		} else {
+			result = __wa_seg_submit(rpipe, xfer, seg);
+			if (result < 0)
+				goto error_seg_submit;
+		}
+		xfer->segs_submitted++;
+	}
+	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+	d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
+		xfer->ep->hcpriv);
+	return result;
+
+error_seg_submit:
+	__wa_xfer_abort(xfer);
+	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+	d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
+		xfer->ep->hcpriv);
+	return result;
+}
+
+/*
+ * Second part of a URB/transfer enqueuement
+ *
+ * Assumes this comes from wa_urb_enqueue() [maybe through
+ * wa_urb_enqueue_run()]. At this point:
+ *
+ * xfer->wa	filled and refcounted
+ * xfer->ep	filled with rpipe refcounted if
+ *              delayed == 0
+ * xfer->urb 	filled and refcounted (this is the case when called
+ *              from wa_urb_enqueue() as we come from usb_submit_urb()
+ *              and when called by wa_urb_enqueue_run(), as we took an
+ *              extra ref dropped by _run() after we return).
+ * xfer->gfp	filled
+ *
+ * If we fail at __wa_xfer_submit(), then we just check if we are done
+ * and if so, we run the completion procedure. However, if we are not
+ * yet done, we do nothing and wait for the completion handlers from
+ * the submitted URBs or from the xfer-result path to kick in. If xfer
+ * result never kicks in, the xfer will timeout from the USB code and
+ * dequeue() will be called.
+ */
+static void wa_urb_enqueue_b(struct wa_xfer *xfer)
+{
+	int result;
+	unsigned long flags;
+	struct urb *urb = xfer->urb;
+	struct wahc *wa = xfer->wa;
+	struct wusbhc *wusbhc = wa->wusb;
+	struct device *dev = &wa->usb_iface->dev;
+	struct wusb_dev *wusb_dev;
+	unsigned done;
+
+	d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
+	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
+	if (result < 0)
+		goto error_rpipe_get;
+	result = -ENODEV;
+	/* FIXME: segmentation broken -- kills DWA */
+	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
+	if (urb->dev == NULL)
+		goto error_dev_gone;
+	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+	if (wusb_dev == NULL) {
+		mutex_unlock(&wusbhc->mutex);
+		goto error_dev_gone;
+	}
+	mutex_unlock(&wusbhc->mutex);
+
+	spin_lock_irqsave(&xfer->lock, flags);
+	xfer->wusb_dev = wusb_dev;
+	result = urb->status;
+	if (urb->status != -EINPROGRESS)
+		goto error_dequeued;
+
+	result = __wa_xfer_setup(xfer, urb);
+	if (result < 0)
+		goto error_xfer_setup;
+	result = __wa_xfer_submit(xfer);
+	if (result < 0)
+		goto error_xfer_submit;
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
+	return;
+
+	/* This is basically wa_xfer_completion() broken up:
+	 * wa_xfer_giveback() does a wa_xfer_put() that will call
+	 * wa_xfer_destroy() and undo what setup() did.
+	 */
+error_xfer_setup:
+error_dequeued:
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	/* FIXME: segmentation broken, kills DWA */
+	if (wusb_dev)
+		wusb_dev_put(wusb_dev);
+error_dev_gone:
+	rpipe_put(xfer->ep->hcpriv);
+error_rpipe_get:
+	xfer->result = result;
+	wa_xfer_giveback(xfer);
+	d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
+	return;
+
+error_xfer_submit:
+	done = __wa_xfer_is_done(xfer);
+	xfer->result = result;
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	if (done)
+		wa_xfer_completion(xfer);
+	d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
+	return;
+}
+
+/*
+ * Execute the delayed transfers in the Wire Adapter @wa
+ *
+ * We need to be careful here, as dequeue() could be called in the
+ * middle.  That's why we do the whole thing under the
+ * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
+ * and then checks the list -- so as we would be acquiring in inverse
+ * order, we just drop the lock once we have the xfer and reacquire it
+ * later.
+ */
+void wa_urb_enqueue_run(struct work_struct *ws)
+{
+	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_xfer *xfer, *next;
+	struct urb *urb;
+
+	d_fnstart(3, dev, "(wa %p)\n", wa);
+	spin_lock_irq(&wa->xfer_list_lock);
+	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
+				 list_node) {
+		list_del_init(&xfer->list_node);
+		spin_unlock_irq(&wa->xfer_list_lock);
+
+		urb = xfer->urb;
+		wa_urb_enqueue_b(xfer);
+		usb_put_urb(urb);	/* taken when queuing */
+
+		spin_lock_irq(&wa->xfer_list_lock);
+	}
+	spin_unlock_irq(&wa->xfer_list_lock);
+	d_fnend(3, dev, "(wa %p) = void\n", wa);
+}
+EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
+
+/*
+ * Submit a transfer to the Wire Adapter in a delayed way
+ *
+ * The process of enqueuing involves possible sleeps() [see
+ * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
+ * in an atomic section, we defer the enqueue_b() call; otherwise we
+ * call it directly.
+ *
+ * @urb: We own a reference to it done by the HCI Linux USB stack that
+ *       will be given up by calling usb_hcd_giveback_urb() or by
+ *       returning error from this function -> ergo we don't have to
+ *       refcount it.
+ */
+int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
+		   struct urb *urb, gfp_t gfp)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_xfer *xfer;
+	unsigned long my_flags;
+	unsigned cant_sleep = irqs_disabled() | in_atomic();
+
+	d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n",
+		  wa, ep, urb, urb->transfer_buffer_length, gfp);
+
+	if (urb->transfer_buffer == NULL
+	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+	    && urb->transfer_buffer_length != 0) {
+		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
+		dump_stack();
+	}
+
+	result = -ENOMEM;
+	xfer = kzalloc(sizeof(*xfer), gfp);
+	if (xfer == NULL)
+		goto error_kmalloc;
+
+	result = -ENOENT;
+	if (urb->status != -EINPROGRESS)	/* cancelled */
+		goto error_dequeued;		/* before starting? */
+	wa_xfer_init(xfer);
+	xfer->wa = wa_get(wa);
+	xfer->urb = urb;
+	xfer->gfp = gfp;
+	xfer->ep = ep;
+	urb->hcpriv = xfer;
+	d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
+		 xfer, urb, urb->pipe, urb->transfer_buffer_length,
+		 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
+		 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
+		 cant_sleep ? "deferred" : "inline");
+	if (cant_sleep) {
+		usb_get_urb(urb);
+		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
+		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+		queue_work(wusbd, &wa->xfer_work);
+	} else {
+		wa_urb_enqueue_b(xfer);
+	}
+	d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n",
+		wa, ep, urb, urb->transfer_buffer_length, gfp);
+	return 0;
+
+error_dequeued:
+	kfree(xfer);
+error_kmalloc:
+	d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n",
+		wa, ep, urb, urb->transfer_buffer_length, gfp, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wa_urb_enqueue);
+
+/*
+ * Dequeue a URB and make sure usb_hcd_giveback_urb() [the completion
+ * handler] is called.
+ *
+ * Until a transfer goes successfully through wa_urb_enqueue() it
+ * needs to be dequeued with the completion called by hand; when it is
+ * stuck in the delayed list or wa_xfer_setup() has not been called
+ * yet, we do the completion ourselves here.
+ *
+ *  not setup  If there is no hcpriv yet, that means that enqueue()
+ *             still had no time to set the xfer up. Because
+ *             urb->status should be other than -EINPROGRESS,
+ *             enqueue() will catch that and bail out.
+ *
+ * If the transfer has gone through setup, we just need to clean it
+ * up. If it has gone through submit(), we have to abort it [with an
+ * asynch request] and then make sure we cancel each segment.
+ *
+ */
+int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
+{
+	struct device *dev = &wa->usb_iface->dev;
+	unsigned long flags, flags2;
+	struct wa_xfer *xfer;
+	struct wa_seg *seg;
+	struct wa_rpipe *rpipe;
+	unsigned cnt;
+	unsigned rpipe_ready = 0;
+
+	d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb);
+
+	d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb);
+	xfer = urb->hcpriv;
+	if (xfer == NULL) {
+		/* Nothing is set up yet; enqueue() will see urb->status !=
+		 * -EINPROGRESS (set by the hcd layer) and bail out with an
+		 * error, so there is no need to do completion here.
+		 */
+		BUG_ON(urb->status == -EINPROGRESS);
+		goto out;
+	}
+	spin_lock_irqsave(&xfer->lock, flags);
+	rpipe = xfer->ep->hcpriv;
+	/* Check the delayed list -> if there, release and complete */
+	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
+	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
+		goto dequeue_delayed;
+	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
+	if (xfer->seg == NULL)  	/* still hasn't reached */
+		goto out_unlock;	/* setup(), enqueue_b() completes */
+	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
+	__wa_xfer_abort(xfer);
+	for (cnt = 0; cnt < xfer->segs; cnt++) {
+		seg = xfer->seg[cnt];
+		switch (seg->status) {
+		case WA_SEG_NOTREADY:
+		case WA_SEG_READY:
+			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
+			       xfer, cnt, seg->status);
+			WARN_ON(1);
+			break;
+		case WA_SEG_DELAYED:
+			seg->status = WA_SEG_ABORTED;
+			spin_lock_irqsave(&rpipe->seg_lock, flags2);
+			list_del(&seg->list_node);
+			xfer->segs_done++;
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
+			break;
+		case WA_SEG_SUBMITTED:
+			seg->status = WA_SEG_ABORTED;
+			usb_unlink_urb(&seg->urb);
+			if (xfer->is_inbound == 0)
+				usb_unlink_urb(seg->dto_urb);
+			xfer->segs_done++;
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			break;
+		case WA_SEG_PENDING:
+			seg->status = WA_SEG_ABORTED;
+			xfer->segs_done++;
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			break;
+		case WA_SEG_DTI_PENDING:
+			usb_unlink_urb(wa->dti_urb);
+			seg->status = WA_SEG_ABORTED;
+			xfer->segs_done++;
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			break;
+		case WA_SEG_DONE:
+		case WA_SEG_ERROR:
+		case WA_SEG_ABORTED:
+			break;
+		}
+	}
+	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
+	__wa_xfer_is_done(xfer);
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	wa_xfer_completion(xfer);
+	if (rpipe_ready)
+		wa_xfer_delayed_run(rpipe);
+	d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
+	return 0;
+
+out_unlock:
+	spin_unlock_irqrestore(&xfer->lock, flags);
+out:
+	d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
+	return 0;
+
+dequeue_delayed:
+	list_del_init(&xfer->list_node);
+	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
+	xfer->result = urb->status;
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	wa_xfer_giveback(xfer);
+	usb_put_urb(urb);		/* we got a ref in enqueue() */
+	d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wa_urb_dequeue);
+
+/*
+ * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
+ * codes
+ *
+ * Positive errno values are internal inconsistencies and should be
+ * flagged louder. Negative are to be passed up to the user in the
+ * normal way.
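+ * (e.g. a WA_XFER_STATUS_HALTED result is passed up as -EPIPE, while
+ * the reserved/invalid codes map to a positive EINVAL below, which is
+ * logged as an inconsistency and then negated).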
+ *
+ * @status: USB WA status code -- high two bits are stripped.
+ */
+static int wa_xfer_status_to_errno(u8 status)
+{
+	int errno;
+	u8 real_status = status;
+	static int xlat[] = {
+		[WA_XFER_STATUS_SUCCESS] = 		0,
+		[WA_XFER_STATUS_HALTED] = 		-EPIPE,
+		[WA_XFER_STATUS_DATA_BUFFER_ERROR] = 	-ENOBUFS,
+		[WA_XFER_STATUS_BABBLE] = 		-EOVERFLOW,
+		[WA_XFER_RESERVED] = 			EINVAL,
+		[WA_XFER_STATUS_NOT_FOUND] =		0,
+		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
+		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ,
+		[WA_XFER_STATUS_ABORTED] = 		-EINTR,
+		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,
+		[WA_XFER_INVALID_FORMAT] = 		EINVAL,
+		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL,
+		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = 	EINVAL,
+	};
+	status &= 0x3f;
+
+	if (status == 0)
+		return 0;
+	if (status >= ARRAY_SIZE(xlat)) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "%s(): BUG? "
+			       "Unknown WA transfer status 0x%02x\n",
+			       __func__, real_status);
+		return -EINVAL;
+	}
+	errno = xlat[status];
+	if (unlikely(errno > 0)) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "%s(): BUG? "
+			       "Inconsistent WA status: 0x%02x\n",
+			       __func__, real_status);
+		errno = -errno;
+	}
+	return errno;
+}
+
+/*
+ * Process a transfer result completion message
+ *
+ * inbound transfers: need to schedule a DTI read
+ *
+ * FIXME: this function needs to be broken up into parts
+ */
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+	unsigned long flags;
+	u8 seg_idx;
+	struct wa_seg *seg;
+	struct wa_rpipe *rpipe;
+	struct wa_xfer_result *xfer_result = wa->xfer_result;
+	u8 done = 0;
+	u8 usb_status;
+	unsigned rpipe_ready = 0;
+
+	d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer);
+	spin_lock_irqsave(&xfer->lock, flags);
+	seg_idx = xfer_result->bTransferSegment & 0x7f;
+	if (unlikely(seg_idx >= xfer->segs))
+		goto error_bad_seg;
+	seg = xfer->seg[seg_idx];
+	rpipe = xfer->ep->hcpriv;
+	usb_status = xfer_result->bTransferStatus;
+	d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
+		 xfer, seg_idx, usb_status, seg->status);
+	if (seg->status == WA_SEG_ABORTED
+	    || seg->status == WA_SEG_ERROR)	/* already handled */
+		goto segment_aborted;
+	if (seg->status == WA_SEG_SUBMITTED)	/* oops, result arrived first */
+		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
+	if (seg->status != WA_SEG_PENDING) {
+		if (printk_ratelimit())
+			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
+				xfer, seg_idx, seg->status);
+		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
+	}
+	if (usb_status & 0x80) {
+		seg->result = wa_xfer_status_to_errno(usb_status);
+		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
+			xfer, seg->index, usb_status);
+		goto error_complete;
+	}
+	/* FIXME: we ignore warnings, tally them for stats */
+	if (usb_status & 0x40) 		/* Warning?... */
+		usb_status = 0;		/* ... pass */
+	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
+		seg->status = WA_SEG_DTI_PENDING;
+		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+		if (xfer->is_dma) {
+			wa->buf_in_urb->transfer_dma =
+				xfer->urb->transfer_dma
+				+ seg_idx * xfer->seg_size;
+			wa->buf_in_urb->transfer_flags
+				|= URB_NO_TRANSFER_DMA_MAP;
+		} else {
+			wa->buf_in_urb->transfer_buffer =
+				xfer->urb->transfer_buffer
+				+ seg_idx * xfer->seg_size;
+			wa->buf_in_urb->transfer_flags
+				&= ~URB_NO_TRANSFER_DMA_MAP;
+		}
+		wa->buf_in_urb->transfer_buffer_length =
+			le32_to_cpu(xfer_result->dwTransferLength);
+		wa->buf_in_urb->context = seg;
+		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+		if (result < 0)
+			goto error_submit_buf_in;
+	} else {
+		/* OUT data phase, complete it -- */
+		seg->status = WA_SEG_DONE;
+		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
+		xfer->segs_done++;
+		rpipe_ready = rpipe_avail_inc(rpipe);
+		done = __wa_xfer_is_done(xfer);
+	}
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	if (done)
+		wa_xfer_completion(xfer);
+	if (rpipe_ready)
+		wa_xfer_delayed_run(rpipe);
+	d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer);
+	return;
+
+
+error_submit_buf_in:
+	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+		dev_err(dev, "DTI: URB max acceptable errors "
+			"exceeded, resetting device\n");
+		wa_reset_all(wa);
+	}
+	if (printk_ratelimit())
+		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
+			xfer, seg_idx, result);
+	seg->result = result;
+error_complete:
+	seg->status = WA_SEG_ERROR;
+	xfer->segs_done++;
+	rpipe_ready = rpipe_avail_inc(rpipe);
+	__wa_xfer_abort(xfer);
+	done = __wa_xfer_is_done(xfer);
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	if (done)
+		wa_xfer_completion(xfer);
+	if (rpipe_ready)
+		wa_xfer_delayed_run(rpipe);
+	d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n",
+		wa, xfer);
+	return;
+
+
+error_bad_seg:
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	wa_urb_dequeue(wa, xfer->urb);
+	if (printk_ratelimit())
+		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
+	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+		dev_err(dev, "DTI: URB max acceptable errors "
+			"exceeded, resetting device\n");
+		wa_reset_all(wa);
+	}
+	d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer);
+	return;
+
+
+segment_aborted:
+	/* nothing to do, as the aborter did the completion */
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n",
+		wa, xfer);
+	return;
+
+}
+
+/*
+ * Callback for the IN data phase
+ *
+ * If successful, transition state; otherwise, take note of the
+ * error, mark this segment done and try completion.
+ *
+ * Note we don't access until we are sure that the transfer hasn't
+ * been cancelled (ECONNRESET, ENOENT), which could mean that
+ * seg->xfer could be already gone.
+ */
+static void wa_buf_in_cb(struct urb *urb)
+{
+	struct wa_seg *seg = urb->context;
+	struct wa_xfer *xfer = seg->xfer;
+	struct wahc *wa;
+	struct device *dev;
+	struct wa_rpipe *rpipe;
+	unsigned rpipe_ready;
+	unsigned long flags;
+	u8 done = 0;
+
+	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
+	switch (urb->status) {
+	case 0:
+		spin_lock_irqsave(&xfer->lock, flags);
+		wa = xfer->wa;
+		dev = &wa->usb_iface->dev;
+		rpipe = xfer->ep->hcpriv;
+		d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n",
+			   xfer, seg->index, (size_t)urb->actual_length);
+		seg->status = WA_SEG_DONE;
+		seg->result = urb->actual_length;
+		xfer->segs_done++;
+		rpipe_ready = rpipe_avail_inc(rpipe);
+		done = __wa_xfer_is_done(xfer);
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		if (done)
+			wa_xfer_completion(xfer);
+		if (rpipe_ready)
+			wa_xfer_delayed_run(rpipe);
+		break;
+	case -ECONNRESET:	/* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+		break;
+	default:		/* Other errors ... */
+		spin_lock_irqsave(&xfer->lock, flags);
+		wa = xfer->wa;
+		dev = &wa->usb_iface->dev;
+		rpipe = xfer->ep->hcpriv;
+		if (printk_ratelimit())
+			dev_err(dev, "xfer %p#%u: data in error %d\n",
+				xfer, seg->index, urb->status);
+		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME)){
+			dev_err(dev, "DTO: URB max acceptable errors "
+				"exceeded, resetting device\n");
+			wa_reset_all(wa);
+		}
+		seg->status = WA_SEG_ERROR;
+		seg->result = urb->status;
+		xfer->segs_done++;
+		rpipe_ready = rpipe_avail_inc(rpipe);
+		__wa_xfer_abort(xfer);
+		done = __wa_xfer_is_done(xfer);
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		if (done)
+			wa_xfer_completion(xfer);
+		if (rpipe_ready)
+			wa_xfer_delayed_run(rpipe);
+	}
+	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
+}
+
+/*
+ * Handle an incoming transfer result buffer
+ *
+ * Given a transfer result buffer, it completes the transfer (possibly
+ * scheduling a buffer-in read) and then resubmits the DTI URB for a
+ * new transfer result read.
+ *
+ *
+ * The xfer_result DTI URB state machine
+ *
+ * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
+ *
+ * We start in OFF mode, the first xfer_result notification [through
+ * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
+ * read.
+ *
+ * We receive a buffer -- if it is not a xfer_result, we complain and
+ * repost the DTI-URB. If it is a xfer_result then do the xfer seg
+ * request accounting. If it is an IN segment, we move to RBI and post
+ * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
+ * repost the DTI-URB and move back to the RXR state. If there was no
+ * IN segment, it will repost the DTI-URB.
+ *
+ * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
+ * errors) in the URBs.
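+ *
+ * A compact view of the transitions described above:
+ *
+ *   OFF --first xfer notification--> RXR
+ *   RXR --xfer result for an IN segment--> RBI --buf-in done--> RXR
+ *   RXR --anything else--> RXR (DTI-URB reposted)
+ *   any --ENOENT/ESHUTDOWN/too many errors--> OFF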
+ */
+static void wa_xfer_result_cb(struct urb *urb)
+{
+	int result;
+	struct wahc *wa = urb->context;
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_xfer_result *xfer_result;
+	u32 xfer_id;
+	struct wa_xfer *xfer;
+	u8 usb_status;
+
+	d_fnstart(3, dev, "(%p)\n", wa);
+	BUG_ON(wa->dti_urb != urb);
+	switch (wa->dti_urb->status) {
+	case 0:
+		/* We have a xfer result buffer; check it */
+		d_printf(2, dev, "DTI: xfer result %d bytes at %p\n",
+			   urb->actual_length, urb->transfer_buffer);
+		d_dump(3, dev, urb->transfer_buffer, urb->actual_length);
+		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
+			dev_err(dev, "DTI Error: xfer result--bad size "
+				"xfer result (%d bytes vs %zu needed)\n",
+				urb->actual_length, sizeof(*xfer_result));
+			break;
+		}
+		xfer_result = wa->xfer_result;
+		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
+			dev_err(dev, "DTI Error: xfer result--"
+				"bad header length %u\n",
+				xfer_result->hdr.bLength);
+			break;
+		}
+		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
+			dev_err(dev, "DTI Error: xfer result--"
+				"bad header type 0x%02x\n",
+				xfer_result->hdr.bNotifyType);
+			break;
+		}
+		usb_status = xfer_result->bTransferStatus & 0x3f;
+		if (usb_status == WA_XFER_STATUS_ABORTED
+		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
+			/* taken care of already */
+			break;
+		xfer_id = xfer_result->dwTransferID;
+		xfer = wa_xfer_get_by_id(wa, xfer_id);
+		if (xfer == NULL) {
+			/* FIXME: transaction might have been cancelled */
+			dev_err(dev, "DTI Error: xfer result--"
+				"unknown xfer 0x%08x (status 0x%02x)\n",
+				xfer_id, usb_status);
+			break;
+		}
+		wa_xfer_result_chew(wa, xfer);
+		wa_xfer_put(xfer);
+		break;
+	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
+	case -ESHUTDOWN:	/* going away! */
+		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
+		goto out;
+	default:
+		/* Unknown error */
+		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "DTI: URB max acceptable errors "
+				"exceeded, resetting device\n");
+			wa_reset_all(wa);
+			goto out;
+		}
+		if (printk_ratelimit())
+			dev_err(dev, "DTI: URB error %d\n", urb->status);
+		break;
+	}
+	/* Resubmit the DTI URB */
+	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+	if (result < 0) {
+		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
+			"resetting\n", result);
+		wa_reset_all(wa);
+	}
+out:
+	d_fnend(3, dev, "(%p) = void\n", wa);
+	return;
+}
+
+/*
+ * Transfer complete notification
+ *
+ * Called from the notif.c code. We get a notification on EP2 saying
+ * that some endpoint has some transfer result data available. We are
+ * about to read it.
+ *
+ * To speed up things, we always have a URB reading the DTI URB; we
+ * don't really set it up and start it until the first xfer complete
+ * notification arrives, which is what we do here.
+ *
+ * Follow up in wa_xfer_result_cb(), as that's where the whole state
+ * machine starts.
+ *
+ * So here we just initialize the DTI URB for reading transfer result
+ * notifications and also the buffer-in URB, for reading buffers. Then
+ * we just submit the DTI URB.
+ *
+ * @wa shall be referenced
+ */
+void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_notif_xfer *notif_xfer;
+	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+
+	d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr);
+	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
+	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
+
+	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
+		/* FIXME: hardcoded limitation, adapt */
+		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
+			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
+		goto error;
+	}
+	if (wa->dti_urb != NULL)	/* DTI URB already started */
+		goto out;
+
+	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (wa->dti_urb == NULL) {
+		dev_err(dev, "Can't allocate DTI URB\n");
+		goto error_dti_urb_alloc;
+	}
+	usb_fill_bulk_urb(
+		wa->dti_urb, wa->usb_dev,
+		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
+		wa->xfer_result, wa->xfer_result_size,
+		wa_xfer_result_cb, wa);
+
+	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (wa->buf_in_urb == NULL) {
+		dev_err(dev, "Can't allocate BUF-IN URB\n");
+		goto error_buf_in_urb_alloc;
+	}
+	usb_fill_bulk_urb(
+		wa->buf_in_urb, wa->usb_dev,
+		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
+		NULL, 0, wa_buf_in_cb, wa);
+	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
+			"resetting\n", result);
+		goto error_dti_urb_submit;
+	}
+out:
+	d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
+	return;
+
+error_dti_urb_submit:
+	usb_put_urb(wa->buf_in_urb);
+error_buf_in_urb_alloc:
+	usb_put_urb(wa->dti_urb);
+	wa->dti_urb = NULL;
+error_dti_urb_alloc:
+error:
+	wa_reset_all(wa);
+	d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
+	return;
+}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
new file mode 100644
index 0000000..07c63a3
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -0,0 +1,418 @@
+/*
+ * Wireless USB Host Controller
+ * sysfs glue, wusbcore module support and life cycle management
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Creation/destruction of wusbhc is split in two parts; that that
+ * doesn't require the HCD to be added (wusbhc_{create,destroy}) and
+ * the one that requires (phase B, wusbhc_b_{create,destroy}).
+ *
+ * This is so because usb_add_hcd() will start the HC, and thus, all
+ * the HC specific stuff has to be already initialized (like sysfs
+ * thingies).
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include "wusbhc.h"
+
+/**
+ * Extract the wusbhc that corresponds to a USB Host Controller class device
+ *
+ * WARNING! Apply only if @dev is that of a
+ *          wusbhc.usb_hcd.self->class_dev; otherwise, you lose.
+ */
+static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
+{
+	struct usb_bus *usb_bus = dev_get_drvdata(dev);
+	struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus);
+	return usb_hcd_to_wusbhc(usb_hcd);
+}
+
+/*
+ * Show & store the current WUSB trust timeout
+ *
+ * We don't do locking--it is an 'atomic' value.
+ *
+ * The units that we store/show are always MILLISECONDS. However, the
+ * value of trust_timeout is jiffies.
+ */
+static ssize_t wusb_trust_timeout_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
+}
+
+static ssize_t wusb_trust_timeout_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+	ssize_t result = -ENOSYS;
+	unsigned trust_timeout;
+
+	result = sscanf(buf, "%u", &trust_timeout);
+	if (result != 1) {
+		result = -EINVAL;
+		goto out;
+	}
+	/* FIXME: maybe we should check for range validity? */
+	wusbhc->trust_timeout = trust_timeout;
+	cancel_delayed_work(&wusbhc->keep_alive_timer);
+	flush_workqueue(wusbd);
+	queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+			   (trust_timeout * CONFIG_HZ)/1000/2);
+out:
+	return result < 0 ? result : size;
+}
+static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
+					     wusb_trust_timeout_store);
+
+/*
+ * Show & store the current WUSB CHID
+ */
+static ssize_t wusb_chid_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+	ssize_t result = 0;
+
+	if (wusbhc->wuie_host_info != NULL)
+		result += ckhdid_printf(buf, PAGE_SIZE,
+					&wusbhc->wuie_host_info->CHID);
+	return result;
+}
+
+/*
+ * Store a new CHID
+ *
+ * This will (FIXME) trigger many changes.
+ *
+ * - Send an all zeros CHID and it will stop the controller
+ * - Send a non-zero CHID and it will start it
+ *   (if it was already started, it will just change the CHID,
+ *   disconnecting all devices first).
+ *
+ * So first we scan the MMC we are sent and then we act on it.  We
+ * read it in the same format as we print it, an ASCII string of 16
+ * hex bytes.
+ *
+ * See wusbhc_chid_set() for more info.
+ */
+static ssize_t wusb_chid_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+	struct wusb_ckhdid chid;
+	ssize_t result;
+
+	result = sscanf(buf,
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx\n",
+			&chid.data[0] , &chid.data[1] ,
+			&chid.data[2] , &chid.data[3] ,
+			&chid.data[4] , &chid.data[5] ,
+			&chid.data[6] , &chid.data[7] ,
+			&chid.data[8] , &chid.data[9] ,
+			&chid.data[10], &chid.data[11],
+			&chid.data[12], &chid.data[13],
+			&chid.data[14], &chid.data[15]);
+	if (result != 16) {
+		dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): "
+			"%d\n", (int)result);
+		return -EINVAL;
+	}
+	result = wusbhc_chid_set(wusbhc, &chid);
+	return result < 0 ? result : size;
+}
+static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
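+
+/*
+ * Illustrative sketch only (not part of the original driver): a
+ * hypothetical helper that formats a wusb_ckhdid in the exact ASCII
+ * layout wusb_chid_store() parses above -- sixteen space-separated
+ * two-digit hex bytes:
+ *
+ *	static void example_chid_format(char *buf, size_t size,
+ *					const struct wusb_ckhdid *chid)
+ *	{
+ *		int i, n = 0;
+ *
+ *		for (i = 0; i < 16; i++)
+ *			n += scnprintf(buf + n, size - n, "%02x%c",
+ *				       chid->data[i], i == 15 ? '\n' : ' ');
+ *	}
+ */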
+
+/* Group all the WUSBHC attributes */
+static struct attribute *wusbhc_attrs[] = {
+		&dev_attr_wusb_trust_timeout.attr,
+		&dev_attr_wusb_chid.attr,
+		NULL,
+};
+
+static struct attribute_group wusbhc_attr_group = {
+	.name = NULL,	/* we want them in the same directory */
+	.attrs = wusbhc_attrs,
+};
+
+/*
+ * Create a wusbhc instance
+ *
+ * NOTEs:
+ *
+ *  - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
+ *    initialized but not added.
+ *
+ *  - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
+ *
+ *  - fill out wusbhc->uwb_rc and refcount it before calling
+ *  - fill out the wusbhc->sec_modes array
+ */
+int wusbhc_create(struct wusbhc *wusbhc)
+{
+	int result = 0;
+
+	wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
+	mutex_init(&wusbhc->mutex);
+	result = wusbhc_mmcie_create(wusbhc);
+	if (result < 0)
+		goto error_mmcie_create;
+	result = wusbhc_devconnect_create(wusbhc);
+	if (result < 0)
+		goto error_devconnect_create;
+	result = wusbhc_rh_create(wusbhc);
+	if (result < 0)
+		goto error_rh_create;
+	result = wusbhc_sec_create(wusbhc);
+	if (result < 0)
+		goto error_sec_create;
+	return 0;
+
+error_sec_create:
+	wusbhc_rh_destroy(wusbhc);
+error_rh_create:
+	wusbhc_devconnect_destroy(wusbhc);
+error_devconnect_create:
+	wusbhc_mmcie_destroy(wusbhc);
+error_mmcie_create:
+	return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_create);
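+
+/*
+ * Illustration only (hypothetical HC driver code, not part of this
+ * file): the two-phase creation described above is meant to be used
+ * roughly as
+ *
+ *	result = wusbhc_create(wusbhc);
+ *	if (result < 0)
+ *		goto error_wusbhc_create;
+ *	result = usb_add_hcd(&wusbhc->usb_hcd, irq, irqflags);
+ *	if (result < 0)
+ *		goto error_add_hcd;
+ *	result = wusbhc_b_create(wusbhc);
+ *	if (result < 0)
+ *		goto error_wusbhc_b_create;
+ *
+ * with teardown in the reverse order: wusbhc_b_destroy(),
+ * usb_remove_hcd() and wusbhc_destroy().
+ */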
+
+static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
+{
+	return &wusbhc->usb_hcd.self.controller->kobj;
+}
+
+/*
+ * Phase B of a wusbhc instance creation
+ *
+ * Creates fields that depend on wusbhc->usb_hcd having been
+ * added. This is where we create the sysfs files in
+ * /sys/class/usb_host/usb_hostX/.
+ *
+ * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
+ *       layer (hwahc or whci)
+ */
+int wusbhc_b_create(struct wusbhc *wusbhc)
+{
+	int result = 0;
+	struct device *dev = wusbhc->usb_hcd.self.controller;
+
+	result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+	if (result < 0) {
+		dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
+		goto error_create_attr_group;
+	}
+
+	result = wusbhc_pal_register(wusbhc);
+	if (result < 0)
+		goto error_pal_register;
+	return 0;
+
+error_pal_register:
+	sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+error_create_attr_group:
+	return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_b_create);
+
+void wusbhc_b_destroy(struct wusbhc *wusbhc)
+{
+	wusbhc_pal_unregister(wusbhc);
+	sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+}
+EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
+
+void wusbhc_destroy(struct wusbhc *wusbhc)
+{
+	wusbhc_sec_destroy(wusbhc);
+	wusbhc_rh_destroy(wusbhc);
+	wusbhc_devconnect_destroy(wusbhc);
+	wusbhc_mmcie_destroy(wusbhc);
+}
+EXPORT_SYMBOL_GPL(wusbhc_destroy);
+
+struct workqueue_struct *wusbd;
+EXPORT_SYMBOL_GPL(wusbd);
+
+/*
+ * WUSB Cluster ID allocation map
+ *
+ * Each WUSB bus in a channel is identified with a Cluster Id in the
+ * unauth address space (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
+ * (that's space for 31 WUSB controllers, as 0xff can't be taken). We
+ * start taking from 0xff, 0xfe, 0xfd... (hence the 0xff - id mapping).
+ *
+ * For each one we take, we mark it in the bitmap.
+ */
+#define CLUSTER_IDS 32
+static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);
+static DEFINE_SPINLOCK(wusb_cluster_ids_lock);
+
+/*
+ * Get a WUSB Cluster ID
+ *
+ * Need to release with wusb_cluster_id_put() when done w/ it.
+ */
+/* FIXME: coordinate with the choose_addres() from the USB stack */
+/* we want to leave the top of the 128 range for cluster addresses and
+ * the bottom for device addresses (as we map them one on one with
+ * ports). */
+u8 wusb_cluster_id_get(void)
+{
+	u8 id;
+	spin_lock(&wusb_cluster_ids_lock);
+	id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
+	if (id >= CLUSTER_IDS) {
+		id = 0;
+		goto out;
+	}
+	set_bit(id, wusb_cluster_id_table);
+	id = (u8) 0xff - id;
+out:
+	spin_unlock(&wusb_cluster_ids_lock);
+	return id;
+
+}
+EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
+
+/*
+ * Release a WUSB Cluster ID
+ *
+ * Obtained it with wusb_cluster_id_get()
+ */
+void wusb_cluster_id_put(u8 id)
+{
+	id = 0xff - id;
+	BUG_ON(id >= CLUSTER_IDS);
+	spin_lock(&wusb_cluster_ids_lock);
+	WARN_ON(!test_bit(id, wusb_cluster_id_table));
+	clear_bit(id, wusb_cluster_id_table);
+	spin_unlock(&wusb_cluster_ids_lock);
+}
+EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
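+
+/*
+ * Illustration only (hypothetical caller): the two calls above are
+ * meant to be paired, with 0 treated as the "no IDs left" value that
+ * wusb_cluster_id_get() returns on failure:
+ *
+ *	u8 cluster_id = wusb_cluster_id_get();
+ *	if (cluster_id == 0)
+ *		return -ENOSPC;
+ *	wusbhc->cluster_id = cluster_id;
+ *	...
+ *	wusb_cluster_id_put(wusbhc->cluster_id);
+ */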
+
+/**
+ * wusbhc_giveback_urb - return an URB to the USB core
+ * @wusbhc: the host controller the URB is from.
+ * @urb:    the URB.
+ * @status: the URB's status.
+ *
+ * Return an URB to the USB core doing some additional WUSB specific
+ * processing.
+ *
+ *  - After a successful transfer, update the trust timeout timestamp
+ *    for the WUSB device.
+ *
+ *  - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
+ *    condition for the WCONNECTACK_IE is that the host has observed
+ *    the associated device responding to a control transfer.
+ */
+void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
+{
+	struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+
+	if (status == 0) {
+		wusb_dev->entry_ts = jiffies;
+
+		/* wusbhc_devconnect_acked() can't be called from
+		   atomic context so defer it to a work queue. */
+		if (!list_empty(&wusb_dev->cack_node))
+			queue_work(wusbd, &wusb_dev->devconnect_acked_work);
+	}
+
+	usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
+}
+EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
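+
+/*
+ * Illustration only: a WUSB host controller driver is expected to
+ * complete transfers through the wrapper above, e.g.
+ *
+ *	wusbhc_giveback_urb(wusbhc, urb, status);
+ *
+ * rather than calling usb_hcd_giveback_urb() directly, so that the
+ * trust-timeout timestamp and connect-ack bookkeeping happen.
+ */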
+
+/**
+ * wusbhc_reset_all - reset the HC hardware
+ * @wusbhc: the host controller to reset.
+ *
+ * Request a full hardware reset of the chip.  This will also reset
+ * the radio controller and any other PALs.
+ */
+void wusbhc_reset_all(struct wusbhc *wusbhc)
+{
+	uwb_rc_reset_all(wusbhc->uwb_rc);
+}
+EXPORT_SYMBOL_GPL(wusbhc_reset_all);
+
+static struct notifier_block wusb_usb_notifier = {
+	.notifier_call = wusb_usb_ncb,
+	.priority = INT_MAX	/* Need to be called first of all */
+};
+
+static int __init wusbcore_init(void)
+{
+	int result;
+	result = wusb_crypto_init();
+	if (result < 0)
+		goto error_crypto_init;
+	/* WQ is singlethread because we need to serialize notifications */
+	wusbd = create_singlethread_workqueue("wusbd");
+	if (wusbd == NULL) {
+		result = -ENOMEM;
+		printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
+		goto error_wusbd_create;
+	}
+	usb_register_notify(&wusb_usb_notifier);
+	bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
+	set_bit(0, wusb_cluster_id_table);	/* reserve Cluster ID 0xff */
+	return 0;
+
+error_wusbd_create:
+	wusb_crypto_exit();
+error_crypto_init:
+	return result;
+
+}
+module_init(wusbcore_init);
+
+static void __exit wusbcore_exit(void)
+{
+	clear_bit(0, wusb_cluster_id_table);
+	if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
+		char buf[256];
+		bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table,
+				 CLUSTER_IDS);
+		printk(KERN_ERR "BUG: WUSB Cluster IDs not released "
+		       "on exit: %s\n", buf);
+		WARN_ON(1);
+	}
+	usb_unregister_notify(&wusb_usb_notifier);
+	destroy_workqueue(wusbd);
+	wusb_crypto_exit();
+}
+module_exit(wusbcore_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
new file mode 100644
index 0000000..d0c1324
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -0,0 +1,495 @@
+/*
+ * Wireless USB Host Controller
+ * Common infrastructure for WHCI and HWA WUSB-HC drivers
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver implements parts common to all Wireless USB Host
+ * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
+ * by:
+ *
+ *   - hwahc: HWA, USB-dongle that implements a Wireless USB host
+ *     controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
+ *
+ *   - whci: WHCI, a PCI card with a wireless host controller
+ *     (Wireless Host Controller Interface 1.0 specification).
+ *
+ * Check out the Design-overview.txt file in the source documentation
+ * for other details on the implementation.
+ *
+ * Main blocks:
+ *
+ *  rh         Root Hub emulation (part of the HCD glue)
+ *
+ *  devconnect Handle all the issues related to device connection,
+ *             authentication, disconnection, timeout, resetting,
+ *             keepalives, etc.
+ *
+ *  mmc        MMC IE broadcasting handling
+ *
+ * A host controller driver just initializes its stuff and as part of
+ * that, creates a 'struct wusbhc' instance that handles all the
+ * common WUSB mechanisms. Links in the function ops that are specific
+ * to it and then registers the host controller. Ready to run.
+ */
+
+#ifndef __WUSBHC_H__
+#define __WUSBHC_H__
+
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not
+ *        public */
+#include <linux/../../drivers/usb/core/hcd.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+
+
+/**
+ * Wireless USB device
+ *
+ * Describe a WUSB device connected to the cluster. This struct
+ * belongs to the 'struct wusb_port' it is attached to and it is
+ * responsible for putting and clearing the pointer to it.
+ *
+ * Note this "complements" the 'struct usb_device' that the usb_hcd
+ * keeps for each connected USB device. However, it extends some
+ * information that is not available (there is no hcpriv ptr in it!)
+ * *and* most importantly, its life cycle is different. It is created
+ * as soon as we get a DN_Connect (connect request notification) from
+ * the device through the WUSB host controller; the USB stack doesn't
+ * create the device until we authenticate it. FIXME: this will
+ * change.
+ *
+ * @bos:    This is allocated when the BOS descriptors are read from
+ *          the device and freed upon the wusb_dev struct dying.
+ * @wusb_cap_descr: points into @bos, and has been verified to be size
+ *                  safe.
+ */
+struct wusb_dev {
+	struct kref refcnt;
+	struct wusbhc *wusbhc;
+	struct list_head cack_node;	/* Connect-Ack list */
+	u8 port_idx;
+	u8 addr;
+	u8 beacon_type:4;
+	struct usb_encryption_descriptor ccm1_etd;
+	struct wusb_ckhdid cdid;
+	unsigned long entry_ts;
+	struct usb_bos_descriptor *bos;
+	struct usb_wireless_cap_descriptor *wusb_cap_descr;
+	struct uwb_mas_bm availability;
+	struct work_struct devconnect_acked_work;
+	struct urb *set_gtk_urb;
+	struct usb_ctrlrequest *set_gtk_req;
+	struct usb_device *usb_dev;
+};
+
+#define WUSB_DEV_ADDR_UNAUTH 0x80
+
+static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
+{
+	kref_init(&wusb_dev->refcnt);
+	/* no need to init the cack_node */
+}
+
+extern void wusb_dev_destroy(struct kref *_wusb_dev);
+
+static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
+{
+	kref_get(&wusb_dev->refcnt);
+	return wusb_dev;
+}
+
+static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
+{
+	kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
+}
+
+/**
+ * Wireless USB Host Controller root hub "fake" ports
+ * (state and device information)
+ *
+ * Wireless USB is wireless, so there are no ports; but we
+ * fake'em. Each RC can connect a maximum number of devices at a time
+ * (given in the Wireless Adapter descriptor, bNumPorts or WHCI's
+ * caps), referred to in wusbhc->ports_max.
+ *
+ * See rh.c for more information.
+ *
+ * The @status and @change use the same bits as in USB2.0[11.24.2.7],
+ * so we don't have to do much when getting the port's status.
+ *
+ * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
+ * include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
+ */
+struct wusb_port {
+	u16 status;
+	u16 change;
+	struct wusb_dev *wusb_dev;	/* connected device's info */
+	unsigned reset_count;
+	u32 ptk_tkid;
+};
+
+/**
+ * WUSB Host Controller specifics
+ *
+ * All fields that are common to all Wireless USB controller types
+ * (HWA and WHCI) are grouped here. Host Controller
+ * functions/operations that only deal with general Wireless USB HC
+ * issues use this data type to refer to the host.
+ *
+ * @usb_hcd 	   Instantiation of a USB host controller
+ *                 (initialized by the upper layer [HWA-HC or WHCI]).
+ *
+ * @dev		   Device that implements this; initialized by the
+ *                 upper layer (HWA-HC, WHCI...); this device should
+ *                 have a refcount.
+ *
+ * @trust_timeout  After this time without hearing for device
+ *                 activity, we consider the device gone and we have to
+ *                 re-authenticate.
+ *
+ *                 Can be accessed w/o locking--however, read to a
+ *                 local variable then use.
+ *
+ * @chid           WUSB Cluster Host ID: this is supposed to be a
+ *                 unique value that doesn't change across reboots (so
+ *                 that your devices do not require re-association).
+ *
+ *                 Read/Write protected by @mutex
+ *
+ * @dev_info       This array has ports_max elements. It is used to
+ *                 give the HC information about the WUSB devices (see
+ *                 'struct wusb_dev_info').
+ *
+ *	           For HWA we need to allocate it in heap; for WHCI it
+ *                 needs to be permanently mapped, so we keep it for
+ *                 both and make it easy. Call wusbhc->dev_info_set()
+ *                 to update an entry.
+ *
+ * @ports_max	   Number of simultaneous device connections (fake
+ *                 ports) this HC will take. Read-only.
+ *
+ * @port      	   Array of port status for each fake root port. Guaranteed to
+ *                 always be the same length during device existence
+ *                 [this allows for some unlocked but referenced reading].
+ *
+ * @mmcies_max	   Max number of Information Elements this HC can send
+ *                 in its MMC. Read-only.
+ *
+ * @mmcie_add	   HC specific operation (WHCI or HWA) for adding an
+ *                 MMCIE.
+ *
+ * @mmcie_rm	   HC specific operation (WHCI or HWA) for removing an
+ *                 MMCIE.
+ *
+ * @enc_types	   Array which describes the encryption methods
+ *                 supported by the host as described in WUSB1.0 --
+ *                 one entry per supported method. As of WUSB1.0 there
+ *                 are only four methods, we make space for eight just in
+ *                 case they decide to add some more (and pray they do
+ *                 it in sequential order). if 'enc_types[enc_method]
+ *                 != 0', then it is supported by the host. enc_method
+ *                 is USB_ENC_TYPE*.
+ *
+ * @set_ptk:       Set the PTK and enable encryption for a device. Or, if
+ *                 the supplied key is NULL, disable encryption for that
+ *                 device.
+ *
+ * @set_gtk:       Set the GTK to be used for all future broadcast packets
+ *                 (i.e., MMCs).  With some hardware, setting the GTK may start
+ *                 MMC transmission.
+ *
+ * NOTE:
+ *
+ *  - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
+ *    (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
+ *    is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
+ *    refcount on it).
+ *
+ *    Most of the time when you need to use it, it will be non-NULL,
+ *    so there is no real need to check for it (wusb_dev will
+ *    disappear before usb_dev).
+ *
+ *  - The following fields need to be filled out before calling
+ *    wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
+ *
+ *  - there is no wusbhc_init() method, we do everything in
+ *    wusbhc_create().
+ *
+ *  - Creation is done in two phases, wusbhc_create() and
+ *    wusbhc_b_create(); the b parts are those that need to be called
+ *    after calling usb_add_hcd(&wusbhc->usb_hcd).
+ */
+struct wusbhc {
+	struct usb_hcd usb_hcd;		/* HAS TO BE 1st */
+	struct device *dev;
+	struct uwb_rc *uwb_rc;
+	struct uwb_pal pal;
+
+	unsigned trust_timeout;			/* in jiffies */
+	struct wuie_host_info *wuie_host_info;	/* Includes CHID */
+
+	struct mutex mutex;			/* locks everything else */
+	u16 cluster_id;				/* Wireless USB Cluster ID */
+	struct wusb_port *port;			/* Fake port status handling */
+	struct wusb_dev_info *dev_info;		/* for Set Device Info mgmt */
+	u8 ports_max;
+	unsigned active:1;			/* currently xmit'ing MMCs */
+	struct wuie_keep_alive keep_alive_ie;	/* protected by mutex */
+	struct delayed_work keep_alive_timer;
+	struct list_head cack_list;		/* Connect acknowledging */
+	size_t cack_count;			/* protected by 'mutex' */
+	struct wuie_connect_ack cack_ie;
+	struct uwb_rsv *rsv;		/* cluster bandwidth reservation */
+
+	struct mutex mmcie_mutex;		/* MMC WUIE handling */
+	struct wuie_hdr **mmcie;		/* WUIE array */
+	u8 mmcies_max;
+	/* FIXME: make wusbhc_ops? */
+	int (*start)(struct wusbhc *wusbhc);
+	void (*stop)(struct wusbhc *wusbhc);
+	int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+			 u8 handle, struct wuie_hdr *wuie);
+	int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
+	int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
+	int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
+		       const struct uwb_mas_bm *);
+	int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
+		       u32 tkid, const void *key, size_t key_size);
+	int (*set_gtk)(struct wusbhc *wusbhc,
+		       u32 tkid, const void *key, size_t key_size);
+	int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);
+
+	struct {
+		struct usb_key_descriptor descr;
+		u8 data[16];				/* GTK key data */
+	} __attribute__((packed)) gtk;
+	u8 gtk_index;
+	u32 gtk_tkid;
+	struct work_struct gtk_rekey_done_work;
+	int pending_set_gtks;
+
+	struct usb_encryption_descriptor *ccm1_etd;
+};
+
+#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
+
+
+extern int wusbhc_create(struct wusbhc *);
+extern int wusbhc_b_create(struct wusbhc *);
+extern void wusbhc_b_destroy(struct wusbhc *);
+extern void wusbhc_destroy(struct wusbhc *);
+extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
+			      struct wusb_dev *);
+extern void wusb_dev_sysfs_rm(struct wusb_dev *);
+extern int wusbhc_sec_create(struct wusbhc *);
+extern int wusbhc_sec_start(struct wusbhc *);
+extern void wusbhc_sec_stop(struct wusbhc *);
+extern void wusbhc_sec_destroy(struct wusbhc *);
+extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
+				int status);
+void wusbhc_reset_all(struct wusbhc *wusbhc);
+
+int wusbhc_pal_register(struct wusbhc *wusbhc);
+void wusbhc_pal_unregister(struct wusbhc *wusbhc);
+
+/*
+ * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
+ *
+ * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
+ *
+ * This is a safe assumption as @usb_dev->bus is referenced all the
+ * time during the @usb_dev life cycle.
+ */
+static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
+{
+	struct usb_hcd *usb_hcd;
+	usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
+	return usb_get_hcd(usb_hcd);
+}
+
+/*
+ * Increment the reference count on a wusbhc.
+ *
+ * @wusbhc's life cycle is identical to that of the underlying usb_hcd.
+ */
+static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
+{
+	return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
+}
+
+/*
+ * Return the wusbhc associated to a @usb_dev
+ *
+ * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
+ *
+ * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
+ *           WARNING: referenced at the usb_hcd level, unlocked
+ *
+ * FIXME: move offline
+ */
+static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
+{
+	struct wusbhc *wusbhc = NULL;
+	struct usb_hcd *usb_hcd;
+	if (usb_dev->devnum > 1 && !usb_dev->wusb) {
+		/* all devices but root hubs must be WUSB */
+		dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
+			usb_dev->wusb);
+		BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb);
+	}
+	usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
+	if (usb_hcd == NULL)
+		return NULL;
+	BUG_ON(usb_hcd->wireless == 0);
+	return wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+}
+
+
+static inline void wusbhc_put(struct wusbhc *wusbhc)
+{
+	usb_put_hcd(&wusbhc->usb_hcd);
+}
+
+int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid);
+void wusbhc_stop(struct wusbhc *wusbhc);
+extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
+
+/* Device connect handling */
+extern int wusbhc_devconnect_create(struct wusbhc *);
+extern void wusbhc_devconnect_destroy(struct wusbhc *);
+extern int wusbhc_devconnect_start(struct wusbhc *wusbhc,
+				   const struct wusb_ckhdid *chid);
+extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
+extern int wusbhc_devconnect_auth(struct wusbhc *, u8);
+extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
+			     struct wusb_dn_hdr *dn_hdr, size_t size);
+extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port);
+extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
+extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
+			void *priv);
+extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
+			     u8 addr);
+
+/* Wireless USB fake Root Hub methods */
+extern int wusbhc_rh_create(struct wusbhc *);
+extern void wusbhc_rh_destroy(struct wusbhc *);
+
+extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
+extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
+extern int wusbhc_rh_suspend(struct usb_hcd *);
+extern int wusbhc_rh_resume(struct usb_hcd *);
+extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
+
+/* MMC handling */
+extern int wusbhc_mmcie_create(struct wusbhc *);
+extern void wusbhc_mmcie_destroy(struct wusbhc *);
+extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
+			    struct wuie_hdr *);
+extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
+
+/* Bandwidth reservation */
+int wusbhc_rsv_establish(struct wusbhc *wusbhc);
+void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
+
+/*
+ * I've always said
+ * I wanted a wedding in a church...
+ *
+ * but lately I've been thinking about
+ * the Botanical Gardens.
+ *
+ * We could do it by the tulips.
+ * It'll be beautiful
+ *
+ * --Security!
+ */
+extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
+				struct wusb_dev *);
+extern void wusb_dev_sec_rm(struct wusb_dev *);
+extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
+				   struct wusb_ckhdid *ck);
+void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
+
+
+/* WUSB Cluster ID handling */
+extern u8 wusb_cluster_id_get(void);
+extern void wusb_cluster_id_put(u8);
+
+/*
+ * wusb_port_by_idx - return the port associated to a zero-based port index
+ *
+ * NOTE: valid without locking as long as wusbhc is referenced (as the
+ *       number of ports doesn't change). The data pointed to has to
+ *       be verified though :)
+ */
+static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
+						 u8 port_idx)
+{
+	return &wusbhc->port[port_idx];
+}
+
+/*
+ * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
+ * a port_idx.
+ *
+ * USB stack USB ports are 1 based!!
+ *
+ * NOTE: only valid for WUSB devices!!!
+ */
+static inline u8 wusb_port_no_to_idx(u8 port_no)
+{
+	return port_no - 1;
+}
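+
+/*
+ * Illustration only (hypothetical snippet): the two helpers above are
+ * normally combined to go from a usb_device to its fake-port state:
+ *
+ *	struct wusb_port *port = wusb_port_by_idx(wusbhc,
+ *				wusb_port_no_to_idx(usb_dev->portnum));
+ */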
+
+extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
+						  struct usb_device *);
+
+/*
+ * Return a referenced wusb_dev given a @usb_dev
+ *
+ * Returns NULL if the usb_dev is being torn down.
+ *
+ * FIXME: move offline
+ */
+static inline
+struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
+{
+	struct wusbhc *wusbhc;
+	struct wusb_dev *wusb_dev;
+	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+	if (wusbhc == NULL)
+		return NULL;
+	mutex_lock(&wusbhc->mutex);
+	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
+	mutex_unlock(&wusbhc->mutex);
+	wusbhc_put(wusbhc);
+	return wusb_dev;
+}
+
+/* Misc */
+
+extern struct workqueue_struct *wusbd;
+#endif /* #ifndef __WUSBHC_H__ */
diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig
new file mode 100644
index 0000000..ca78312
--- /dev/null
+++ b/drivers/uwb/Kconfig
@@ -0,0 +1,90 @@
+#
+# UWB device configuration
+#
+
+menuconfig UWB
+	tristate "Ultra Wideband devices (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on PCI
+	default n
+	help
+	  UWB is a high-bandwidth, low-power, point-to-point radio
+	  technology using a wide spectrum (3.1-10.6GHz). It is
+	  optimized for in-room use (480Mbps at 2 meters, 110Mbps at
+	  10m). It serves as the transport layer for other protocols,
+	  such as Wireless USB (WUSB), IP (WLP) and upcoming
+	  Bluetooth and 1394.
+
+	  The topology is peer to peer; however, higher level
+	  protocols (such as WUSB) might impose a master/slave
+	  relationship.
+
+	  Say Y here if your computer has USB- or PCI-based UWB radio
+	  controllers. You will need to enable the radio controllers
+	  below.  It is OK to select all of them; no harm done.
+
+	  For more help check the UWB and WUSB related files in
+	  <file:Documentation/usb/>.
+
+	  To compile the UWB stack as a module, choose M here.
+
+if UWB
+
+config UWB_HWA
+	tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)"
+	depends on USB
+	help
+	  This driver enables the radio controller for HWA USB
+	  devices. HWA stands for Host Wire Adapter, and it is a UWB
+	  Radio Controller connected to your system via USB. Most of
+	  them come with a Wireless USB host controller also.
+
+	  To compile this driver select Y (built in) or M (module). It
+	  is safe to select any even if you do not have the hardware.
+
+config UWB_WHCI
+        tristate "UWB Radio Control driver for WHCI-compliant cards"
+        depends on PCI
+        help
+          This driver enables the radio controller for WHCI cards.
+
+          WHCI is a specification developed by Intel
+          (http://www.intel.com/technology/comms/wusb/whci.htm) much
+          in the spirit of USB's EHCI, but for UWB and Wireless USB
+          radio/host controllers connected via memory mapping (e.g.:
+          PCI). Most of these cards come also with a Wireless USB host
+          controller.
+
+          To compile this driver select Y (built in) or M (module). It
+          is safe to select any even if you do not have the hardware.
+
+config UWB_WLP
+	tristate "Support WiMedia Link Protocol (Ethernet/IP over UWB)"
+	depends on UWB && NET
+	help
+	  This is a common library for drivers that implement
+	  networking over UWB.
+
+config UWB_I1480U
+        tristate "Support for Intel Wireless UWB Link 1480 HWA"
+        depends on UWB_HWA
+        select FW_LOADER
+        help
+         This driver enables support for the i1480 when connected via
+         USB. It consists of a firmware uploader that will enable it
+         to behave as an HWA device.
+
+         To compile this driver select Y (built in) or M (module). It
+         is safe to select any even if you do not have the hardware.
+
+config UWB_I1480U_WLP
+        tristate "Support for Intel Wireless UWB Link 1480 HWA's WLP interface"
+        depends on UWB_I1480U &&  UWB_WLP && NET
+        help
+         This driver enables WLP support for the i1480 when connected via
+         USB. WLP is the WiMedia Link Protocol, or IP over UWB.
+
+         To compile this driver select Y (built in) or M (module). It
+         is safe to select any even if you don't have the hardware.
+
+endif # UWB
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile
new file mode 100644
index 0000000..257e690
--- /dev/null
+++ b/drivers/uwb/Makefile
@@ -0,0 +1,29 @@
+obj-$(CONFIG_UWB)		+= uwb.o
+obj-$(CONFIG_UWB_WLP)		+= wlp/
+obj-$(CONFIG_UWB_WHCI)		+= umc.o whci.o whc-rc.o
+obj-$(CONFIG_UWB_HWA)		+= hwa-rc.o
+obj-$(CONFIG_UWB_I1480U)	+= i1480/
+
+uwb-objs :=		\
+	address.o	\
+	beacon.o	\
+	driver.o	\
+	drp.o		\
+	drp-avail.o	\
+	drp-ie.o	\
+	est.o		\
+	ie.o		\
+	lc-dev.o	\
+	lc-rc.o		\
+	neh.o		\
+	pal.o		\
+	reset.o		\
+	rsv.o		\
+	scan.o		\
+	uwb-debug.o	\
+	uwbd.o
+
+umc-objs :=		\
+	umc-bus.o	\
+	umc-dev.o	\
+	umc-drv.o
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
new file mode 100644
index 0000000..1664ae5
--- /dev/null
+++ b/drivers/uwb/address.c
@@ -0,0 +1,374 @@
+/*
+ * Ultra Wide Band
+ * Address management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/random.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+
+/** Device Address Management command */
+struct uwb_rc_cmd_dev_addr_mgmt {
+	struct uwb_rccb rccb;
+	u8 bmOperationType;
+	u8 baAddr[6];
+} __attribute__((packed));
+
+
+/**
+ * Low level command for setting/getting UWB radio's addresses
+ *
+ * @rc:		UWB Radio Controller instance
+ * @bmOperationType:
+ * 		Set/get, MAC/DEV (see WUSB1.0[8.6.2.2])
+ * @baAddr:	address buffer--assumed to have enough data to hold
+ *              the address type requested.
+ * @reply:	Pointer to reply buffer (can be stack allocated)
+ * @returns:	0 if ok, < 0 errno code on error.
+ *
+ * @cmd has to be dynamically allocated because USB cannot grok stack
+ * or vmalloc buffers, depending on your combination of host and
+ * architecture.
+ */
+static
+int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc,
+			 u8 bmOperationType, const u8 *baAddr,
+			 struct uwb_rc_evt_dev_addr_mgmt *reply)
+{
+	int result;
+	struct uwb_rc_cmd_dev_addr_mgmt *cmd;
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_kzalloc;
+	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT);
+	cmd->bmOperationType = bmOperationType;
+	if (baAddr) {
+		size_t size = 0;
+		switch (bmOperationType >> 1) {
+		case 0:	size = 2; break;
+		case 1:	size = 6; break;
+		default: BUG();
+		}
+		memcpy(cmd->baAddr, baAddr, size);
+	}
+	reply->rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT;
+	result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT",
+			    &cmd->rccb, sizeof(*cmd),
+			    &reply->rceb, sizeof(*reply));
+	if (result < 0)
+		goto error_cmd;
+	if (result < sizeof(*reply)) {
+		dev_err(&rc->uwb_dev.dev,
+			"DEV-ADDR-MGMT: not enough data replied: "
+			"%d vs %zu bytes needed\n", result, sizeof(*reply));
+		result = -ENOMSG;
+	} else if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev,
+			"DEV-ADDR-MGMT: command execution failed: %s (%d)\n",
+			uwb_rc_strerror(reply->bResultCode),
+			reply->bResultCode);
+		result = -EIO;
+	} else
+		result = 0;
+error_cmd:
+	kfree(cmd);
+error_kzalloc:
+	return result;
+}
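+
+/*
+ * For reference, the bmOperationType encoding assumed by the callers
+ * below (see WUSB1.0[8.6.2.2]):
+ *
+ *	bit 0: 0 = get, 1 = set
+ *	bit 1: 0 = 16-bit DevAddr, 1 = 48-bit MAC address
+ *
+ * which is why uwb_rc_addr_{set,get}() OR in 0x2 for UWB_ADDR_MAC.
+ */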
+
+
+/**
+ * Set the UWB RC MAC or device address.
+ *
+ * @rc:      UWB Radio Controller
+ * @_addr:   Pointer to address to write [assumed to be either a
+ *           'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
+ * @type:    Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC).
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Some anal retentivity here: even if both 'struct
+ * uwb_{dev,mac}_addr' have the actual byte array at the same offset
+ * and I could just pass _addr to uwb_rc_dev_addr_mgmt(), I prefer
+ * to use some syntactic sugar in case someday we decide to change the
+ * format of the structs. The compiler will optimize it out anyway.
+ */
+static int uwb_rc_addr_set(struct uwb_rc *rc,
+		    const void *_addr, enum uwb_addr_type type)
+{
+	int result;
+	u8 bmOperationType = 0x1; 		/* Set address */
+	const struct uwb_dev_addr *dev_addr = _addr;
+	const struct uwb_mac_addr *mac_addr = _addr;
+	struct uwb_rc_evt_dev_addr_mgmt reply;
+	const u8 *baAddr;
+
+	result = -EINVAL;
+	switch (type) {
+	case UWB_ADDR_DEV:
+		baAddr = dev_addr->data;
+		break;
+	case UWB_ADDR_MAC:
+		baAddr = mac_addr->data;
+		bmOperationType |= 0x2;
+		break;
+	default:
+		return result;
+	}
+	return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply);
+}
+
+
+/**
+ * Get the UWB radio's MAC or device address.
+ *
+ * @rc:      UWB Radio Controller
+ * @_addr:   Where to write the address data [assumed to be either a
+ *           'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
+ * @type:    Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC).
+ * @returns: 0 if ok (and *_addr set), < 0 errno code on error.
+ *
+ * See comment in uwb_rc_addr_set() about anal retentivity in the
+ * type handling of the address variables.
+ */
+static int uwb_rc_addr_get(struct uwb_rc *rc,
+		    void *_addr, enum uwb_addr_type type)
+{
+	int result;
+	u8 bmOperationType = 0x0; 		/* Get address */
+	struct uwb_rc_evt_dev_addr_mgmt evt;
+	struct uwb_dev_addr *dev_addr = _addr;
+	struct uwb_mac_addr *mac_addr = _addr;
+	u8 *baAddr;
+
+	result = -EINVAL;
+	switch (type) {
+	case UWB_ADDR_DEV:
+		baAddr = dev_addr->data;
+		break;
+	case UWB_ADDR_MAC:
+		bmOperationType |= 0x2;
+		baAddr = mac_addr->data;
+		break;
+	default:
+		return result;
+	}
+	result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt);
+	if (result == 0)
+		switch (type) {
+		case UWB_ADDR_DEV:
+			memcpy(&dev_addr->data, evt.baAddr,
+			       sizeof(dev_addr->data));
+			break;
+		case UWB_ADDR_MAC:
+			memcpy(&mac_addr->data, evt.baAddr,
+			       sizeof(mac_addr->data));
+			break;
+		default:		/* shut gcc up */
+			BUG();
+		}
+	return result;
+}
+
+
+/** Get @rc's MAC address to @addr */
+int uwb_rc_mac_addr_get(struct uwb_rc *rc,
+			struct uwb_mac_addr *addr)
+{
+	return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get);
+
+
+/** Get @rc's device address to @addr */
+int uwb_rc_dev_addr_get(struct uwb_rc *rc,
+			struct uwb_dev_addr *addr)
+{
+	return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get);
+
+
+/** Set @rc's MAC address to @addr */
+int uwb_rc_mac_addr_set(struct uwb_rc *rc,
+			const struct uwb_mac_addr *addr)
+{
+	int result = -EINVAL;
+	mutex_lock(&rc->uwb_dev.mutex);
+	result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC);
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+
+/** Set @rc's device address to @addr */
+int uwb_rc_dev_addr_set(struct uwb_rc *rc,
+			const struct uwb_dev_addr *addr)
+{
+	int result = -EINVAL;
+	mutex_lock(&rc->uwb_dev.mutex);
+	result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV);
+	rc->uwb_dev.dev_addr = *addr;
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+/* Returns !0 if given address is already assigned to device. */
+int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_mac_addr *addr = _addr;
+
+	if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr))
+		return !0;
+	return 0;
+}
+
+/* Returns !0 if given address is already assigned to device. */
+int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_dev_addr *addr = _addr;
+	if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr))
+		return !0;
+	return 0;
+}
+
+/**
+ * uwb_rc_dev_addr_assign - assign a generated DevAddr to a radio controller
+ * @rc:      the (local) radio controller device requiring a new DevAddr
+ *
+ * A new DevAddr is required when:
+ *    - first setting up a radio controller
+ *    - if the hardware reports a DevAddr conflict
+ *
+ * The DevAddr is randomly generated in the generated DevAddr range
+ * [0x100, 0xfeff]. The number of devices in a beacon group is limited
+ * by mMaxBPLength (96) so this address space will never be exhausted.
+ *
+ * [ECMA-368] 17.1.1, 17.16.
+ */
+int uwb_rc_dev_addr_assign(struct uwb_rc *rc)
+{
+	struct uwb_dev_addr new_addr;
+
+	do {
+		get_random_bytes(new_addr.data, sizeof(new_addr.data));
+	} while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff
+		 || __uwb_dev_addr_assigned(rc, &new_addr));
+
+	return uwb_rc_dev_addr_set(rc, &new_addr);
+}
+
+/**
+ * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event
+ * @evt: the DEV_ADDR_CONFLICT notification from the radio controller
+ *
+ * A new (non-conflicting) DevAddr is assigned to the radio controller.
+ *
+ * [ECMA-368] 17.1.1.1.
+ */
+int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt)
+{
+	struct uwb_rc *rc = evt->rc;
+
+	return uwb_rc_dev_addr_assign(rc);
+}
+
+/*
+ * Print the 48-bit EUI MAC address of the radio controller when
+ * reading /sys/class/uwb_rc/XX/mac_address
+ */
+static ssize_t uwb_rc_mac_addr_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_rc *rc = uwb_dev->rc;
+	struct uwb_mac_addr addr;
+	ssize_t result;
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC);
+	mutex_unlock(&rc->uwb_dev.mutex);
+	if (result >= 0) {
+		result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr);
+		buf[result++] = '\n';
+	}
+	return result;
+}
+
+/*
+ * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address
+ * and if correct, set it.
+ */
+static ssize_t uwb_rc_mac_addr_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_rc *rc = uwb_dev->rc;
+	struct uwb_mac_addr addr;
+	ssize_t result;
+
+	result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n",
+			&addr.data[0], &addr.data[1], &addr.data[2],
+			&addr.data[3], &addr.data[4], &addr.data[5]);
+	if (result != 6) {
+		result = -EINVAL;
+		goto out;
+	}
+	if (is_multicast_ether_addr(addr.data)) {
+		dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
+			"MAC address %s\n", buf);
+		result = -EINVAL;
+		goto out;
+	}
+	result = uwb_rc_mac_addr_set(rc, &addr);
+	if (result == 0)
+		rc->uwb_dev.mac_addr = addr;
+out:
+	return result < 0 ? result : size;
+}
+DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
+
+/** Print @addr to @buf, @return bytes written */
+size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
+			int type)
+{
+	size_t result;
+	if (type)
+		result = scnprintf(buf, buf_size,
+				  "%02x:%02x:%02x:%02x:%02x:%02x",
+				  addr[0], addr[1], addr[2],
+				  addr[3], addr[4], addr[5]);
+	else
+		result = scnprintf(buf, buf_size, "%02x:%02x",
+				  addr[1], addr[0]);
+	return result;
+}
+EXPORT_SYMBOL_GPL(__uwb_addr_print);
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c
new file mode 100644
index 0000000..46b18ee
--- /dev/null
+++ b/drivers/uwb/beacon.c
@@ -0,0 +1,642 @@
+/*
+ * Ultra Wide Band
+ * Beacon management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/** Start Beaconing command structure */
+struct uwb_rc_cmd_start_beacon {
+	struct uwb_rccb rccb;
+	__le16 wBPSTOffset;
+	u8 bChannelNumber;
+} __attribute__((packed));
+
+
+static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
+{
+	int result;
+	struct uwb_rc_cmd_start_beacon *cmd;
+	struct uwb_rc_evt_confirm reply;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
+	cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
+	cmd->bChannelNumber = channel;
+	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
+	result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
+			    &reply.rceb, sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev,
+			"START-BEACON: command execution failed: %s (%d)\n",
+			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+		result = -EIO;
+	}
+error_cmd:
+	kfree(cmd);
+	return result;
+}
+
+static int uwb_rc_stop_beacon(struct uwb_rc *rc)
+{
+	int result;
+	struct uwb_rccb *cmd;
+	struct uwb_rc_evt_confirm reply;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+	cmd->bCommandType = UWB_RC_CET_GENERAL;
+	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
+	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
+	result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
+			    &reply.rceb, sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev,
+			"STOP-BEACON: command execution failed: %s (%d)\n",
+			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+		result = -EIO;
+	}
+error_cmd:
+	kfree(cmd);
+	return result;
+}
+
+/*
+ * Start/stop beacons
+ *
+ * @rc:          UWB Radio Controller to operate on
+ * @channel:     UWB channel on which to beacon (WUSB[table
+ *               5-12]). If -1, stop beaconing.
+ * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
+ *
+ * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
+ * of a SET IE command after the device sent the first beacon that includes
+ * the IEs specified in the SET IE command. So, after we start beaconing we
+ * check if there is anything in the IE cache and call the SET IE command
+ * if needed.
+ */
+int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
+{
+	int result;
+	struct device *dev = &rc->uwb_dev.dev;
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	if (channel < 0)
+		channel = -1;
+	if (channel == -1)
+		result = uwb_rc_stop_beacon(rc);
+	else {
+		/* channel >= 0...dah */
+		result = uwb_rc_start_beacon(rc, bpst_offset, channel);
+		if (result < 0)
+			goto out_up;
+		if (le16_to_cpu(rc->ies->wIELength) > 0) {
+			result = uwb_rc_set_ie(rc, rc->ies);
+			if (result < 0) {
+				dev_err(dev, "Cannot set new IE on device: "
+					"%d\n", result);
+				result = uwb_rc_stop_beacon(rc);
+				channel = -1;
+				bpst_offset = 0;
+			} else
+				result = 0;
+		}
+	}
+
+	if (result < 0)
+		goto out_up;
+	rc->beaconing = channel;
+
+	uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE);
+
+out_up:
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+/*
+ * Beacon cache
+ *
+ * The purpose of this is to speed up the lookup of beacon information
+ * when a new beacon arrives. The UWB daemon also uses it to keep tabs
+ * on which devices are within radio range and which are not. When a
+ * device's beacon stays present for more than a certain amount of
+ * time, it is considered a new, usable device. When a beacon ceases
+ * to be received for a certain amount of time, it is considered that
+ * the device is gone.
+ *
+ * FIXME: use an allocator for the entries
+ * FIXME: use something faster for search than a list
+ */
+
+struct uwb_beca uwb_beca = {
+	.list = LIST_HEAD_INIT(uwb_beca.list),
+	.mutex = __MUTEX_INITIALIZER(uwb_beca.mutex)
+};
+
+
+void uwb_bce_kfree(struct kref *_bce)
+{
+	struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
+
+	kfree(bce->be);
+	kfree(bce);
+}
+
+
+/* Find a beacon by dev addr in the cache */
+static
+struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr)
+{
+	struct uwb_beca_e *bce, *next;
+	list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+		d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n",
+			 dev_addr->data[0], dev_addr->data[1],
+			 bce->dev_addr.data[0], bce->dev_addr.data[1]);
+		if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
+			goto out;
+	}
+	bce = NULL;
+out:
+	return bce;
+}
+
+/* Find a beacon by dev addr in the cache */
+static
+struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr)
+{
+	struct uwb_beca_e *bce, *next;
+	list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+		if (!memcmp(bce->mac_addr, mac_addr->data,
+			    sizeof(struct uwb_mac_addr)))
+			goto out;
+	}
+	bce = NULL;
+out:
+	return bce;
+}
+
+/**
+ * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
+ * @rc:      the radio controller that saw the device
+ * @devaddr: DevAddr of the UWB device to find
+ *
+ * There may be more than one matching device (in the case of a
+ * DevAddr conflict), but only the first one is returned.
+ */
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+				       const struct uwb_dev_addr *devaddr)
+{
+	struct uwb_dev *found = NULL;
+	struct uwb_beca_e *bce;
+
+	mutex_lock(&uwb_beca.mutex);
+	bce = __uwb_beca_find_bydev(devaddr);
+	if (bce)
+		found = uwb_dev_try_get(rc, bce->uwb_dev);
+	mutex_unlock(&uwb_beca.mutex);
+
+	return found;
+}
+
+/**
+ * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
+ * @rc:      the radio controller that saw the device
+ * @macaddr: EUI-48 of the UWB device to find
+ */
+struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
+				       const struct uwb_mac_addr *macaddr)
+{
+	struct uwb_dev *found = NULL;
+	struct uwb_beca_e *bce;
+
+	mutex_lock(&uwb_beca.mutex);
+	bce = __uwb_beca_find_bymac(macaddr);
+	if (bce)
+		found = uwb_dev_try_get(rc, bce->uwb_dev);
+	mutex_unlock(&uwb_beca.mutex);
+
+	return found;
+}
+
+/* Initialize a beacon cache entry */
+static void uwb_beca_e_init(struct uwb_beca_e *bce)
+{
+	mutex_init(&bce->mutex);
+	kref_init(&bce->refcnt);
+	stats_init(&bce->lqe_stats);
+	stats_init(&bce->rssi_stats);
+}
+
+/*
+ * Add a beacon to the cache
+ *
+ * @be:         Beacon event information
+ * @bf:         Beacon frame (part of b, really)
+ * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
+ */
+struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be,
+				  struct uwb_beacon_frame *bf,
+				  unsigned long ts_jiffies)
+{
+	struct uwb_beca_e *bce;
+
+	bce = kzalloc(sizeof(*bce), GFP_KERNEL);
+	if (bce == NULL)
+		return NULL;
+	uwb_beca_e_init(bce);
+	bce->ts_jiffies = ts_jiffies;
+	bce->uwb_dev = NULL;
+	list_add(&bce->node, &uwb_beca.list);
+	return bce;
+}
+
+/*
+ * Wipe out beacon entries that became stale
+ *
+ * Remove associated devices too.
+ */
+void uwb_beca_purge(void)
+{
+	struct uwb_beca_e *bce, *next;
+	unsigned long expires;
+
+	mutex_lock(&uwb_beca.mutex);
+	list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+		expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
+		if (time_after(jiffies, expires)) {
+			uwbd_dev_offair(bce);
+			list_del(&bce->node);
+			uwb_bce_put(bce);
+		}
+	}
+	mutex_unlock(&uwb_beca.mutex);
+}
+
+/* Clean up the whole beacon cache. Called on shutdown */
+void uwb_beca_release(void)
+{
+	struct uwb_beca_e *bce, *next;
+	mutex_lock(&uwb_beca.mutex);
+	list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+		list_del(&bce->node);
+		uwb_bce_put(bce);
+	}
+	mutex_unlock(&uwb_beca.mutex);
+}
+
+static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
+			     struct uwb_beacon_frame *bf)
+{
+	char macbuf[UWB_ADDR_STRSIZE];
+	char devbuf[UWB_ADDR_STRSIZE];
+	char dstbuf[UWB_ADDR_STRSIZE];
+
+	uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
+	uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
+	uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
+	dev_info(&rc->uwb_dev.dev,
+		 "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
+		 devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
+		 bf->Beacon_Slot_Number, macbuf);
+}
+
+/*
+ * @bce: beacon cache entry, referenced
+ */
+ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
+			  char *buf, size_t size)
+{
+	ssize_t result = 0;
+	struct uwb_rc_evt_beacon *be;
+	struct uwb_beacon_frame *bf;
+	struct uwb_buf_ctx ctx = {
+		.buf = buf,
+		.bytes = 0,
+		.size = size
+	};
+
+	mutex_lock(&bce->mutex);
+	be = bce->be;
+	if (be == NULL)
+		goto out;
+	bf = (void *) be->BeaconInfo;
+	uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx,
+			bf->IEData, be->wBeaconInfoLength - sizeof(*bf));
+	result = ctx.bytes;
+out:
+	mutex_unlock(&bce->mutex);
+	return result;
+}
+
+/*
+ * Verify that the beacon event, frame and IEs are ok
+ */
+static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
+			     struct uwb_rc_evt_beacon *be)
+{
+	int result = -EINVAL;
+	struct uwb_beacon_frame *bf;
+	struct device *dev = &rc->uwb_dev.dev;
+
+	/* Is there enough data to decode a beacon frame? */
+	if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
+		dev_err(dev, "BEACON event: Not enough data to decode "
+			"(%zu vs %zu bytes needed)\n", evt->notif.size,
+			sizeof(*be) + sizeof(*bf));
+		goto error;
+	}
+	/* FIXME: make sure beacon frame IEs are fine and that the whole thing
+	 * is consistent */
+	result = 0;
+error:
+	return result;
+}
+
+/*
+ * Handle UWB_RC_EVT_BEACON events
+ *
+ * We check the beacon cache to see how the received beacon fares. If
+ * it is there already we refresh the timestamp. If not we create a new
+ * entry.
+ *
+ * According to the WHCI and WUSB specs, only one beacon frame is
+ * allowed per notification block, so we don't bother about scanning
+ * for more.
+ */
+int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
+{
+	int result = -EINVAL;
+	struct uwb_rc *rc;
+	struct uwb_rc_evt_beacon *be;
+	struct uwb_beacon_frame *bf;
+	struct uwb_beca_e *bce;
+	unsigned long last_ts;
+
+	rc = evt->rc;
+	be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
+	result = uwb_verify_beacon(rc, evt, be);
+	if (result < 0)
+		return result;
+
+	/* FIXME: handle alien beacons. */
+	if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
+	    be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
+		return -ENOSYS;
+	}
+
+	bf = (struct uwb_beacon_frame *) be->BeaconInfo;
+
+	/*
+	 * Drop beacons from devices with a NULL EUI-48 -- they cannot
+	 * be uniquely identified.
+	 *
+	 * It's expected that these will all be WUSB devices and they
+	 * have a WUSB specific connection method so ignoring them
+	 * here shouldn't be a problem.
+	 */
+	if (uwb_mac_addr_bcast(&bf->Device_Identifier))
+		return 0;
+
+	mutex_lock(&uwb_beca.mutex);
+	bce = __uwb_beca_find_bymac(&bf->Device_Identifier);
+	if (bce == NULL) {
+		/* Not in there, a new device is pinging */
+		uwb_beacon_print(evt->rc, be, bf);
+		bce = __uwb_beca_add(be, bf, evt->ts_jiffies);
+		if (bce == NULL) {
+			mutex_unlock(&uwb_beca.mutex);
+			return -ENOMEM;
+		}
+	}
+	mutex_unlock(&uwb_beca.mutex);
+
+	mutex_lock(&bce->mutex);
+	/* purge old beacon data */
+	kfree(bce->be);
+
+	last_ts = bce->ts_jiffies;
+
+	/* Update commonly used fields */
+	bce->ts_jiffies = evt->ts_jiffies;
+	bce->be = be;
+	bce->dev_addr = bf->hdr.SrcAddr;
+	bce->mac_addr = &bf->Device_Identifier;
+	be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
+	be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
+	stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
+	stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
+
+	/*
+	 * This might be a beacon from a new device.
+	 */
+	if (bce->uwb_dev == NULL)
+		uwbd_dev_onair(evt->rc, bce);
+
+	mutex_unlock(&bce->mutex);
+
+	return 1; /* we keep the event data */
+}
+
+/*
+ * Handle UWB_RC_EVT_BEACON_SIZE events
+ *
+ * XXXXX
+ */
+int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
+{
+	int result = -EINVAL;
+	struct device *dev = &evt->rc->uwb_dev.dev;
+	struct uwb_rc_evt_beacon_size *bs;
+
+	/* Is there enough data to decode the event? */
+	if (evt->notif.size < sizeof(*bs)) {
+		dev_err(dev, "BEACON SIZE notification: Not enough data to "
+			"decode (%zu vs %zu bytes needed)\n",
+			evt->notif.size, sizeof(*bs));
+		goto error;
+	}
+	bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
+	if (0)
+		dev_info(dev, "Beacon size changed to %u bytes "
+			"(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
+	else {
+		/* temporary hack until we do something with this message... */
+		static unsigned count;
+		if (++count % 1000 == 0)
+			dev_info(dev, "Beacon size changed %u times "
+				"(FIXME: action?)\n", count);
+	}
+	result = 0;
+error:
+	return result;
+}
+
+/**
+ * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
+ * @evt: the BP_SLOT_CHANGE notification from the radio controller
+ *
+ * If the event indicates that no beacon period slots were available
+ * then radio controller has transitioned to a non-beaconing state.
+ * Otherwise, simply save the current beacon slot.
+ */
+int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
+{
+	struct uwb_rc *rc = evt->rc;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_evt_bp_slot_change *bpsc;
+
+	if (evt->notif.size < sizeof(*bpsc)) {
+		dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
+		return -EINVAL;
+	}
+	bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
+		dev_info(dev, "stopped beaconing: No free slots in BP\n");
+		rc->beaconing = -1;
+	} else
+		rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
+	mutex_unlock(&rc->uwb_dev.mutex);
+
+	return 0;
+}
+
+/**
+ * Handle UWB_RC_EVT_BPOIE_CHANGE events
+ *
+ * XXXXX
+ */
+struct uwb_ie_bpo {
+	struct uwb_ie_hdr hdr;
+	u8                bp_length;
+	u8                data[];
+} __attribute__((packed));
+
+int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
+{
+	int result = -EINVAL;
+	struct device *dev = &evt->rc->uwb_dev.dev;
+	struct uwb_rc_evt_bpoie_change *bpoiec;
+	struct uwb_ie_bpo *bpoie;
+	static unsigned count;	/* FIXME: this is a temp hack */
+	size_t iesize;
+
+	/* Is there enough data to decode it? */
+	if (evt->notif.size < sizeof(*bpoiec)) {
+		dev_err(dev, "BPOIEC notification: Not enough data to "
+			"decode (%zu vs %zu bytes needed)\n",
+			evt->notif.size, sizeof(*bpoiec));
+		goto error;
+	}
+	bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
+	iesize = le16_to_cpu(bpoiec->wBPOIELength);
+	if (iesize < sizeof(*bpoie)) {
+		dev_err(dev, "BPOIEC notification: Not enough IE data to "
+			"decode (%zu vs %zu bytes needed)\n",
+			iesize, sizeof(*bpoie));
+		goto error;
+	}
+	if (++count % 1000 == 0)	/* Lame placeholder */
+		dev_info(dev, "BPOIE: %u changes received\n", count);
+	/*
+	 * FIXME: At this point we should go over all the IEs in the
+	 *        bpoiec->BPOIE array and act on each.
+	 */
+	result = 0;
+error:
+	return result;
+}
+
+/**
+ * uwb_bg_joined - is the RC in a beacon group?
+ * @rc: the radio controller
+ *
+ * Returns true if the radio controller is in a beacon group (even if
+ * it's the sole member).
+ */
+int uwb_bg_joined(struct uwb_rc *rc)
+{
+	return rc->beaconing != -1;
+}
+EXPORT_SYMBOL_GPL(uwb_bg_joined);
+
+/*
+ * Print beaconing state.
+ */
+static ssize_t uwb_rc_beacon_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_rc *rc = uwb_dev->rc;
+	ssize_t result;
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	result = sprintf(buf, "%d\n", rc->beaconing);
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+/*
+ * Start beaconing on the specified channel, or stop beaconing.
+ *
+ * The BPST offset of when to start searching for a beacon group to
+ * join may be specified.
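+ *
+ * Illustrative sysfs usage (the exact path is an assumption based on
+ * the "uwb_rc" class registered in driver.c, and a negative channel
+ * is assumed to stop beaconing):
+ *
+ *   echo "13 0" > /sys/class/uwb_rc/uwbN/beacon    # start on channel 13
+ *   echo "-1" > /sys/class/uwb_rc/uwbN/beacon      # stop beaconing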
+ */
+static ssize_t uwb_rc_beacon_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_rc *rc = uwb_dev->rc;
+	int channel;
+	unsigned bpst_offset = 0;
+	ssize_t result = -EINVAL;
+
+	result = sscanf(buf, "%d %u\n", &channel, &bpst_offset);
+	if (result >= 1)
+		result = uwb_rc_beacon(rc, channel, bpst_offset);
+	else
+		result = -EINVAL;
+
+	return result < 0 ? result : size;
+}
+DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c
new file mode 100644
index 0000000..521cdeb
--- /dev/null
+++ b/drivers/uwb/driver.c
@@ -0,0 +1,144 @@
+/*
+ * Ultra Wide Band
+ * Driver initialization, etc
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * Life cycle: FIXME: explain
+ *
+ *  UWB radio controller:
+ *
+ *    1. alloc a uwb_rc, zero it
+ *    2. call uwb_rc_init() on it to set it up + ops (won't do any
+ *       kind of allocation)
+ *    3. register (now it is owned by the UWB stack--deregister before
+ *       freeing/destroying).
+ *    4. It lives on its own now (the UWB stack handles it)--when it
+ *       disconnects, call unregister()
+ *    5. free it.
+ *
+ *    Make sure you have a reference to the uwb_rc before calling
+ *    any of the UWB API functions.
+ *
+ * TODO:
+ *
+ * 1. Locking and life cycle management is crappy still. All entry
+ *    points to the UWB HCD API assume you have a reference on the
+ *    uwb_rc structure and that it won't go away. They mutex lock it
+ *    before doing anything.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/random.h>
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+
+/* UWB stack attributes (or 'global' constants) */
+
+
+/**
+ * If a beacon disappears for longer than this, then we consider the
+ * device who was represented by that beacon to be gone.
+ *
+ * ECMA-368[17.2.3, last para] establishes that a device must not
+ * consider a device to be its neighbour if it doesn't receive a beacon
+ * for more than mMaxLostBeacons. mMaxLostBeacons is defined in
+ * ECMA-368[17.16] as 3; because we can get only one beacon per
+ * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
+ * for jitter and stuff and make it 500 ms.
+ */
+unsigned long beacon_timeout_ms = 500;
+
+static
+ssize_t beacon_timeout_ms_show(struct class *class, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
+}
+
+static
+ssize_t beacon_timeout_ms_store(struct class *class,
+				const char *buf, size_t size)
+{
+	unsigned long bt;
+	ssize_t result;
+	result = sscanf(buf, "%lu", &bt);
+	if (result != 1)
+		return -EINVAL;
+	beacon_timeout_ms = bt;
+	return size;
+}
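+
+/*
+ * Illustrative sysfs usage (the path is an assumption based on the
+ * "uwb_rc" class registered below):
+ *
+ *   # cat /sys/class/uwb_rc/beacon_timeout_ms
+ *   500
+ *   # echo 1000 > /sys/class/uwb_rc/beacon_timeout_ms
+ */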
+
+static struct class_attribute uwb_class_attrs[] = {
+	__ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
+	       beacon_timeout_ms_show, beacon_timeout_ms_store),
+	__ATTR_NULL,
+};
+
+/** Device model classes */
+struct class uwb_rc_class = {
+	.name        = "uwb_rc",
+	.class_attrs = uwb_class_attrs,
+};
+
+
+static int __init uwb_subsys_init(void)
+{
+	int result = 0;
+
+	result = uwb_est_create();
+	if (result < 0) {
+		printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
+		goto error_est_init;
+	}
+
+	result = class_register(&uwb_rc_class);
+	if (result < 0)
+		goto error_uwb_rc_class_register;
+	uwbd_start();
+	uwb_dbg_init();
+	return 0;
+
+error_uwb_rc_class_register:
+	uwb_est_destroy();
+error_est_init:
+	return result;
+}
+module_init(uwb_subsys_init);
+
+static void __exit uwb_subsys_exit(void)
+{
+	uwb_dbg_exit();
+	uwbd_stop();
+	class_unregister(&uwb_rc_class);
+	uwb_est_destroy();
+	return;
+}
+module_exit(uwb_subsys_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Ultra Wide Band core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c
new file mode 100644
index 0000000..3febd855
--- /dev/null
+++ b/drivers/uwb/drp-avail.c
@@ -0,0 +1,288 @@
+/*
+ * Ultra Wide Band
+ * DRP availability management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Manage DRP Availability (the MAS available for DRP
+ * reservations). Thus:
+ *
+ * - Handle DRP Availability Change notifications
+ *
+ * - Allow the reservation manager to indicate MAS reserved/released
+ *   by local (owned by/targeted at the radio controller)
+ *   reservations.
+ *
+ * - Based on the two sources above, generate a DRP Availability IE to
+ *   be included in the beacon.
+ *
+ * See also the documentation for struct uwb_drp_avail.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitmap.h>
+#include "uwb-internal.h"
+
+/**
+ * uwb_drp_avail_init - initialize an RC's MAS availability
+ *
+ * All MAS are available initially.  The RC will inform us which
+ * slots are used for the BP (it may change in size).
+ */
+void uwb_drp_avail_init(struct uwb_rc *rc)
+{
+	bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
+	bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
+	bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
+}
+
+/*
+ * Determine MAS available for new local reservations.
+ *
+ * avail = global & local & pending
+ */
+static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
+{
+	bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
+	bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
+}
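+
+/*
+ * Sketch of how the three bitmaps are used by the helpers below: a
+ * new local reservation first clears its MAS in 'pending' via
+ * uwb_drp_avail_reserve_pending(); once established,
+ * uwb_drp_avail_reserve() sets them back in 'pending' and clears them
+ * in 'local'; uwb_drp_avail_release() sets them in both again.
+ */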
+
+/**
+ * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
+ * @rc: the radio controller
+ * @mas: the MAS to reserve
+ *
+ * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
+ */
+int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
+{
+	struct uwb_mas_bm avail;
+
+	uwb_drp_available(rc, &avail);
+	if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
+		return -EBUSY;
+
+	bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
+	return 0;
+}
+
+/**
+ * uwb_drp_avail_reserve - reserve MAS for an established reservation
+ * @rc: the radio controller
+ * @mas: the MAS to reserve
+ */
+void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
+{
+	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
+	bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
+	rc->drp_avail.ie_valid = false;
+}
+
+/**
+ * uwb_drp_avail_release - release MAS from a pending or established reservation
+ * @rc: the radio controller
+ * @mas: the MAS to release
+ */
+void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
+{
+	bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
+	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
+	rc->drp_avail.ie_valid = false;
+}
+
+/**
+ * uwb_drp_avail_ie_update - update the DRP Availability IE
+ * @rc: the radio controller
+ *
+ * avail = global & local
+ */
+void uwb_drp_avail_ie_update(struct uwb_rc *rc)
+{
+	struct uwb_mas_bm avail;
+
+	bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
+
+	rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
+	rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
+	uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
+	rc->drp_avail.ie_valid = true;
+}
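+
+/*
+ * The IE generated here ends up in the beacon: uwb_rc_send_all_drp_ie()
+ * (drp.c) appends rc->drp_avail.ie after all the reservation DRP IEs
+ * in the SET-DRP-IE command sent to the radio controller.
+ */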
+
+/**
+ * Create an unsigned long from a buffer containing a byte stream.
+ *
+ * @array: pointer to buffer
+ * @itr:   index of buffer from where we start
+ * @len:   the buffer's remaining size may not be an exact multiple of
+ *         sizeof(unsigned long); @len is the length of the buffer that
+ *         needs to be converted. This will be sizeof(unsigned long) or
+ *         smaller (BUG if not). If it is smaller, we pad the remaining
+ *         space of the result with zeroes.
+ */
+static
+unsigned long get_val(u8 *array, size_t itr, size_t len)
+{
+	unsigned long val = 0;
+	size_t top = itr + len;
+
+	BUG_ON(len > sizeof(val));
+
+	while (itr < top) {
+		val <<= 8;
+		val |= array[top - 1];
+		top--;
+	}
+	val <<= 8 * (sizeof(val) - len); /* padding */
+	return val;
+}
+
+/**
+ * Initialize bitmap from data buffer.
+ *
+ * The bitmap to be converted could come from a IE, for example a
+ * DRP Availability IE.
+ * From ECMA-368 1.0 [16.8.7]: "
+ * octets: 1            1               N * (0 to 32)
+ *         Element ID   Length (=N)     DRP Availability Bitmap
+ *
+ * The DRP Availability Bitmap field is up to 256 bits long, one
+ * bit for each MAS in the superframe, where the least-significant
+ * bit of the field corresponds to the first MAS in the superframe
+ * and successive bits correspond to successive MASs."
+ *
+ * The DRP Availability bitmap is in octets from 0 to 32, so octet
+ * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
+ * octets, the bits in octets not included at the end of the bitmap are
+ * treated as zero. In this case (when the bitmap is smaller than 32
+ * octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
+ * with the last octet still containing bits for MAS 1-8, etc.
+ *
+ * For example:
+ * F00F0102 03040506 0708090A 0B0C0D0E 0F010203
+ * ^^^^
+ * ||||
+ * ||||
+ * |||\LSB of byte is MAS 9
+ * ||\MSB of byte is MAS 16
+ * |\LSB of first byte is MAS 1
+ * \ MSB of byte is MAS 8
+ *
+ * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
+ *
+ * The resulting bitmap will have the following mapping:
+ *	bit position 0 == MAS 1
+ *	bit position 1 == MAS 2
+ *	...
+ *	bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
+ *
+ * @bmp_itr:	pointer to bitmap (can be declared with DECLARE_BITMAP)
+ * @buffer:	pointer to buffer containing bitmap data in big endian
+ *              format (MSB first)
+ * @buffer_size:number of bytes with which bitmap should be initialized
+ */
+static
+void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
+		   size_t buffer_size)
+{
+	u8 *buffer = _buffer;
+	size_t itr, len;
+	unsigned long val;
+
+	itr = 0;
+	while (itr < buffer_size) {
+		len = buffer_size - itr >= sizeof(val) ?
+			sizeof(val) : buffer_size - itr;
+		val = get_val(buffer, itr, len);
+		bmp_itr[itr / sizeof(val)] = val;
+		itr += sizeof(val);
+	}
+}
+
+
+/**
+ * Extract DRP Availability bitmap from the notification.
+ *
+ * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes
+ * We convert that to our internal representation.
+ */
+static
+int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
+{
+	struct device *dev = &evt->rc->uwb_dev.dev;
+	struct uwb_rc_evt_drp_avail *drp_evt;
+	int result = -EINVAL;
+
+	/* Is there enough data to decode the event? */
+	if (evt->notif.size < sizeof(*drp_evt)) {
+		dev_err(dev, "DRP Availability Change: Not enough "
+			"data to decode event [%zu bytes, %zu "
+			"needed]\n", evt->notif.size, sizeof(*drp_evt));
+		goto error;
+	}
+	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
+	buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
+	result = 0;
+error:
+	return result;
+}
+
+
+/**
+ * Process an incoming DRP Availability notification.
+ *
+ * @evt:	Event information (packs the actual event data, which
+ *              radio controller it came to, etc).
+ *
+ * @returns:    0 on success (so uwbd() frees the event buffer), < 0
+ *              on error.
+ *
+ * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
+ * the MAS slot is available, bits set to ZERO indicate that the slot
+ * is busy.
+ *
+ * So we clear available slots, we set used slots :)
+ *
+ * The notification only marks non-availability based on the BP and
+ * received DRP IEs that are not for this radio controller.  A copy of
+ * this bitmap is needed to generate the real availability (which
+ * includes local and pending reservations).
+ *
+ * The DRP Availability IE that this radio controller emits will need
+ * to be updated.
+ */
+int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
+{
+	int result;
+	struct uwb_rc *rc = evt->rc;
+	DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+
+	result = uwbd_evt_get_drp_avail(evt, bmp);
+	if (result < 0)
+		return result;
+
+	mutex_lock(&rc->rsvs_mutex);
+	bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
+	rc->drp_avail.ie_valid = false;
+	mutex_unlock(&rc->rsvs_mutex);
+
+	uwb_rsv_sched_update(rc);
+
+	return 0;
+}
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
new file mode 100644
index 0000000..882724c
--- /dev/null
+++ b/drivers/uwb/drp-ie.c
@@ -0,0 +1,232 @@
+/*
+ * UWB DRP IE management.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+/*
+ * Allocate a DRP IE.
+ *
+ * To save having to free/allocate a DRP IE when its MAS changes,
+ * enough memory is allocated for the maximum number of DRP
+ * allocation fields.  This gives an overhead per reservation of up to
+ * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
+ */
+static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
+{
+	struct uwb_ie_drp *drp_ie;
+	unsigned tiebreaker;
+
+	drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
+			UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
+			GFP_KERNEL);
+	if (drp_ie) {
+		drp_ie->hdr.element_id = UWB_IE_DRP;
+
+		get_random_bytes(&tiebreaker, sizeof(unsigned));
+		uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1);
+	}
+	return drp_ie;
+}
+
+
+/*
+ * Fill a DRP IE's allocation fields from a MAS bitmap.
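+ *
+ * Illustrative example (not from a real trace): if MAS 0-3 are set in
+ * every one of the 16 zones of the 256-bit bitmap, all zones share the
+ * same 16-bit MAS pattern and are coalesced into a single allocation
+ * field with zone_bm = 0xffff and mas_bm = 0x000f.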
+ */
+static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
+			       struct uwb_mas_bm *mas)
+{
+	int z, i, num_fields = 0, next = 0;
+	struct uwb_drp_alloc *zones;
+	__le16 current_bmp;
+	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
+	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);
+
+	zones = drp_ie->allocs;
+
+	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);
+
+	/* Determine unique MAS bitmaps in zones from bitmap. */
+	for (z = 0; z < UWB_NUM_ZONES; z++) {
+		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
+		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
+			bool found = false;
+			current_bmp = (__le16) *tmp_mas_bm;
+			for (i = 0; i < next; i++) {
+				if (current_bmp == zones[i].mas_bm) {
+					zones[i].zone_bm |= 1 << z;
+					found = true;
+					break;
+				}
+			}
+			if (!found)  {
+				num_fields++;
+				zones[next].zone_bm = 1 << z;
+				zones[next].mas_bm = current_bmp;
+				next++;
+			}
+		}
+		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+	}
+
+	/* Store in format ready for transmission (le16). */
+	for (i = 0; i < num_fields; i++) {
+		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
+		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
+	}
+
+	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
+		+ num_fields * sizeof(struct uwb_drp_alloc);
+}
+
+/**
+ * uwb_drp_ie_update - update a reservation's DRP IE
+ * @rsv: the reservation
+ */
+int uwb_drp_ie_update(struct uwb_rsv *rsv)
+{
+	struct device *dev = &rsv->rc->uwb_dev.dev;
+	struct uwb_ie_drp *drp_ie;
+	int reason_code, status;
+
+	switch (rsv->state) {
+	case UWB_RSV_STATE_NONE:
+		kfree(rsv->drp_ie);
+		rsv->drp_ie = NULL;
+		return 0;
+	case UWB_RSV_STATE_O_INITIATED:
+		reason_code = UWB_DRP_REASON_ACCEPTED;
+		status = 0;
+		break;
+	case UWB_RSV_STATE_O_PENDING:
+		reason_code = UWB_DRP_REASON_ACCEPTED;
+		status = 0;
+		break;
+	case UWB_RSV_STATE_O_MODIFIED:
+		reason_code = UWB_DRP_REASON_MODIFIED;
+		status = 1;
+		break;
+	case UWB_RSV_STATE_O_ESTABLISHED:
+		reason_code = UWB_DRP_REASON_ACCEPTED;
+		status = 1;
+		break;
+	case UWB_RSV_STATE_T_ACCEPTED:
+		reason_code = UWB_DRP_REASON_ACCEPTED;
+		status = 1;
+		break;
+	case UWB_RSV_STATE_T_DENIED:
+		reason_code = UWB_DRP_REASON_DENIED;
+		status = 0;
+		break;
+	default:
+		dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state);
+		return -EINVAL;
+	}
+
+	if (rsv->drp_ie == NULL) {
+		rsv->drp_ie = uwb_drp_ie_alloc();
+		if (rsv->drp_ie == NULL)
+			return -ENOMEM;
+	}
+	drp_ie = rsv->drp_ie;
+
+	uwb_ie_drp_set_owner(drp_ie,        uwb_rsv_is_owner(rsv));
+	uwb_ie_drp_set_status(drp_ie,       status);
+	uwb_ie_drp_set_reason_code(drp_ie,  reason_code);
+	uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
+	uwb_ie_drp_set_type(drp_ie,         rsv->type);
+
+	if (uwb_rsv_is_owner(rsv)) {
+		switch (rsv->target.type) {
+		case UWB_RSV_TARGET_DEV:
+			drp_ie->dev_addr = rsv->target.dev->dev_addr;
+			break;
+		case UWB_RSV_TARGET_DEVADDR:
+			drp_ie->dev_addr = rsv->target.devaddr;
+			break;
+		}
+	} else
+		drp_ie->dev_addr = rsv->owner->dev_addr;
+
+	uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
+
+	rsv->ie_valid = true;
+	return 0;
+}
+
+/*
+ * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
+ *
+ * We are given a zone id and the MAS bitmap of bits that need to be set in
+ * this zone. Note that this zone may already have bits set and this only
+ * adds settings - we cannot simply assign the MAS bitmap contents to the
+ * zone contents. We iterate over the bits (MAS) in the zone and set the
+ * bits that are set in the given MAS bitmap.
+ */
+static
+void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
+{
+	int mas;
+	u16 mas_mask;
+
+	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
+		mas_mask = 1 << mas;
+		if (mas_bm & mas_mask)
+			set_bit(zone * UWB_MAS_PER_ZONE + mas, bm->bm);
+	}
+}
+
+/**
+ * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
+ * @mas:    MAS bitmap that will be populated to correspond to the
+ *          allocation fields in the DRP IE
+ * @drp_ie: the DRP IE that contains the allocation fields.
+ *
+ * The input format is an array of MAS allocation fields (16 bit Zone
+ * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
+ * 16.8.6. The output is a full 256 bit MAS bitmap.
+ *
+ * We go over all the allocation fields, for each allocation field we
+ * know which zones are impacted. We iterate over all the zones
+ * impacted and call a function that will set the correct MAS bits in
+ * each zone.
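+ *
+ * Illustrative example (mirror of the coalescing in
+ * uwb_drp_ie_from_bm()): an allocation field with zone_bm = 0x0001 and
+ * mas_bm = 0x00ff sets the bits for the first eight MAS of zone 0 in
+ * the output bitmap.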
+ */
+void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
+{
+	int numallocs = (drp_ie->hdr.length - 4) / 4;
+	const struct uwb_drp_alloc *alloc;
+	int cnt;
+	u16 zone_bm, mas_bm;
+	u8 zone;
+	u16 zone_mask;
+
+	for (cnt = 0; cnt < numallocs; cnt++) {
+		alloc = &drp_ie->allocs[cnt];
+		zone_bm = le16_to_cpu(alloc->zone_bm);
+		mas_bm = le16_to_cpu(alloc->mas_bm);
+		for (zone = 0; zone < UWB_NUM_ZONES; zone++)   {
+			zone_mask = 1 << zone;
+			if (zone_bm & zone_mask)
+				uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
+		}
+	}
+}
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
new file mode 100644
index 0000000..c0b1e5e
--- /dev/null
+++ b/drivers/uwb/drp.c
@@ -0,0 +1,461 @@
+/*
+ * Ultra Wide Band
+ * Dynamic Reservation Protocol handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include "uwb-internal.h"
+
+/**
+ * Construct and send the SET DRP IE
+ *
+ * @rc:         UWB Host controller
+ * @returns:    >= 0 number of bytes still available in the beacon
+ *              < 0 errno code on error.
+ *
+ * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
+ * device to include in its beacon at the same time. We thus have to
+ * traverse all reservations and include the DRP IEs of all PENDING
+ * and NEGOTIATED reservations in a SET DRP command for transmission.
+ *
+ * A DRP Availability IE is appended.
+ *
+ * rc->uwb_dev.mutex is held
+ *
+ * FIXME We currently ignore the returned value indicating the remaining space
+ * in beacon. This could be used to deny reservation requests earlier if
+ * determined that they would cause the beacon space to be exceeded.
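+ *
+ * Payload layout built below (sketch):
+ *
+ *   [ rccb header | wIELength | DRP IE #1 ... DRP IE #N | DRP Avail IE ]
+ *
+ * where each DRP IE occupies drp_ie->hdr.length + 2 octets.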
+ */
+static
+int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc)
+{
+	int result;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_cmd_set_drp_ie *cmd;
+	struct uwb_rc_evt_set_drp_ie reply;
+	struct uwb_rsv *rsv;
+	int num_bytes = 0;
+	u8 *IEDataptr;
+
+	result = -ENOMEM;
+	/* First traverse all reservations to determine memory needed. */
+	list_for_each_entry(rsv, &rc->reservations, rc_node) {
+		if (rsv->drp_ie != NULL)
+			num_bytes += rsv->drp_ie->hdr.length + 2;
+	}
+	num_bytes += sizeof(rc->drp_avail.ie);
+	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
+	if (cmd == NULL)
+		goto error;
+	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
+	cmd->wIELength = cpu_to_le16(num_bytes);
+	IEDataptr = (u8 *)&cmd->IEData[0];
+
+	/* Next traverse all reservations to place IEs in allocated memory. */
+	list_for_each_entry(rsv, &rc->reservations, rc_node) {
+		if (rsv->drp_ie != NULL) {
+			memcpy(IEDataptr, rsv->drp_ie,
+			       rsv->drp_ie->hdr.length + 2);
+			IEDataptr += rsv->drp_ie->hdr.length + 2;
+		}
+	}
+	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
+
+	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE;
+	result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb,
+			sizeof(*cmd) + num_bytes, &reply.rceb,
+			sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	result = le16_to_cpu(reply.wRemainingSpace);
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution "
+				"failed: %s (%d). RemainingSpace in beacon "
+				"= %d\n", uwb_rc_strerror(reply.bResultCode),
+				reply.bResultCode, result);
+		result = -EIO;
+	} else {
+		dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon "
+			     "= %d.\n", result);
+		result = 0;
+	}
+error_cmd:
+	kfree(cmd);
+error:
+	return result;
+}
+
+/**
+ * Send all DRP IEs associated with this host
+ *
+ * @returns:    >= 0 number of bytes still available in the beacon
+ *              < 0 errno code on error.
+ *
+ * As per the protocol we obtain the host controller device lock to access
+ * bandwidth structures.
+ */
+int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
+{
+	int result;
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	result = uwb_rc_gen_send_drp_ie(rc);
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+void uwb_drp_handle_timeout(struct uwb_rsv *rsv)
+{
+	struct device *dev = &rsv->rc->uwb_dev.dev;
+
+	dev_dbg(dev, "reservation timeout in state %s (%d)\n",
+		uwb_rsv_state_str(rsv->state), rsv->state);
+
+	switch (rsv->state) {
+	case UWB_RSV_STATE_O_INITIATED:
+		if (rsv->is_multicast) {
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+			return;
+		}
+		break;
+	case UWB_RSV_STATE_O_ESTABLISHED:
+		if (rsv->is_multicast)
+			return;
+		break;
+	default:
+		break;
+	}
+	uwb_rsv_remove(rsv);
+}
+
+/*
+ * Based on the DRP IE, transition a target reservation to a new
+ * state.
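+ *
+ * Target-side sketch: an ACCEPTED reason code with the status bit set
+ * moves the reservation to T_ACCEPTED; a DENIED reason code with the
+ * status bit clear drops it back to NONE.  New reservations are
+ * handled in uwb_rsv_find(), not here.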
+ */
+static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
+				   struct uwb_ie_drp *drp_ie)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	int status;
+	enum uwb_drp_reason reason_code;
+
+	status = uwb_ie_drp_status(drp_ie);
+	reason_code = uwb_ie_drp_reason_code(drp_ie);
+
+	if (status) {
+		switch (reason_code) {
+		case UWB_DRP_REASON_ACCEPTED:
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
+			break;
+		case UWB_DRP_REASON_MODIFIED:
+			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+				reason_code, status);
+			break;
+		default:
+			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+				 reason_code, status);
+		}
+	} else {
+		switch (reason_code) {
+		case UWB_DRP_REASON_ACCEPTED:
+			/* New reservations are handled in uwb_rsv_find(). */
+			break;
+		case UWB_DRP_REASON_DENIED:
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+			break;
+		case UWB_DRP_REASON_CONFLICT:
+		case UWB_DRP_REASON_MODIFIED:
+			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+				reason_code, status);
+			break;
+		default:
+			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+				 reason_code, status);
+		}
+	}
+}
+
+/*
+ * Based on the DRP IE, transition an owner reservation to a new
+ * state.
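+ *
+ * Owner-side sketch: a PENDING reason code (status clear) moves the
+ * reservation to O_PENDING; an ACCEPTED reason code with the status
+ * bit set moves it to O_ESTABLISHED; a DENIED reason code drops it
+ * back to NONE.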
+ */
+static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
+				  struct uwb_ie_drp *drp_ie)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	int status;
+	enum uwb_drp_reason reason_code;
+
+	status = uwb_ie_drp_status(drp_ie);
+	reason_code = uwb_ie_drp_reason_code(drp_ie);
+
+	if (status) {
+		switch (reason_code) {
+		case UWB_DRP_REASON_ACCEPTED:
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+			break;
+		case UWB_DRP_REASON_MODIFIED:
+			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+				reason_code, status);
+			break;
+		default:
+			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+				 reason_code, status);
+		}
+	} else {
+		switch (reason_code) {
+		case UWB_DRP_REASON_PENDING:
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
+			break;
+		case UWB_DRP_REASON_DENIED:
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+			break;
+		case UWB_DRP_REASON_CONFLICT:
+		case UWB_DRP_REASON_MODIFIED:
+			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+				reason_code, status);
+			break;
+		default:
+			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+				 reason_code, status);
+		}
+	}
+}
+
+/*
+ * Process a received DRP IE, it's either for a reservation owned by
+ * the RC or targeted at it (or it's for a WUSB cluster reservation).
+ */
+static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
+		     struct uwb_ie_drp *drp_ie)
+{
+	struct uwb_rsv *rsv;
+
+	rsv = uwb_rsv_find(rc, src, drp_ie);
+	if (!rsv) {
+		/*
+		 * No reservation? It's either for a recently
+		 * terminated reservation; or the DRP IE couldn't be
+		 * processed (e.g., an invalid IE or out of memory).
+		 */
+		return;
+	}
+
+	/*
+	 * Do nothing with DRP IEs for reservations that have been
+	 * terminated.
+	 */
+	if (rsv->state == UWB_RSV_STATE_NONE) {
+		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+		return;
+	}
+
+	if (uwb_ie_drp_owner(drp_ie))
+		uwb_drp_process_target(rc, rsv, drp_ie);
+	else
+		uwb_drp_process_owner(rc, rsv, drp_ie);
+}
+
+
+/*
+ * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
+ * from a device.
+ */
+static
+void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
+			 size_t ielen, struct uwb_dev *src_dev)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_ie_hdr *ie_hdr;
+	void *ptr;
+
+	ptr = drp_evt->ie_data;
+	for (;;) {
+		ie_hdr = uwb_ie_next(&ptr, &ielen);
+		if (!ie_hdr)
+			break;
+
+		switch (ie_hdr->element_id) {
+		case UWB_IE_DRP_AVAILABILITY:
+			/* FIXME: does something need to be done with this? */
+			break;
+		case UWB_IE_DRP:
+			uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr);
+			break;
+		default:
+			dev_warn(dev, "unexpected IE in DRP notification\n");
+			break;
+		}
+	}
+
+	if (ielen > 0)
+		dev_warn(dev, "%d octets remaining in DRP notification\n",
+			 (int)ielen);
+}
+
+
+/*
+ * Go through all the DRP IEs and find the ones that conflict with our
+ * reservations.
+ *
+ * FIXME: must resolve the conflict according to the rules in
+ * [ECMA-368].
+ */
+static
+void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
+				  size_t ielen, struct uwb_dev *src_dev)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_ie_hdr *ie_hdr;
+	struct uwb_ie_drp *drp_ie;
+	void *ptr;
+
+	ptr = drp_evt->ie_data;
+	for (;;) {
+		ie_hdr = uwb_ie_next(&ptr, &ielen);
+		if (!ie_hdr)
+			break;
+
+		drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr);
+
+		/* FIXME: check if this DRP IE conflicts. */
+	}
+
+	if (ielen > 0)
+		dev_warn(dev, "%d octets remaining in DRP notification\n",
+			 (int)ielen);
+}
+
+
+/*
+ * Terminate all reservations owned by, or targeted at, 'uwb_dev'.
+ */
+static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
+{
+	struct uwb_rsv *rsv;
+
+	list_for_each_entry(rsv, &rc->reservations, rc_node) {
+		if (rsv->owner == uwb_dev
+		    || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev))
+			uwb_rsv_remove(rsv);
+	}
+}
+
+
+/**
+ * uwbd_evt_handle_rc_drp - handle a DRP_IE event
+ * @evt: the DRP_IE event from the radio controller
+ *
+ * This processes DRP notifications from the radio controller, either
+ * initiating a new reservation or transitioning an existing
+ * reservation into a different state.
+ *
+ * DRP notifications can occur for three different reasons:
+ *
+ * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
+ *   the target or source have been received.
+ *
+ *   These DRP IEs could be new or for an existing reservation.
+ *
+ *   If the DRP IE for an existing reservation ceases to be
+ *   received for at least mMaxLostBeacons, the reservation should be
+ *   considered to be terminated.  Note that the TERMINATE reason (see
+ *   below) may not always be signalled (e.g., the remote device has
+ *   two or more reservations established with the RC).
+ *
+ * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
+ *   group conflict with the RC's reservations.
+ *
+ * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
+ *   from a device (i.e., it's terminated all reservations).
+ *
+ * Only the software state of the reservations is changed; the setting
+ * of the radio controller's DRP IEs is done after all the events in
+ * an event buffer are processed.  This saves waiting multiple times
+ * for the SET_DRP_IE command to complete.
+ */
+int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
+{
+	struct device *dev = &evt->rc->uwb_dev.dev;
+	struct uwb_rc *rc = evt->rc;
+	struct uwb_rc_evt_drp *drp_evt;
+	size_t ielength, bytes_left;
+	struct uwb_dev_addr src_addr;
+	struct uwb_dev *src_dev;
+	int reason;
+
+	/* Is there enough data to decode the event (and any IEs in
+	   its payload)? */
+	if (evt->notif.size < sizeof(*drp_evt)) {
+		dev_err(dev, "DRP event: Not enough data to decode event "
+			"[%zu bytes left, %zu needed]\n",
+			evt->notif.size, sizeof(*drp_evt));
+		return 0;
+	}
+	bytes_left = evt->notif.size - sizeof(*drp_evt);
+	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
+	ielength = le16_to_cpu(drp_evt->ie_length);
+	if (bytes_left != ielength) {
+		dev_err(dev, "DRP event: Not enough data in payload [%zu"
+			"bytes left, %zu declared in the event]\n",
+			bytes_left, ielength);
+		return 0;
+	}
+
+	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
+	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
+	if (!src_dev) {
+		/*
+		 * A DRP notification from an unrecognized device.
+		 *
+		 * This is probably from a WUSB device that doesn't
+		 * have an EUI-48 and therefore doesn't show up in the
+		 * UWB device database.  It's safe to simply ignore
+		 * these.
+		 */
+		return 0;
+	}
+
+	mutex_lock(&rc->rsvs_mutex);
+
+	reason = uwb_rc_evt_drp_reason(drp_evt);
+
+	switch (reason) {
+	case UWB_DRP_NOTIF_DRP_IE_RCVD:
+		uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
+		break;
+	case UWB_DRP_NOTIF_CONFLICT:
+		uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev);
+		break;
+	case UWB_DRP_NOTIF_TERMINATE:
+		uwb_drp_terminate_all(rc, src_dev);
+		break;
+	default:
+		dev_warn(dev, "ignored DRP event with reason code: %d\n", reason);
+		break;
+	}
+
+	mutex_unlock(&rc->rsvs_mutex);
+
+	uwb_dev_put(src_dev);
+	return 0;
+}
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
new file mode 100644
index 0000000..5fe566b
--- /dev/null
+++ b/drivers/uwb/est.c
@@ -0,0 +1,477 @@
+/*
+ * Ultra Wide Band Radio Control
+ * Event Size Tables management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * Infrastructure, code and data tables for guessing the size of
+ * events received on the notification endpoints of UWB radio
+ * controllers.
+ *
+ * You define a table of events and for each, its size and how to get
+ * the extra size.
+ *
+ * ENTRY POINTS:
+ *
+ * uwb_est_{init/destroy}(): To initialize/release the EST subsystem.
+ *
+ * uwb_est_[u]register(): To un/register event size tables
+ *   uwb_est_grow()
+ *
+ * uwb_est_find_size(): Get the size of an event
+ *   uwb_est_get_size()
+ */
+#include <linux/spinlock.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+
+struct uwb_est {
+	u16 type_event_high;
+	u16 vendor, product;
+	u8 entries;
+	const struct uwb_est_entry *entry;
+};
+
+
+static struct uwb_est *uwb_est;
+static u8 uwb_est_size;
+static u8 uwb_est_used;
+static DEFINE_RWLOCK(uwb_est_lock);
+
+/**
+ * WUSB Standard Event Size Table, HWA-RC interface
+ *
+ * Sizes for events and notifications type 0 (general), high nibble 0.
+ */
+static
+struct uwb_est_entry uwb_est_00_00xx[] = {
+	[UWB_RC_EVT_IE_RCV] = {
+		.size = sizeof(struct uwb_rc_evt_ie_rcv),
+		.offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
+	},
+	[UWB_RC_EVT_BEACON] = {
+		.size = sizeof(struct uwb_rc_evt_beacon),
+		.offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
+	},
+	[UWB_RC_EVT_BEACON_SIZE] = {
+		.size = sizeof(struct uwb_rc_evt_beacon_size),
+	},
+	[UWB_RC_EVT_BPOIE_CHANGE] = {
+		.size = sizeof(struct uwb_rc_evt_bpoie_change),
+		.offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
+				       wBPOIELength),
+	},
+	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
+		.size = sizeof(struct uwb_rc_evt_bp_slot_change),
+	},
+	[UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
+		.size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
+		.offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
+	},
+	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
+		.size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
+	},
+	[UWB_RC_EVT_DRP_AVAIL] = {
+		.size = sizeof(struct uwb_rc_evt_drp_avail)
+	},
+	[UWB_RC_EVT_DRP] = {
+		.size = sizeof(struct uwb_rc_evt_drp),
+		.offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
+	},
+	[UWB_RC_EVT_BP_SWITCH_STATUS] = {
+		.size = sizeof(struct uwb_rc_evt_bp_switch_status),
+	},
+	[UWB_RC_EVT_CMD_FRAME_RCV] = {
+		.size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
+		.offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
+	},
+	[UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
+		.size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
+		.offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
+	},
+	[UWB_RC_CMD_CHANNEL_CHANGE] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_DEV_ADDR_MGMT] = {
+		.size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) },
+	[UWB_RC_CMD_GET_IE] = {
+		.size = sizeof(struct uwb_rc_evt_get_ie),
+		.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
+	},
+	[UWB_RC_CMD_RESET] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SCAN] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SET_BEACON_FILTER] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SET_DRP_IE] = {
+		.size = sizeof(struct uwb_rc_evt_set_drp_ie),
+	},
+	[UWB_RC_CMD_SET_IE] = {
+		.size = sizeof(struct uwb_rc_evt_set_ie),
+	},
+	[UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SET_TX_POWER] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SLEEP] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_START_BEACON] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_STOP_BEACON] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_BP_MERGE] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SEND_COMMAND_FRAME] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+	[UWB_RC_CMD_SET_ASIE_NOTIF] = {
+		.size = sizeof(struct uwb_rc_evt_confirm),
+	},
+};
+
+static
+struct uwb_est_entry uwb_est_01_00xx[] = {
+	[UWB_RC_DAA_ENERGY_DETECTED] = {
+		.size = sizeof(struct uwb_rc_evt_daa_energy_detected),
+	},
+	[UWB_RC_SET_DAA_ENERGY_MASK] = {
+		.size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
+	},
+	[UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
+		.size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
+	},
+};
+
+/**
+ * Initialize the EST subsystem
+ *
+ * Register the standard tables also.
+ *
+ * FIXME: tag init
+ */
+int uwb_est_create(void)
+{
+	int result;
+
+	uwb_est_size = 2;
+	uwb_est_used = 0;
+	uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
+	if (uwb_est == NULL)
+		return -ENOMEM;
+
+	result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
+				  uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
+	if (result < 0)
+		goto out;
+	result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
+				  uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
+out:
+	return result;
+}
+
+
+/** Clean it up */
+void uwb_est_destroy(void)
+{
+	kfree(uwb_est);
+	uwb_est = NULL;
+	uwb_est_size = uwb_est_used = 0;
+}
+
+
+/**
+ * Double the capacity of the EST table
+ *
+ * @returns 0 if ok, < 0 errno on error.
+ */
+static
+int uwb_est_grow(void)
+{
+	size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
+	void *new = kmalloc(2 * actual_size, GFP_ATOMIC);
+	if (new == NULL)
+		return -ENOMEM;
+	memcpy(new, uwb_est, actual_size);
+	memset(new + actual_size, 0, actual_size);
+	kfree(uwb_est);
+	uwb_est = new;
+	uwb_est_size *= 2;
+	return 0;
+}
+
+
+/**
+ * Register an event size table
+ *
+ * Makes room for it if the table is full, and then inserts  it in the
+ * right position (entries are sorted by type, event_high, vendor and
+ * then product).
+ *
+ * @vendor:  vendor code for matching against the device (0x0000 and
+ *           0xffff mean any); use 0x0000 to force all to match without
+ *           checking possible vendor specific ones, 0xffff to match
+ *           after checking vendor specific ones.
+ *
+ * @product: product code from that vendor; same matching rules, use
+ *           0x0000 for not allowing vendor specific matches, 0xffff
+ *           for allowing.
+ *
+ * This arrangement just makes the tables sort differently. Because the
+ * table is sorted by growing type-event_high-vendor-product, a zero
+ * vendor will match before a 0x456a vendor, which will match
+ * before a 0xffff vendor.
+ *
+ * @returns 0 if ok, < 0 errno on error.
+ */
+/* FIXME: add bus type to vendor/product code */
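+/*
+ * Illustrative call (the type, vendor and product values below are
+ * made up):
+ *
+ *   static const struct uwb_est_entry vendor_est[] = {
+ *           [0x00] = { .size = sizeof(struct uwb_rc_evt_confirm) },
+ *   };
+ *   uwb_est_register(0xfd, 0x00, 0x1234, 0xabcd,
+ *                    vendor_est, ARRAY_SIZE(vendor_est));
+ */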
+int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
+		     const struct uwb_est_entry *entry, size_t entries)
+{
+	unsigned long flags;
+	unsigned itr;
+	u16 type_event_high;
+	int result = 0;
+
+	write_lock_irqsave(&uwb_est_lock, flags);
+	if (uwb_est_used == uwb_est_size) {
+		result = uwb_est_grow();
+		if (result < 0)
+			goto out;
+	}
+	/* Find the right spot to insert it in */
+	type_event_high = type << 8 | event_high;
+	for (itr = 0; itr < uwb_est_used; itr++)
+		if (uwb_est[itr].type_event_high < type_event_high
+		    && uwb_est[itr].vendor < vendor
+		    && uwb_est[itr].product < product)
+			break;
+
+	/* Shift others to make room for the new one? */
+	if (itr < uwb_est_used)
+		memmove(&uwb_est[itr+1], &uwb_est[itr],
+			(uwb_est_used - itr) * sizeof(uwb_est[0]));
+	uwb_est[itr].type_event_high = type << 8 | event_high;
+	uwb_est[itr].vendor = vendor;
+	uwb_est[itr].product = product;
+	uwb_est[itr].entry = entry;
+	uwb_est[itr].entries = entries;
+	uwb_est_used++;
+out:
+	write_unlock_irqrestore(&uwb_est_lock, flags);
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_est_register);
+
+
+/**
+ * Unregister an event size table
+ *
+ * This just removes the specified entry and moves the ones after it
+ * to fill in the gap. This is needed to keep the list sorted; no
+ * reallocation is done to reduce the size of the table.
+ *
+ * We unregister by all the data we used to register instead of by
+ * pointer to the @entry array because we might have used the same
+ * table for a bunch of IDs (for example).
+ *
+ * @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
+ */
+int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
+		       const struct uwb_est_entry *entry, size_t entries)
+{
+	unsigned long flags;
+	unsigned itr;
+	struct uwb_est est_cmp = {
+		.type_event_high = type << 8 | event_high,
+		.vendor = vendor,
+		.product = product,
+		.entry = entry,
+		.entries = entries
+	};
+	write_lock_irqsave(&uwb_est_lock, flags);
+	for (itr = 0; itr < uwb_est_used; itr++)
+		if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
+			goto found;
+	write_unlock_irqrestore(&uwb_est_lock, flags);
+	return -ENOENT;
+
+found:
+	if (itr < uwb_est_used - 1)	/* Not last one? move ones above */
+		memmove(&uwb_est[itr], &uwb_est[itr+1],
+			(uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
+	uwb_est_used--;
+	write_unlock_irqrestore(&uwb_est_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_est_unregister);
+
+
+/**
+ * Get the size of an event from a table
+ *
+ * @rceb: pointer to the buffer with the event
+ * @rceb_size: size of the area pointed to by @rceb in bytes.
+ * @returns: > 0      Size of the event
+ *	     -ENOSPC  An area big enough was not provided to look
+ *		      ahead into the event's guts and guess the size.
+ *	     -ENOENT  Unknown event code (wEvent).
+ *
+ * This will look at the received RCEB and guess what the total size
+ * is. For variable sized events, it will look further ahead into
+ * their length field to see how much data should be read.
+ *
+ * Note this size is *not* final--the neh (Notification/Event Handle)
+ * might specify an extra size to add.
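+ *
+ * For example, for UWB_RC_EVT_BEACON the table above gives
+ * .size = sizeof(struct uwb_rc_evt_beacon) and .offset pointing at
+ * wBeaconInfoLength, so the size returned is the fixed part of the
+ * event plus the 16-bit length read from the event itself.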
+ */
+static
+ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
+			 u8 event_low, const struct uwb_rceb *rceb,
+			 size_t rceb_size)
+{
+	unsigned offset;
+	ssize_t size;
+	struct device *dev = &uwb_rc->uwb_dev.dev;
+	const struct uwb_est_entry *entry;
+
+	size = -ENOENT;
+	if (event_low >= est->entries) {	/* in range? */
+		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
+			est, est->type_event_high, est->vendor, est->product,
+			est->entries, event_low);
+		goto out;
+	}
+	size = -ENOENT;
+	entry = &est->entry[event_low];
+	if (entry->size == 0 && entry->offset == 0) {	/* unknown? */
+		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
+			est, est->type_event_high, est->vendor,	est->product,
+			est->entries, event_low);
+		goto out;
+	}
+	offset = entry->offset;	/* extra fries with that? */
+	if (offset == 0)
+		size = entry->size;
+	else {
+		/* Ops, got an extra size field at 'offset'--read it */
+		const void *ptr = rceb;
+		size_t type_size = 0;
+		offset--;
+		size = -ENOSPC;			/* enough data for more? */
+		switch (entry->type) {
+		case UWB_EST_16:  type_size = sizeof(__le16); break;
+		case UWB_EST_8:   type_size = sizeof(u8);     break;
+		default: 	 BUG();
+		}
+		if (offset + type_size > rceb_size) {
+			dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
+				"not enough data to read extra size\n",
+				est, est->type_event_high, est->vendor,
+				est->product, est->entries);
+			goto out;
+		}
+		size = entry->size;
+		ptr += offset;
+		switch (entry->type) {
+		case UWB_EST_16:  size += le16_to_cpu(*(__le16 *)ptr); break;
+		case UWB_EST_8:   size += *(u8 *)ptr;                  break;
+		default: 	 BUG();
+		}
+	}
+out:
+	return size;
+}
+
+
+/**
+ * Guesses the size of a WA event
+ *
+ * @rceb: pointer to the buffer with the event
+ * @rceb_size: size of the area pointed to by @rceb in bytes.
+ * @returns: > 0      Size of the event
+ *	     -ENOSPC  An area big enough was not provided to look
+ *		      ahead into the event's guts and guess the size.
+ *	     -ENOENT  Unknown event code (wEvent).
+ *
+ * This will look at the received RCEB and guess what the total size
+ * is by checking all the tables registered with
+ * uwb_est_register(). For variable sized events, it will look further
+ * ahead into their length field to see how much data should be read.
+ *
+ * Note this size is *not* final--the neh (Notification/Event Handle)
+ * might specify an extra size to add or replace.
+ */
+ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+			  size_t rceb_size)
+{
+	/* FIXME: add vendor/product data */
+	ssize_t size;
+	struct device *dev = &rc->uwb_dev.dev;
+	unsigned long flags;
+	unsigned itr;
+	u16 type_event_high, event;
+	u8 *ptr = (u8 *) rceb;
+
+	read_lock_irqsave(&uwb_est_lock, flags);
+	d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x,"
+		 " buffer size %ld\n",
+		 (unsigned) rceb->bEventType,
+		 (unsigned) le16_to_cpu(rceb->wEvent),
+		 (unsigned) rceb->bEventContext,
+		 (long) rceb_size);
+	size = -ENOSPC;
+	if (rceb_size < sizeof(*rceb))
+		goto out;
+	event = le16_to_cpu(rceb->wEvent);
+	type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
+	for (itr = 0; itr < uwb_est_used; itr++) {
+		d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n",
+			uwb_est[itr].type_event_high, uwb_est[itr].vendor,
+			uwb_est[itr].product);
+		if (uwb_est[itr].type_event_high != type_event_high)
+			continue;
+		size = uwb_est_get_size(rc, &uwb_est[itr],
+					event & 0x00ff, rceb, rceb_size);
+		/* try more tables that might handle the same type */
+		if (size != -ENOENT)
+			goto out;
+	}
+	dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; "
+		"RCEB %02x %02x %02x %02x\n",
+		(unsigned) rceb->bEventType,
+		(unsigned) le16_to_cpu(rceb->wEvent),
+		(unsigned) rceb->bEventContext,
+		ptr[0], ptr[1], ptr[2], ptr[3]);
+	size = -ENOENT;
+out:
+	read_unlock_irqrestore(&uwb_est_lock, flags);
+	return size;
+}
+EXPORT_SYMBOL_GPL(uwb_est_find_size);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
new file mode 100644
index 0000000..3d26fa0
--- /dev/null
+++ b/drivers/uwb/hwa-rc.c
@@ -0,0 +1,926 @@
+/*
+ * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6])
+ * Radio Control command/event transport
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Initialize the Radio Control interface Driver.
+ *
+ * For each device probed, creates a 'struct hwarc' which contains
+ * just the representation of the UWB Radio Controller, and the logic
+ * for reading notifications and passing them to the UWB Core.
+ *
+ * So we initialize all of those, register the UWB Radio Controller
+ * and set up the notification/event handle to pipe the notifications
+ * to the UWB management Daemon.
+ *
+ * Command and event filtering.
+ *
+ * This is the driver for the Radio Control Interface described in WUSB
+ * 1.0. The core UWB module assumes that all drivers are compliant with the
+ * WHCI 0.95 specification. We thus create a filter that parses all
+ * incoming messages from the (WUSB 1.0) device and manipulates them to
+ * conform to the WHCI 0.95 specification. Similarly, outgoing messages
+ * are parsed and manipulated to conform to the WUSB 1.0 compliant messages
+ * that the device expects. Only a few messages are affected:
+ * Affected events:
+ *    UWB_RC_EVT_BEACON
+ *    UWB_RC_EVT_BP_SLOT_CHANGE
+ *    UWB_RC_EVT_DRP_AVAIL
+ *    UWB_RC_EVT_DRP
+ * Affected commands:
+ *    UWB_RC_CMD_SCAN
+ *    UWB_RC_CMD_SET_DRP_IE
+ *
+ *
+ *
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/wusb-wa.h>
+#include <linux/uwb.h>
+#include "uwb-internal.h"
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+/* The device uses commands and events from the WHCI specification, although
+ * reporting itself as WUSB compliant. */
+#define WUSB_QUIRK_WHCI_CMD_EVT		0x01
+
+/**
+ * Descriptor for an instance of the UWB Radio Control Driver that
+ * attaches to the RCI interface of the Host Wired Adapter.
+ *
+ * Unless there is a lock specific to the 'data members', all access
+ * is protected by uwb_rc->mutex.
+ *
+ * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
+ * @rd_buffer. Note there is no locking because it is perfectly (heh!)
+ * serialized--probe() submits an URB, callback is called, processes
+ * the data (synchronously), submits another URB, and so on. There is
+ * no concurrent access to the buffer.
+ */
+struct hwarc {
+	struct usb_device *usb_dev;
+	struct usb_interface *usb_iface;
+	struct uwb_rc *uwb_rc;		/* UWB host controller */
+	struct urb *neep_urb;		/* Notification endpoint handling */
+	struct edc neep_edc;
+	void *rd_buffer;		/* NEEP read buffer */
+};
+
+
+/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */
+struct uwb_rc_evt_beacon_WUSB_0100 {
+	struct uwb_rceb rceb;
+	u8	bChannelNumber;
+	__le16	wBPSTOffset;
+	u8	bLQI;
+	u8	bRSSI;
+	__le16	wBeaconInfoLength;
+	u8	BeaconInfo[];
+} __attribute__((packed));
+
+/**
+ * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95
+ *
+ * @header: the incoming event
+ * @buf_size: size of buffer containing incoming event
+ * @new_size: size of event after filtering completed
+ *
+ * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at
+ * the time we receive the beacon from WUSB so we just set it to
+ * UWB_RC_BEACON_TYPE_NEIGHBOR as a default.
+ * The solution below allocates memory upon receipt of every beacon from a
+ * WUSB device. This will degrade performance. What is the right way to
+ * do this?
+ */
+static
+int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc,
+				      struct uwb_rceb **header,
+				      const size_t buf_size,
+				      size_t *new_size)
+{
+	struct uwb_rc_evt_beacon_WUSB_0100 *be;
+	struct uwb_rc_evt_beacon *newbe;
+	size_t bytes_left, ielength;
+	struct device *dev = &rc->uwb_dev.dev;
+
+	be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb);
+	bytes_left = buf_size;
+	if (bytes_left < sizeof(*be)) {
+		dev_err(dev, "Beacon Received Notification: Not enough data "
+			"to decode for filtering (%zu vs %zu bytes needed)\n",
+			bytes_left, sizeof(*be));
+		return -EINVAL;
+	}
+	bytes_left -= sizeof(*be);
+	ielength = le16_to_cpu(be->wBeaconInfoLength);
+	if (bytes_left < ielength) {
+		dev_err(dev, "Beacon Received Notification: Not enough data "
+			"to decode IEs (%zu vs %zu bytes needed)\n",
+			bytes_left, ielength);
+		return -EINVAL;
+	}
+	newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC);
+	if (newbe == NULL)
+		return -ENOMEM;
+	newbe->rceb = be->rceb;
+	newbe->bChannelNumber = be->bChannelNumber;
+	newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR;
+	newbe->wBPSTOffset = be->wBPSTOffset;
+	newbe->bLQI = be->bLQI;
+	newbe->bRSSI = be->bRSSI;
+	newbe->wBeaconInfoLength = be->wBeaconInfoLength;
+	memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength);
+	*header = &newbe->rceb;
+	*new_size = sizeof(*newbe) + ielength;
+	return 1;  /* calling function will free memory */
+}
+
+
+/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */
+struct uwb_rc_evt_drp_avail_WUSB_0100 {
+	struct uwb_rceb rceb;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/**
+ * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
+ *
+ * @header: the incoming event
+ * @buf_size: size of buffer containing incoming event
+ * @new_size: size of event after filtering completed
+ */
+static
+int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
+					 struct uwb_rceb **header,
+					 const size_t buf_size,
+					 size_t *new_size)
+{
+	struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
+	struct uwb_rc_evt_drp_avail *newda;
+	struct uwb_ie_hdr *ie_hdr;
+	size_t bytes_left, ielength;
+	struct device *dev = &rc->uwb_dev.dev;
+
+
+	da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
+	bytes_left = buf_size;
+	if (bytes_left < sizeof(*da)) {
+		dev_err(dev, "Not enough data to decode DRP Avail "
+			"Notification for filtering. Expected %zu, "
+			"received %zu.\n", (size_t)sizeof(*da), bytes_left);
+		return -EINVAL;
+	}
+	bytes_left -= sizeof(*da);
+	ielength = le16_to_cpu(da->wIELength);
+	if (bytes_left < ielength) {
+		dev_err(dev, "DRP Avail Notification filter: IE length "
+			"[%zu bytes] does not match actual length "
+			"[%zu bytes].\n", ielength, bytes_left);
+		return -EINVAL;
+	}
+	if (ielength < sizeof(*ie_hdr)) {
+		dev_err(dev, "DRP Avail Notification filter: Not enough "
+			"data to decode IE [%zu bytes, %zu needed]\n",
+			ielength, sizeof(*ie_hdr));
+		return -EINVAL;
+	}
+	ie_hdr = (void *) da->IEData;
+	if (ie_hdr->length > 32) {
+		dev_err(dev, "DRP Availability Change event has unexpected "
+			"length for filtering. Expected < 32 bytes, "
+			"got %zu bytes.\n", (size_t)ie_hdr->length);
+		return -EINVAL;
+	}
+	newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
+	if (newda == NULL)
+		return -ENOMEM;
+	newda->rceb = da->rceb;
+	memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
+	*header = &newda->rceb;
+	*new_size = sizeof(*newda);
+	return 1; /* calling function will free memory */
+}
+
+
+/* DRP notification (WUSB 1.0 [8.6.3.9]) */
+struct uwb_rc_evt_drp_WUSB_0100 {
+	struct uwb_rceb rceb;
+	struct uwb_dev_addr wSrcAddr;
+	u8 bExplicit;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/**
+ * Filter WUSB 1.0 DRP Notification to be WHCI 0.95
+ *
+ * @header: the incoming event
+ * @buf_size: size of buffer containing incoming event
+ * @new_size: size of event after filtering completed
+ *
+ * It is hard to manage DRP reservations without having a Reason code.
+ * Unfortunately there is none in the WUSB spec. We just set the default to
+ * DRP IE RECEIVED.
+ * We do not currently use the bBeaconSlotNumber value, so we set this to
+ * zero for now.
+ */
+static
+int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc,
+				   struct uwb_rceb **header,
+				   const size_t buf_size,
+				   size_t *new_size)
+{
+	struct uwb_rc_evt_drp_WUSB_0100 *drpev;
+	struct uwb_rc_evt_drp *newdrpev;
+	size_t bytes_left, ielength;
+	struct device *dev = &rc->uwb_dev.dev;
+
+	drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb);
+	bytes_left = buf_size;
+	if (bytes_left < sizeof(*drpev)) {
+		dev_err(dev, "Not enough data to decode DRP Notification "
+			"for filtering. Expected %zu, received %zu.\n",
+			(size_t)sizeof(*drpev), bytes_left);
+		return -EINVAL;
+	}
+	ielength = le16_to_cpu(drpev->wIELength);
+	bytes_left -= sizeof(*drpev);
+	if (bytes_left < ielength) {
+		dev_err(dev, "DRP Notification filter: header length [%zu "
+			"bytes] does not match actual length [%zu "
+			"bytes].\n", ielength, bytes_left);
+		return -EINVAL;
+	}
+	newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC);
+	if (newdrpev == NULL)
+		return -ENOMEM;
+	newdrpev->rceb = drpev->rceb;
+	newdrpev->src_addr = drpev->wSrcAddr;
+	newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD;
+	newdrpev->beacon_slot_number = 0;
+	newdrpev->ie_length = drpev->wIELength;
+	memcpy(newdrpev->ie_data, drpev->IEData, ielength);
+	*header = &newdrpev->rceb;
+	*new_size = sizeof(*newdrpev) + ielength;
+	return 1; /* calling function will free memory */
+}
+
+
+/* Scan Command (WUSB 1.0 [8.6.2.5]) */
+struct uwb_rc_cmd_scan_WUSB_0100 {
+	struct uwb_rccb rccb;
+	u8 bChannelNumber;
+	u8 bScanState;
+} __attribute__((packed));
+
+/**
+ * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command
+ *
+ * @header:   command sent to device (compliant to WHCI 0.95)
+ * @size:     size of command sent to device
+ *
+ * We only reduce the size by two bytes because the WUSB 1.0 scan command
+ * does not have the last field (wStarttime). Also, make sure we don't send
+ * the device an unexpected scan type.
+ */
+static
+int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc,
+				    struct uwb_rccb **header,
+				    size_t *size)
+{
+	struct uwb_rc_cmd_scan *sc;
+
+	sc = container_of(*header, struct uwb_rc_cmd_scan, rccb);
+
+	if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME)
+		sc->bScanState = UWB_SCAN_ONLY;
+	/* Don't send the last two bytes. */
+	*size -= 2;
+	return 0;
+}
+
+
+/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */
+struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
+	struct uwb_rccb rccb;
+	u8 bExplicit;
+	__le16 wIELength;
+	struct uwb_ie_drp IEData[];
+} __attribute__((packed));
+
+/**
+ * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
+ *
+ * @header:   command sent to device (compliant to WHCI 0.95)
+ * @size:     size of command sent to device
+ *
+ * WUSB has an extra bExplicit field - we always assume explicit
+ * negotiation so this field is set. The command expected by the device is
+ * thus larger than the one prepared by the driver, so we need to
+ * reallocate memory to accommodate this.
+ * We trust the driver to send us the correct data so no checking is done
+ * on incoming data - even though it is variable length.
+ */
+static
+int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
+					  struct uwb_rccb **header,
+					  size_t *size)
+{
+	struct uwb_rc_cmd_set_drp_ie *orgcmd;
+	struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
+	size_t ielength;
+
+	orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
+	ielength = le16_to_cpu(orgcmd->wIELength);
+	cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+	cmd->rccb = orgcmd->rccb;
+	cmd->bExplicit = 0;
+	cmd->wIELength = orgcmd->wIELength;
+	memcpy(cmd->IEData, orgcmd->IEData, ielength);
+	*header = &cmd->rccb;
+	*size = sizeof(*cmd) + ielength;
+	return 1; /* calling function will free memory */
+}
+
+
+/**
+ * Filter data from WHCI driver to WUSB device
+ *
+ * @header: WHCI 0.95 compliant command from driver
+ * @size:   length of command
+ *
+ * The routine managing commands to the device (uwb_rc_cmd()) will call the
+ * filtering function pointer (if it exists) before it passes any data to
+ * the device. At this time the command has been formatted according to
+ * WHCI 0.95 and is ready to be sent to the device.
+ *
+ * The filter function will be provided with the current command and its
+ * length. The function will manipulate the command if necessary and
+ * potentially reallocate memory for a command that needs more memory than
+ * the given command provides. If new memory was allocated the function
+ * will return 1 to indicate to the calling function that the memory needs
+ * to be freed when it is no longer needed. The size will contain the new
+ * length of the command.
+ * If memory has not been allocated we rely on the original mechanisms to
+ * free the memory of the command - even when we reduce the value of size.
+ */
+static
+int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header,
+			       size_t *size)
+{
+	int result;
+	struct uwb_rccb *rccb = *header;
+	int cmd = le16_to_cpu(rccb->wCommand);
+	switch (cmd) {
+	case UWB_RC_CMD_SCAN:
+		result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size);
+		break;
+	case UWB_RC_CMD_SET_DRP_IE:
+		result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size);
+		break;
+	default:
+		result = -ENOANO;
+		break;
+	}
+	return result;
+}
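+
+#if 0
+/*
+ * Illustrative sketch only (not compiled): how a caller might honor
+ * the filter contract described above.  A return of 1 from the filter
+ * means @header now points to freshly allocated memory that the caller
+ * must free after sending the command; -ENOANO means no filtering was
+ * needed.  The real logic lives in the UWB core's command path, which
+ * is only assumed here, not quoted.
+ */
+static int example_send_filtered_cmd(struct uwb_rc *rc,
+				     struct uwb_rccb *cmd, size_t size)
+{
+	struct uwb_rccb *header = cmd;
+	int needs_free = 0, result;
+
+	if (rc->filter_cmd) {
+		result = rc->filter_cmd(rc, &header, &size);
+		if (result == 1)		/* filter reallocated the cmd */
+			needs_free = 1;
+		else if (result < 0 && result != -ENOANO)
+			return result;
+	}
+	result = rc->cmd(rc, header, size);
+	if (needs_free)
+		kfree(header);
+	return result;
+}
+#endif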
+
+
+/**
+ * Filter data from WHCI driver to WUSB device
+ *
+ * @header: WHCI 0.95 compliant command from driver
+ * @size:   length of command
+ *
+ * Filter commands based on which protocol the device supports. The WUSB
+ * errata should be the same as WHCI 0.95 so we do not filter that here -
+ * only WUSB 1.0.
+ */
+static
+int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header,
+		     size_t *size)
+{
+	int result = -ENOANO;
+	if (rc->version == 0x0100)
+		result = hwarc_filter_cmd_WUSB_0100(rc, header, size);
+	return result;
+}
+
+
+/**
+ * Compute return value as sum of incoming value and value at given offset
+ *
+ * @rceb:      event for which we compute the size, it contains a variable
+ *	       length field.
+ * @core_size: size of the "non variable" part of the event
+ * @offset:    place in event where the length of the variable part is stored
+ * @buf_size: total length of buffer in which event arrived - we need to make
+ *	       sure we read the offset in memory that is still part of the event
+ */
+static
+ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+			     size_t core_size, size_t offset,
+			     const size_t buf_size)
+{
+	ssize_t size = -ENOSPC;
+	const void *ptr = rceb;
+	size_t type_size = sizeof(__le16);
+	struct device *dev = &rc->uwb_dev.dev;
+
+	if (offset + type_size >= buf_size) {
+		dev_err(dev, "Not enough data to read extra size of event "
+			"0x%02x/%04x/%02x, only got %zu bytes.\n",
+			rceb->bEventType, le16_to_cpu(rceb->wEvent),
+			rceb->bEventContext, buf_size);
+		goto out;
+	}
+	ptr += offset;
+	size = core_size + le16_to_cpu(*(__le16 *)ptr);
+out:
+	return size;
+}
+
+
+/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */
+struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
+	struct uwb_rceb rceb;
+	u8 bSlotNumber;
+} __attribute__((packed));
+
+
+/**
+ * Filter data from WUSB device to WHCI driver
+ *
+ * @header:	 incoming event
+ * @buf_size:	 size of buffer in which event arrived
+ * @_event_size: actual size of event in the buffer
+ * @new_size:	 size of event after filtered
+ *
+ * We don't know how the buffer is constructed - there may be more than one
+ * event in it so buffer length does not determine event length. We first
+ * determine the expected size of the incoming event. This value is passed
+ * back only if the actual filtering succeeded (so we know the computed
+ * expected size is correct). This value will be zero if
+ * the event did not need any filtering.
+ *
+ * WHCI interprets the BP Slot Change event's data differently than
+ * WUSB. The event sizes are exactly the same. The data field
+ * indicates the new beacon slot in which an RC is transmitting its
+ * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368
+ * 17.16 (Table 117)). We thus know that the WUSB value will not set
+ * the bit bNoSlot, so we don't really do anything (placeholder).
+ */
+static
+int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
+				 const size_t buf_size, size_t *_real_size,
+				 size_t *_new_size)
+{
+	int result = -ENOANO;
+	struct uwb_rceb *rceb = *header;
+	int event = le16_to_cpu(rceb->wEvent);
+	ssize_t event_size;
+	size_t core_size, offset;
+
+	if (rceb->bEventType != UWB_RC_CET_GENERAL)
+		goto out;
+	switch (event) {
+	case UWB_RC_EVT_BEACON:
+		core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100);
+		offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100,
+				  wBeaconInfoLength);
+		event_size = hwarc_get_event_size(rc, rceb, core_size,
+						  offset, buf_size);
+		if (event_size < 0)
+			goto out;
+		*_real_size = event_size;
+		result = hwarc_filter_evt_beacon_WUSB_0100(rc, header,
+							   buf_size, _new_size);
+		break;
+	case UWB_RC_EVT_BP_SLOT_CHANGE:
+		*_new_size = *_real_size =
+			sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100);
+		result = 0;
+		break;
+
+	case UWB_RC_EVT_DRP_AVAIL:
+		core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100);
+		offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100,
+				  wIELength);
+		event_size = hwarc_get_event_size(rc, rceb, core_size,
+						  offset, buf_size);
+		if (event_size < 0)
+			goto out;
+		*_real_size = event_size;
+		result = hwarc_filter_evt_drp_avail_WUSB_0100(
+			rc, header, buf_size, _new_size);
+		break;
+
+	case UWB_RC_EVT_DRP:
+		core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100);
+		offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength);
+		event_size = hwarc_get_event_size(rc, rceb, core_size,
+						  offset, buf_size);
+		if (event_size < 0)
+			goto out;
+		*_real_size = event_size;
+		result = hwarc_filter_evt_drp_WUSB_0100(rc, header,
+							buf_size, _new_size);
+		break;
+
+	default:
+		break;
+	}
+out:
+	return result;
+}
+
+/**
+ * Filter data from WUSB device to WHCI driver
+ *
+ * @header:	 incoming event
+ * @buf_size:	 size of buffer in which event arrived
+ * @_event_size: actual size of event in the buffer
+ * @_new_size:	 size of event after filtered
+ *
+ * Filter events based on which protocol the device supports. The WUSB
+ * errata should be the same as WHCI 0.95 so we do not filter that here -
+ * only WUSB 1.0.
+ *
+ * If we don't handle it, we return -ENOANO (why the weird error code?
+ * well, so if I get it, I can pinpoint in the code that raised
+ * it...after all, not too many places use the higher error codes).
+ */
+static
+int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header,
+		       const size_t buf_size, size_t *_real_size,
+		       size_t *_new_size)
+{
+	int result = -ENOANO;
+	if (rc->version == 0x0100)
+		result =  hwarc_filter_event_WUSB_0100(
+			rc, header, buf_size, _real_size, _new_size);
+	return result;
+}
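+
+#if 0
+/*
+ * Illustrative sketch only (not compiled): how a consumer of the NEEP
+ * buffer might apply the event filter above.  *real_size reports how
+ * many bytes of the raw buffer the incoming event occupied, *new_size
+ * the size of the (possibly reallocated) WHCI-style event; a return of
+ * 1 means the filter allocated a replacement that must be freed once
+ * handled, -ENOANO that no filtering was needed.  The dispatch helper
+ * below is a placeholder, not a real UWB core entry point.
+ */
+static void example_dispatch_event(struct uwb_rc *rc, struct uwb_rceb *evt,
+				   size_t size)
+{
+	/* placeholder: hand the WHCI-style event to the core */
+}
+
+static void example_consume_event(struct uwb_rc *rc, struct uwb_rceb *rceb,
+				  size_t buf_size)
+{
+	struct uwb_rceb *header = rceb;
+	size_t real_size = 0, new_size = 0;
+	int result;
+
+	result = rc->filter_event(rc, &header, buf_size,
+				  &real_size, &new_size);
+	if (result < 0 && result != -ENOANO)
+		return;			/* malformed event, drop it */
+	/* on -ENOANO nothing was filtered and the core sizes the event
+	 * itself; otherwise new_size is the filtered event's length */
+	example_dispatch_event(rc, header, result >= 0 ? new_size : buf_size);
+	if (result == 1)		/* filter reallocated the event */
+		kfree(header);
+}
+#endif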
+
+
+/**
+ * Execute an UWB RC command on HWA
+ *
+ * @rc:	      Instance of a Radio Controller that is a HWA
+ * @cmd:      Buffer containing the RCCB and payload to execute
+ * @cmd_size: Size of the command buffer.
+ *
+ * NOTE: rc's mutex has to be locked
+ */
+static
+int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size)
+{
+	struct hwarc *hwarc = uwb_rc->priv;
+	return usb_control_msg(
+		hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0),
+		WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+		0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+		(void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */);
+}
+
+static
+int hwarc_reset(struct uwb_rc *uwb_rc)
+{
+	struct hwarc *hwarc = uwb_rc->priv;
+	return usb_reset_device(hwarc->usb_dev);
+}
+
+/**
+ * Callback for the notification and event endpoint
+ *
+ * Checks that everything is fine and then passes the read data to
+ * the notification/event handling mechanism (neh).
+ */
+static
+void hwarc_neep_cb(struct urb *urb)
+{
+	struct hwarc *hwarc = urb->context;
+	struct usb_interface *usb_iface = hwarc->usb_iface;
+	struct device *dev = &usb_iface->dev;
+	int result;
+
+	switch (result = urb->status) {
+	case 0:
+		d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n",
+			 urb->status, (size_t)urb->actual_length);
+		uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer,
+				urb->actual_length);
+		break;
+	case -ECONNRESET:	/* Not an error, but a controlled situation; */
+	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
+		d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status);
+		goto out;
+	case -ESHUTDOWN:	/* going away! */
+		d_printf(2, dev, "NEEP: URB down %d\n", urb->status);
+		goto out;
+	default:		/* On general errors, retry unless it gets ugly */
+		if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME))
+			goto error_exceeded;
+		dev_err(dev, "NEEP: URB error %d\n", urb->status);
+	}
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	d_printf(3, dev, "NEEP: submit %d\n", result);
+	if (result < 0) {
+		dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
+			result);
+		goto error;
+	}
+out:
+	return;
+
+error_exceeded:
+	dev_err(dev, "NEEP: URB max acceptable errors "
+		"exceeded, resetting device\n");
+error:
+	uwb_rc_neh_error(hwarc->uwb_rc, result);
+	uwb_rc_reset_all(hwarc->uwb_rc);
+	return;
+}
+
+static void hwarc_init(struct hwarc *hwarc)
+{
+	edc_init(&hwarc->neep_edc);
+}
+
+/**
+ * Initialize the notification/event endpoint stuff
+ *
+ * Note this is effectively a parallel thread; it knows that
+ * hwarc->uwb_rc always exists because the existence of a 'hwarc'
+ * means that there is a reference on the hwarc->uwb_rc (see
+ * _probe()), and thus _neep_cb() can execute safely.
+ */
+static int hwarc_neep_init(struct uwb_rc *rc)
+{
+	struct hwarc *hwarc = rc->priv;
+	struct usb_interface *iface = hwarc->usb_iface;
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+	struct device *dev = &iface->dev;
+	int result;
+	struct usb_endpoint_descriptor *epd;
+
+	epd = &iface->cur_altsetting->endpoint[0].desc;
+	hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL);
+	if (hwarc->rd_buffer == NULL) {
+		dev_err(dev, "Unable to allocate notification's read buffer\n");
+		goto error_rd_buffer;
+	}
+	hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (hwarc->neep_urb == NULL) {
+		dev_err(dev, "Unable to allocate notification URB\n");
+		goto error_urb_alloc;
+	}
+	usb_fill_int_urb(hwarc->neep_urb, usb_dev,
+			 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+			 hwarc->rd_buffer, PAGE_SIZE,
+			 hwarc_neep_cb, hwarc, epd->bInterval);
+	result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC);
+	if (result < 0) {
+		dev_err(dev, "Cannot submit notification URB: %d\n", result);
+		goto error_neep_submit;
+	}
+	return 0;
+
+error_neep_submit:
+	usb_free_urb(hwarc->neep_urb);
+error_urb_alloc:
+	free_page((unsigned long)hwarc->rd_buffer);
+error_rd_buffer:
+	return -ENOMEM;
+}
+
+
+/** Clean up all the notification endpoint resources */
+static void hwarc_neep_release(struct uwb_rc *rc)
+{
+	struct hwarc *hwarc = rc->priv;
+
+	usb_kill_urb(hwarc->neep_urb);
+	usb_free_urb(hwarc->neep_urb);
+	free_page((unsigned long)hwarc->rd_buffer);
+}
+
+/**
+ * Get the version from class-specific descriptor
+ *
+ * NOTE: this descriptor comes with the big bundled configuration
+ *	 descriptor that includes the interface and endpoint descriptors, so
+ *	 we just look for it in the cached copy kept by the USB stack.
+ *
+ * NOTE2: We convert LE fields to CPU order.
+ */
+static int hwarc_get_version(struct uwb_rc *rc)
+{
+	int result;
+
+	struct hwarc *hwarc = rc->priv;
+	struct uwb_rc_control_intf_class_desc *descr;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct usb_device *usb_dev = hwarc->usb_dev;
+	char *itr;
+	struct usb_descriptor_header *hdr;
+	size_t itr_size, actconfig_idx;
+	u16 version;
+
+	actconfig_idx = usb_dev->actconfig - usb_dev->config;
+	itr = usb_dev->rawdescriptors[actconfig_idx];
+	itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+	while (itr_size >= sizeof(*hdr)) {
+		hdr = (struct usb_descriptor_header *) itr;
+		d_printf(3, dev, "Extra device descriptor: "
+			 "type %02x/%u bytes @ %zu (%zu left)\n",
+			 hdr->bDescriptorType, hdr->bLength,
+			 (itr - usb_dev->rawdescriptors[actconfig_idx]),
+			 itr_size);
+		if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL)
+			goto found;
+		itr += hdr->bLength;
+		itr_size -= hdr->bLength;
+	}
+	dev_err(dev, "cannot find Radio Control Interface Class descriptor\n");
+	return -ENODEV;
+
+found:
+	result = -EINVAL;
+	if (hdr->bLength > itr_size) {	/* is it available? */
+		dev_err(dev, "incomplete Radio Control Interface Class "
+			"descriptor (%zu bytes left, %u needed)\n",
+			itr_size, hdr->bLength);
+		goto error;
+	}
+	if (hdr->bLength < sizeof(*descr)) {
+		dev_err(dev, "short Radio Control Interface Class "
+			"descriptor\n");
+		goto error;
+	}
+	descr = (struct uwb_rc_control_intf_class_desc *) hdr;
+	/* Make LE fields CPU order */
+	version = __le16_to_cpu(descr->bcdRCIVersion);
+	if (version != 0x0100) {
+		dev_err(dev, "Device reports protocol version 0x%04x. We "
+			"do not support that. \n", version);
+		result = -EINVAL;
+		goto error;
+	}
+	rc->version = version;
+	d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n",
+		 rc->version);
+	result = 0;
+error:
+	return result;
+}
+
+/*
+ * By creating a 'uwb_rc', we have a reference on it -- that reference
+ * is the one we drop when we disconnect.
+ *
+ * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there
+ * is only one altsetting allowed.
+ */
+static int hwarc_probe(struct usb_interface *iface,
+		       const struct usb_device_id *id)
+{
+	int result;
+	struct uwb_rc *uwb_rc;
+	struct hwarc *hwarc;
+	struct device *dev = &iface->dev;
+
+	result = -ENOMEM;
+	uwb_rc = uwb_rc_alloc();
+	if (uwb_rc == NULL) {
+		dev_err(dev, "unable to allocate RC instance\n");
+		goto error_rc_alloc;
+	}
+	hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL);
+	if (hwarc == NULL) {
+		dev_err(dev, "unable to allocate HWA RC instance\n");
+		goto error_alloc;
+	}
+	hwarc_init(hwarc);
+	hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface));
+	hwarc->usb_iface = usb_get_intf(iface);
+	hwarc->uwb_rc = uwb_rc;
+
+	uwb_rc->owner = THIS_MODULE;
+	uwb_rc->start = hwarc_neep_init;
+	uwb_rc->stop  = hwarc_neep_release;
+	uwb_rc->cmd   = hwarc_cmd;
+	uwb_rc->reset = hwarc_reset;
+	if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) {
+		uwb_rc->filter_cmd   = NULL;
+		uwb_rc->filter_event = NULL;
+	} else {
+		uwb_rc->filter_cmd   = hwarc_filter_cmd;
+		uwb_rc->filter_event = hwarc_filter_event;
+	}
+
+	result = uwb_rc_add(uwb_rc, dev, hwarc);
+	if (result < 0)
+		goto error_rc_add;
+	result = hwarc_get_version(uwb_rc);
+	if (result < 0) {
+		dev_err(dev, "cannot retrieve version of RC \n");
+		goto error_get_version;
+	}
+	usb_set_intfdata(iface, hwarc);
+	return 0;
+
+error_get_version:
+	uwb_rc_rm(uwb_rc);
+error_rc_add:
+	usb_put_intf(iface);
+	usb_put_dev(hwarc->usb_dev);
+	kfree(hwarc);
+error_alloc:
+	uwb_rc_put(uwb_rc);
+error_rc_alloc:
+	return result;
+}
+
+static void hwarc_disconnect(struct usb_interface *iface)
+{
+	struct hwarc *hwarc = usb_get_intfdata(iface);
+	struct uwb_rc *uwb_rc = hwarc->uwb_rc;
+
+	usb_set_intfdata(hwarc->usb_iface, NULL);
+	uwb_rc_rm(uwb_rc);
+	usb_put_intf(hwarc->usb_iface);
+	usb_put_dev(hwarc->usb_dev);
+	d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc);
+	kfree(hwarc);
+	uwb_rc_put(uwb_rc);	/* when creating the device, refcount = 1 */
+}
+
+/** USB device IDs that we handle */
+static struct usb_device_id hwarc_id_table[] = {
+	/* D-Link DUB-1210 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
+	  .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
+	/* Intel i1480 (using firmware 1.3PA2-20070828) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
+	  .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
+	/* Generic match for the Radio Control interface */
+	{ USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, hwarc_id_table);
+
+static struct usb_driver hwarc_driver = {
+	.name =		"hwa-rc",
+	.probe =	hwarc_probe,
+	.disconnect =	hwarc_disconnect,
+	.id_table =	hwarc_id_table,
+};
+
+static int __init hwarc_driver_init(void)
+{
+	int result;
+	result = usb_register(&hwarc_driver);
+	if (result < 0)
+		printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n",
+		       result);
+	return result;
+
+}
+module_init(hwarc_driver_init);
+
+static void __exit hwarc_driver_exit(void)
+{
+	usb_deregister(&hwarc_driver);
+}
+module_exit(hwarc_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile
new file mode 100644
index 0000000..212bbc7
--- /dev/null
+++ b/drivers/uwb/i1480/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_UWB_I1480U)	+= dfu/ i1480-est.o
+obj-$(CONFIG_UWB_I1480U_WLP)	+= i1480u-wlp/
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile
new file mode 100644
index 0000000..bd1b9f2
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_UWB_I1480U)	+= i1480-dfu-usb.o
+
+i1480-dfu-usb-objs := \
+	dfu.o 	\
+	mac.o	\
+	phy.o	\
+	usb.o
+
+
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c
new file mode 100644
index 0000000..9097b3b
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/dfu.c
@@ -0,0 +1,217 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * Main driver
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Common code for firmware upload used by the USB and PCI version;
+ * i1480_fw_upload() takes a device descriptor and uses the function
+ * pointers it provides to upload firmware and prepare the PHY.
+ *
+ * As well, provides common functions used by the rest of the code.
+ */
+#include "i1480-dfu.h"
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/uwb.h>
+#include <linux/random.h>
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * i1480_rceb_check - Check RCEB for expected field values
+ * @i1480: pointer to device for which RCEB is being checked
+ * @rceb: RCEB being checked
+ * @cmd: which command the RCEB is related to
+ * @context: expected context
+ * @expected_type: expected event type
+ * @expected_event: expected event
+ *
+ * If @cmd is NULL, do not print error messages, but still return an error
+ * code.
+ *
+ * Return 0 if @rceb matches the expected values, -EINVAL otherwise.
+ */
+int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb,
+		     const char *cmd, u8 context, u8 expected_type,
+		     unsigned expected_event)
+{
+	int result = 0;
+	struct device *dev = i1480->dev;
+	if (rceb->bEventContext != context) {
+		if (cmd)
+			dev_err(dev, "%s: unexpected context id 0x%02x "
+				"(expected 0x%02x)\n", cmd,
+				rceb->bEventContext, context);
+		result = -EINVAL;
+	}
+	if (rceb->bEventType != expected_type) {
+		if (cmd)
+			dev_err(dev, "%s: unexpected event type 0x%02x "
+				"(expected 0x%02x)\n", cmd,
+				rceb->bEventType, expected_type);
+		result = -EINVAL;
+	}
+	if (le16_to_cpu(rceb->wEvent) != expected_event) {
+		if (cmd)
+			dev_err(dev, "%s: unexpected event 0x%04x "
+				"(expected 0x%04x)\n", cmd,
+				le16_to_cpu(rceb->wEvent), expected_event);
+		result = -EINVAL;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(i1480_rceb_check);
+
+
+/**
+ * Execute a Radio Control Command
+ *
+ * Command data has to be in i1480->cmd_buf.
+ *
+ * @returns size of the reply data filled in i1480->evt_buf or < 0 errno
+ *          code on error.
+ */
+ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size,
+		  size_t reply_size)
+{
+	ssize_t result;
+	struct uwb_rceb *reply = i1480->evt_buf;
+	struct uwb_rccb *cmd = i1480->cmd_buf;
+	u16 expected_event = reply->wEvent;
+	u8 expected_type = reply->bEventType;
+	u8 context;
+
+	d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
+	init_completion(&i1480->evt_complete);
+	i1480->evt_result = -EINPROGRESS;
+	do {
+		get_random_bytes(&context, 1);
+	} while (context == 0x00 || context == 0xff);
+	cmd->bCommandContext = context;
+	result = i1480->cmd(i1480, cmd_name, cmd_size);
+	if (result < 0)
+		goto error;
+	/* wait for the callback to report an event was received */
+	result = wait_for_completion_interruptible_timeout(
+		&i1480->evt_complete, HZ);
+	if (result == 0) {
+		result = -ETIMEDOUT;
+		goto error;
+	}
+	if (result < 0)
+		goto error;
+	result = i1480->evt_result;
+	if (result < 0) {
+		dev_err(i1480->dev, "%s: command reply reception failed: %zd\n",
+			cmd_name, result);
+		goto error;
+	}
+	/*
+	 * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a
+	 * spurious notification after firmware is downloaded. So check whether
+	 * the received RCEB is such a notification before assuming that the
+	 * command has failed.
+	 */
+	if (i1480_rceb_check(i1480, i1480->evt_buf, NULL,
+			     0, 0xfd, 0x0022) == 0) {
+		/* Now wait for the actual RCEB for this command. */
+		result = i1480->wait_init_done(i1480);
+		if (result < 0)
+			goto error;
+		result = i1480->evt_result;
+	}
+	if (result != reply_size) {
+		dev_err(i1480->dev, "%s returned only %zd bytes, %zu expected\n",
+			cmd_name, result, reply_size);
+		result = -EINVAL;
+		goto error;
+	}
+	/* Verify we got the right event in response */
+	result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context,
+				  expected_type, expected_event);
+error:
+	d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n",
+		i1480, cmd_name, cmd_size, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i1480_cmd);
+
+
+static
+int i1480_print_state(struct i1480 *i1480)
+{
+	int result;
+	u32 *buf = (u32 *) i1480->cmd_buf;
+
+	result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf));
+	if (result < 0) {
+		dev_err(i1480->dev, "cannot read U & L states: %d\n", result);
+		goto error;
+	}
+	dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]);
+error:
+	return result;
+}
+
+
+/*
+ * PCI probe, firmware uploader
+ *
+ * _mac_fw_upload() will call rc_setup(), which needs an rc_release().
+ */
+int i1480_fw_upload(struct i1480 *i1480)
+{
+	int result;
+
+	result = i1480_pre_fw_upload(i1480);	/* PHY pre fw */
+	if (result < 0 && result != -ENOENT) {
+		i1480_print_state(i1480);
+		goto error;
+	}
+	result = i1480_mac_fw_upload(i1480);	/* MAC fw */
+	if (result < 0) {
+		if (result == -ENOENT)
+			dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n",
+				i1480->mac_fw_name);
+		else
+			i1480_print_state(i1480);
+		goto error;
+	}
+	result = i1480_phy_fw_upload(i1480);	/* PHY fw */
+	if (result < 0 && result != -ENOENT) {
+		i1480_print_state(i1480);
+		goto error_rc_release;
+	}
+	/*
+	 * FIXME: find some reliable way to check whether firmware is running
+	 * properly. Maybe use some standard request that has no side effects?
+	 */
+	dev_info(i1480->dev, "firmware uploaded successfully\n");
+error_rc_release:
+	if (i1480->rc_release)
+		i1480->rc_release(i1480);
+	result = 0;
+error:
+	return result;
+}
+EXPORT_SYMBOL_GPL(i1480_fw_upload);
diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h
new file mode 100644
index 0000000..46f45e8
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/i1480-dfu.h
@@ -0,0 +1,260 @@
+/*
+ * i1480 Device Firmware Upload
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver is the firmware uploader for the Intel Wireless UWB
+ * Link 1480 device (both in the USB and PCI incarnations).
+ *
+ * The process is quite simple: we stop the device, write the firmware
+ * to its memory and then restart it. Wait for the device to let us
+ * know it is done booting firmware. Ready.
+ *
+ * We might have to upload before or after a phy firmware (which might
+ * be done in two methods, using a normal firmware image or through
+ * the MPI port).
+ *
+ * Because USB and PCI use common methods, we just make ops out of the
+ * common operations (read, write, wait_init_done and cmd) and
+ * implement them in usb.c and pci.c.
+ *
+ * The flow is (some parts omitted):
+ *
+ * i1480_{usb,pci}_probe()	  On enumerate/discovery
+ *   i1480_fw_upload()
+ *     i1480_pre_fw_upload()
+ *       __mac_fw_upload()
+ *         fw_hdrs_load()
+ *         mac_fw_hdrs_push()
+ *           i1480->write()       [i1480_{usb,pci}_write()]
+ *           i1480_fw_cmp()
+ *             i1480->read()      [i1480_{usb,pci}_read()]
+ *     i1480_mac_fw_upload()
+ *       __mac_fw_upload()
+ *       i1480->rc_setup()
+ *       i1480->wait_init_done()
+ *       i1480_cmd_reset()
+ *         i1480->cmd()           [i1480_{usb,pci}_cmd()]
+ *         ...
+ *     i1480_phy_fw_upload()
+ *       request_firmware()
+ *       i1480_mpi_write()
+ *         i1480->cmd()           [i1480_{usb,pci}_cmd()]
+ *
+ * Once the probe function enumerates the device and uploads the
+ * firmware, we just exit with -ENODEV, as we don't really want to
+ * attach to the device.
+ */
+#ifndef __i1480_DFU_H__
+#define __i1480_DFU_H__
+
+#include <linux/uwb/spec.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+
+#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))
+
+#if i1480_FW > 0x00000302
+#define i1480_RCEB_EXTENDED
+#endif
+
+struct uwb_rccb;
+struct uwb_rceb;
+
+/*
+ * Common firmware upload handlers
+ *
+ * Normally you embed this struct in another one specific to your hw.
+ *
+ * @write	Write to device's memory from buffer.
+ * @read	Read from device's memory to i1480->cmd_buf.
+ * @rc_setup	Set up the device after the basic firmware is uploaded
+ * @wait_init_done
+ *              Wait for the device to send a notification saying init
+ *              is done.
+ * @cmd         FOP for issuing the command to the hardware. The
+ *              command data is contained in i1480->cmd_buf and the size
+ *              is supplied as an argument. The command replied is put
+ *              in i1480->evt_buf and the size in i1480->evt_result (or if
+ *              an error, a < 0 errno code).
+ *
+ * @cmd_buf	Memory buffer used to send commands to the device.
+ *              Allocated by the upper layers i1480_fw_upload().
+ *              Size has to be @buf_size.
+ * @evt_buf	Memory buffer used to place the async notifications
+ *              received by the hw. Allocated by the upper layers
+ *              i1480_fw_upload().
+ *              Size has to be @buf_size.
+ * @evt_complete
+ *              Low level driver uses this to notify code waiting for
+ *              an event that the event has arrived and data is in
+ *              i1480->evt_buf (and size/result in i1480->evt_result).
+ * @hw_rev
+ *              Use this value to activate dfu code to support new revisions
+ *              of hardware.  i1480_init() sets this to a default value.
+ *              It should be updated by the USB and PCI code.
+ */
+struct i1480 {
+	struct device *dev;
+
+	int (*write)(struct i1480 *, u32 addr, const void *, size_t);
+	int (*read)(struct i1480 *, u32 addr, size_t);
+	int (*rc_setup)(struct i1480 *);
+	void (*rc_release)(struct i1480 *);
+	int (*wait_init_done)(struct i1480 *);
+	int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size);
+	const char *pre_fw_name;
+	const char *mac_fw_name;
+	const char *mac_fw_name_deprecate;	/* FIXME: Will go away */
+	const char *phy_fw_name;
+	u8 hw_rev;
+
+	size_t buf_size;	/* size of both evt_buf and cmd_buf */
+	void *evt_buf, *cmd_buf;
+	ssize_t evt_result;
+	struct completion evt_complete;
+};
+
+static inline
+void i1480_init(struct i1480 *i1480)
+{
+	i1480->hw_rev = 1;
+	init_completion(&i1480->evt_complete);
+}
+
+extern int i1480_fw_upload(struct i1480 *);
+extern int i1480_pre_fw_upload(struct i1480 *);
+extern int i1480_mac_fw_upload(struct i1480 *);
+extern int i1480_phy_fw_upload(struct i1480 *);
+extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t);
+extern int i1480_rceb_check(const struct i1480 *,
+			    const struct uwb_rceb *, const char *, u8,
+			    u8, unsigned);
+
+enum {
+	/* Vendor specific command type */
+	i1480_CET_VS1 = 		0xfd,
+	/* i1480 commands */
+	i1480_CMD_SET_IP_MAS = 		0x000e,
+	i1480_CMD_GET_MAC_PHY_INFO = 	0x0003,
+	i1480_CMD_MPI_WRITE =		0x000f,
+	i1480_CMD_MPI_READ = 		0x0010,
+	/* i1480 events */
+#if i1480_FW > 0x00000302
+	i1480_EVT_CONFIRM = 		0x0002,
+	i1480_EVT_RM_INIT_DONE = 	0x0101,
+	i1480_EVT_DEV_ADD = 		0x0103,
+	i1480_EVT_DEV_RM = 		0x0104,
+	i1480_EVT_DEV_ID_CHANGE = 	0x0105,
+	i1480_EVT_GET_MAC_PHY_INFO =	i1480_CMD_GET_MAC_PHY_INFO,
+#else
+	i1480_EVT_CONFIRM = 		0x0002,
+	i1480_EVT_RM_INIT_DONE = 	0x0101,
+	i1480_EVT_DEV_ADD = 		0x0103,
+	i1480_EVT_DEV_RM = 		0x0104,
+	i1480_EVT_DEV_ID_CHANGE = 	0x0105,
+	i1480_EVT_GET_MAC_PHY_INFO =	i1480_EVT_CONFIRM,
+#endif
+};
+
+
+struct i1480_evt_confirm {
+	struct uwb_rceb rceb;
+#ifdef i1480_RCEB_EXTENDED
+	__le16 wParamLength;
+#endif
+	u8 bResultCode;
+} __attribute__((packed));
+
+
+struct i1480_rceb {
+	struct uwb_rceb rceb;
+#ifdef i1480_RCEB_EXTENDED
+	__le16 wParamLength;
+#endif
+} __attribute__((packed));
+
+
+/**
+ * Get MAC & PHY Information confirm event structure
+ *
+ * Confirm event returned by the command.
+ */
+struct i1480_evt_confirm_GMPI {
+#if i1480_FW > 0x00000302
+	struct uwb_rceb rceb;
+	__le16 wParamLength;
+	__le16 status;
+	u8 mac_addr[6];		/* EUI-64 bit IEEE address [still 8 bytes?] */
+	u8 dev_addr[2];
+	__le16 mac_fw_rev;	/* major = v >> 8; minor = v & 0xff */
+	u8 hw_rev;
+	u8 phy_vendor;
+	u8 phy_rev;		/* major = v >> 8; minor = v & 0xff */
+	__le16 mac_caps;
+	u8 phy_caps[3];
+	u8 key_stores;
+	__le16 mcast_addr_stores;
+	u8 sec_mode_supported;
+#else
+	struct uwb_rceb rceb;
+	u8 status;
+	u8 mac_addr[8];         /* EUI-64 bit IEEE address [still 8 bytes?] */
+	u8 dev_addr[2];
+	__le16 mac_fw_rev;      /* major = v >> 8; minor = v & 0xff */
+	__le16 phy_fw_rev;      /* major = v >> 8; minor = v & 0xff */
+	__le16 mac_caps;
+	u8 phy_caps;
+	u8 key_stores;
+	__le16 mcast_addr_stores;
+	u8 sec_mode_supported;
+#endif
+} __attribute__((packed));
+
+
+struct i1480_cmd_mpi_write {
+	struct uwb_rccb rccb;
+	__le16 size;
+	u8 data[];
+};
+
+
+struct i1480_cmd_mpi_read {
+	struct uwb_rccb rccb;
+	__le16 size;
+	struct {
+		u8 page, offset;
+	} __attribute__((packed)) data[];
+} __attribute__((packed));
+
+
+struct i1480_evt_mpi_read {
+	struct uwb_rceb rceb;
+#ifdef i1480_RCEB_EXTENDED
+	__le16 wParamLength;
+#endif
+	u8 bResultCode;
+	__le16 size;
+	struct {
+		u8 page, offset, value;
+	} __attribute__((packed)) data[];
+} __attribute__((packed));
+
+
+#endif /* #ifndef __i1480_DFU_H__ */
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c
new file mode 100644
index 0000000..2e4d8f0
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/mac.c
@@ -0,0 +1,527 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * MAC Firmware upload implementation
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Implementation of the code for parsing the firmware file (extract
+ * the headers and binary code chunks) in the fw_*() functions. The
+ * code to upload pre and mac firmwares is the same, so it uses a
+ * common entry point in __mac_fw_upload(), which uses the i1480
+ * function pointers to push the firmware to the device.
+ */
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/uwb.h>
+#include "i1480-dfu.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/*
+ * Descriptor for a continuous segment of MAC fw data
+ */
+struct fw_hdr {
+	unsigned long address;
+	size_t length;
+	const u32 *bin;
+	struct fw_hdr *next;
+};
+
+
+/* Free a chain of firmware headers */
+static
+void fw_hdrs_free(struct fw_hdr *hdr)
+{
+	struct fw_hdr *next;
+
+	while (hdr) {
+		next = hdr->next;
+		kfree(hdr);
+		hdr = next;
+	}
+}
+
+
+/* Fill a firmware header descriptor from a memory buffer */
+static
+int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt,
+		const char *_data, const u32 *data_itr, const u32 *data_top)
+{
+	size_t hdr_offset =  (const char *) data_itr - _data;
+	size_t remaining_size = (void *) data_top - (void *) data_itr;
+	if (data_itr + 2 > data_top) {
+		dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at "
+		       "offset %zu, limit %zu\n",
+		       hdr_cnt, hdr_offset,
+		       (const char *) data_itr + 2 - _data,
+		       (const char *) data_top - _data);
+		return -EINVAL;
+	}
+	hdr->next = NULL;
+	hdr->address = le32_to_cpu(*data_itr++);
+	hdr->length = le32_to_cpu(*data_itr++);
+	hdr->bin = data_itr;
+	if (hdr->length * sizeof(hdr->bin[0])
+	    > remaining_size - 2 * sizeof(*data_itr)) {
+		dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; "
+		       "chunk too long (%zu bytes), only %zu left\n",
+		       hdr_cnt, hdr_offset, hdr->length * sizeof(hdr->bin[0]),
+		       remaining_size - 2 * sizeof(*data_itr));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+/**
+ * Get a buffer where the firmware is supposed to be and create a
+ * chain of headers linking them together.
+ *
+ * @phdr: where to place the pointer to the first header (headers link
+ *        to the next via the @hdr->next ptr); need to free the whole
+ *        chain when done.
+ *
+ * @_data: Pointer to the data buffer.
+ *
+ * @_data_size: Size of the data buffer (bytes); data size has to be a
+ *              multiple of 4. Function will fail if not.
+ *
+ * Goes over the whole binary blob; reads the first chunk and creates
+ * a fw hdr from it (which points to where the data is in @_data and
+ * the length of the chunk); then goes on to the next chunk until
+ * done. Each header is linked to the next.
+ */
+static
+int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr,
+		 const char *_data, size_t data_size)
+{
+	int result;
+	unsigned hdr_cnt = 0;
+	u32 *data = (u32 *) _data, *data_itr, *data_top;
+	struct fw_hdr *hdr, **prev_hdr = phdr;
+
+	result = -EINVAL;
+	/* Check size is ok and pointer is aligned */
+	if (data_size % sizeof(u32) != 0)
+		goto error;
+	if ((unsigned long) _data % sizeof(u16) != 0)
+		goto error;
+	*phdr = NULL;
+	data_itr = data;
+	data_top = (u32 *) (_data + data_size);
+	while (data_itr < data_top) {
+		result = -ENOMEM;
+		hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
+		if (hdr == NULL) {
+			dev_err(i1480->dev, "Cannot allocate fw header "
+			       "for chunk #%u\n", hdr_cnt);
+			goto error_alloc;
+		}
+		result = fw_hdr_load(i1480, hdr, hdr_cnt,
+				     _data, data_itr, data_top);
+		if (result < 0)
+			goto error_load;
+		data_itr += 2 + hdr->length;
+		*prev_hdr = hdr;
+		prev_hdr = &hdr->next;
+		hdr_cnt++;
+	}
+	*prev_hdr = NULL;
+	return 0;
+
+error_load:
+	kfree(hdr);
+error_alloc:
+	fw_hdrs_free(*phdr);
+error:
+	return result;
+}
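+
+#if 0
+/*
+ * Illustrative sketch only (not compiled): the blob layout parsed by
+ * fw_hdr_load()/fw_hdrs_load() spelled out.  Each chunk is a LE32 load
+ * address, a LE32 word count and then that many LE32 data words, and
+ * chunks follow each other until the end of the file.  The addresses
+ * and data words below are invented for the example.
+ */
+static const __le32 example_fw_blob[] = {
+	/* chunk #0: two words loaded at 0x80000000 */
+	cpu_to_le32(0x80000000), cpu_to_le32(2),
+	cpu_to_le32(0xdeadbeef), cpu_to_le32(0xcafef00d),
+	/* chunk #1: one word loaded at 0x80000100 */
+	cpu_to_le32(0x80000100), cpu_to_le32(1),
+	cpu_to_le32(0x00000001),
+};
+#endif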
+
+
+/**
+ * Compares a chunk of fw with one in the device's memory
+ *
+ * @i1480:     Device instance
+ * @hdr:     Pointer to the firmware chunk
+ * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset
+ *           where the difference was found (plus one).
+ *
+ * Kind of dirty and simplistic, but does the trick in both the PCI
+ * and USB version. We do a quick[er] memcmp(), and if it fails, we do
+ * a byte-by-byte comparison to find the offset.
+ */
+static
+ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr)
+{
+	ssize_t result = 0;
+	u32 src_itr = 0, cnt;
+	size_t size = hdr->length*sizeof(hdr->bin[0]);
+	size_t chunk_size;
+	u8 *bin = (u8 *) hdr->bin;
+
+	while (size > 0) {
+		chunk_size = size < i1480->buf_size ? size : i1480->buf_size;
+		result = i1480->read(i1480, hdr->address + src_itr, chunk_size);
+		if (result < 0) {
+			dev_err(i1480->dev, "error reading for verification: "
+				"%zd\n", result);
+			goto error;
+		}
+		if (memcmp(i1480->cmd_buf, bin + src_itr, result)) {
+			u8 *buf = i1480->cmd_buf;
+			d_printf(2, i1480->dev,
+				 "original data @ %p + %u, %zu bytes\n",
+				 bin, src_itr, result);
+			d_dump(4, i1480->dev, bin + src_itr, result);
+			for (cnt = 0; cnt < result; cnt++)
+				if (bin[src_itr + cnt] != buf[cnt]) {
+					dev_err(i1480->dev, "byte failed at "
+						"src_itr %u cnt %u [0x%02x "
+						"vs 0x%02x]\n", src_itr, cnt,
+						bin[src_itr + cnt], buf[cnt]);
+					result = src_itr + cnt + 1;
+					goto cmp_failed;
+				}
+		}
+		src_itr += result;
+		size -= result;
+	}
+	result = 0;
+error:
+cmp_failed:
+	return result;
+}
+
+
+/**
+ * Writes firmware headers to the device.
+ *
+ * @i1480:   Device instance
+ * @hdr:     Processed firmware
+ * @returns: 0 if ok, < 0 errno on error.
+ */
+static
+int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr,
+		     const char *fw_name, const char *fw_tag)
+{
+	struct device *dev = i1480->dev;
+	ssize_t result = 0;
+	struct fw_hdr *hdr_itr;
+	int verif_retry_count;
+
+	d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr);
+	/* Now, header by header, push them to the hw */
+	for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) {
+		verif_retry_count = 0;
+retry:
+		dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n",
+			hdr_itr->length * sizeof(hdr_itr->bin[0]),
+			hdr_itr->address);
+		result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin,
+				    hdr_itr->length*sizeof(hdr_itr->bin[0]));
+		if (result < 0) {
+			dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):"
+				" %zd\n", fw_tag, fw_name,
+				hdr_itr->length * sizeof(hdr_itr->bin[0]),
+				hdr_itr->address, result);
+			break;
+		}
+		result = i1480_fw_cmp(i1480, hdr_itr);
+		if (result < 0) {
+			dev_err(dev, "%s fw '%s': verification read "
+				"failed (%zuB @ 0x%lx): %zd\n",
+				fw_tag, fw_name,
+				hdr_itr->length * sizeof(hdr_itr->bin[0]),
+				hdr_itr->address, result);
+			break;
+		}
+		if (result > 0) {	/* Offset where it failed + 1 */
+			result--;
+			dev_err(dev, "%s fw '%s': WARNING: verification "
+				"failed at 0x%lx: retrying\n",
+				fw_tag, fw_name, hdr_itr->address + result);
+			if (++verif_retry_count < 3)
+				goto retry;	/* write this block again! */
+			dev_err(dev, "%s fw '%s': verification failed at 0x%lx: "
+				"tried %d times\n", fw_tag, fw_name,
+				hdr_itr->address + result, verif_retry_count);
+			result = -EINVAL;
+			break;
+		}
+	}
+	d_fnend(3, dev, "(%zd)\n", result);
+	return result;
+}
+
+
+/** Puts the device in firmware upload mode.*/
+static
+int mac_fw_upload_enable(struct i1480 *i1480)
+{
+	int result;
+	u32 reg = 0x800000c0;
+	u32 *buffer = (u32 *)i1480->cmd_buf;
+
+	if (i1480->hw_rev > 1)
+		reg = 0x8000d0d4;
+	result = i1480->read(i1480, reg, sizeof(u32));
+	if (result < 0)
+		goto error_cmd;
+	*buffer &= ~i1480_FW_UPLOAD_MODE_MASK;
+	result = i1480->write(i1480, reg, buffer, sizeof(u32));
+	if (result < 0)
+		goto error_cmd;
+	return 0;
+error_cmd:
+	dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result);
+	return result;
+}
+
+
+/** Gets the device out of firmware upload mode. */
+static
+int mac_fw_upload_disable(struct i1480 *i1480)
+{
+	int result;
+	u32 reg = 0x800000c0;
+	u32 *buffer = (u32 *)i1480->cmd_buf;
+
+	if (i1480->hw_rev > 1)
+		reg = 0x8000d0d4;
+	result = i1480->read(i1480, reg, sizeof(u32));
+	if (result < 0)
+		goto error_cmd;
+	*buffer |= i1480_FW_UPLOAD_MODE_MASK;
+	result = i1480->write(i1480, reg, buffer, sizeof(u32));
+	if (result < 0)
+		goto error_cmd;
+	return 0;
+error_cmd:
+	dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result);
+	return result;
+}
+
+
+
+/**
+ * Generic function for uploading a MAC firmware.
+ *
+ * @i1480:     Device instance
+ * @fw_name: Name of firmware file to upload.
+ * @fw_tag:  Name of the firmware type (for messages)
+ *           [eg: MAC, PRE]
+ * @returns: 0 if ok, < 0 errno on error.
+ */
+static
+int __mac_fw_upload(struct i1480 *i1480, const char *fw_name,
+		    const char *fw_tag)
+{
+	int result;
+	const struct firmware *fw;
+	struct fw_hdr *fw_hdrs;
+
+	d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag);
+	result = request_firmware(&fw, fw_name, i1480->dev);
+	if (result < 0)	/* Up to caller to complain on -ENOENT */
+		goto out;
+	d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name);
+	result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size);
+	if (result < 0) {
+		dev_err(i1480->dev, "%s fw '%s': failed to parse firmware "
+			"file: %d\n", fw_tag, fw_name, result);
+		goto out_release;
+	}
+	result = mac_fw_upload_enable(i1480);
+	if (result < 0)
+		goto out_hdrs_release;
+	result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag);
+	mac_fw_upload_disable(i1480);
+out_hdrs_release:
+	if (result >= 0)
+		dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name);
+	else
+		dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), "
+			"power cycle device\n", fw_tag, fw_name, result);
+	fw_hdrs_free(fw_hdrs);
+out_release:
+	release_firmware(fw);
+out:
+	d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag,
+		result);
+	return result;
+}
+
+
+/**
+ * Upload a pre-PHY firmware
+ *
+ */
+int i1480_pre_fw_upload(struct i1480 *i1480)
+{
+	int result;
+	result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE");
+	if (result == 0)
+		msleep(400);
+	return result;
+}
+
+
+/**
+ * Reset the MAC and PHY
+ *
+ * @i1480:     Device's instance
+ * @returns: 0 if ok, < 0 errno code on error
+ *
+ * We put the command on kmalloc'ed memory as some arches cannot do
+ * USB from the stack. The reply event is copied from a staging buffer,
+ * so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
+ *
+ * We issue the reset to make sure the UWB controller reinits the PHY;
+ * this way we can know if the PHY init went ok.
+ */
+static
+int i1480_cmd_reset(struct i1480 *i1480)
+{
+	int result;
+	struct uwb_rccb *cmd = (void *) i1480->cmd_buf;
+	struct i1480_evt_reset {
+		struct uwb_rceb rceb;
+		u8 bResultCode;
+	} __attribute__((packed)) *reply = (void *) i1480->evt_buf;
+
+	result = -ENOMEM;
+	cmd->bCommandType = UWB_RC_CET_GENERAL;
+	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
+	reply->rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply->rceb.wEvent = UWB_RC_CMD_RESET;
+	result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
+	if (result < 0)
+		goto out;
+	if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(i1480->dev, "RESET: command execution failed: %u\n",
+			reply->bResultCode);
+		result = -EIO;
+	}
+out:
+	return result;
+
+}
+
+
+/* Wait for the MAC FW to start running */
+static
+int i1480_fw_is_running_q(struct i1480 *i1480)
+{
+	int cnt = 0;
+	int result;
+	u32 *val = (u32 *) i1480->cmd_buf;
+
+	d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480);
+	for (cnt = 0; cnt < 10; cnt++) {
+		msleep(100);
+		result = i1480->read(i1480, 0x80080000, 4);
+		if (result < 0) {
+			dev_err(i1480->dev, "Can't read 0x80080000: %d\n", result);
+			goto out;
+		}
+		if (*val == 0x55555555UL)	/* fw running? cool */
+			goto out;
+	}
+	dev_err(i1480->dev, "Timed out waiting for fw to start\n");
+	result = -ETIMEDOUT;
+out:
+	d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
+	return result;
+
+}
+
+
+/**
+ * Upload MAC firmware, wait for it to start
+ *
+ * @i1480:     Device instance
+ *
+ * The firmware file name is taken from i1480->mac_fw_name. This has
+ * to be called after the pre fw has been uploaded (if there is any).
+ */
+int i1480_mac_fw_upload(struct i1480 *i1480)
+{
+	int result = 0, deprecated_name = 0;
+	struct i1480_rceb *rcebe = (void *) i1480->evt_buf;
+
+	d_fnstart(3, i1480->dev, "(%p)\n", i1480);
+	result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC");
+	if (result == -ENOENT) {
+		result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate,
+					 "MAC");
+		deprecated_name = 1;
+	}
+	if (result < 0)
+		return result;
+	if (deprecated_name == 1)
+		dev_warn(i1480->dev,
+			 "WARNING: firmware file name %s is deprecated, "
+			 "please rename to %s\n",
+			 i1480->mac_fw_name_deprecate, i1480->mac_fw_name);
+	result = i1480_fw_is_running_q(i1480);
+	if (result < 0)
+		goto error_fw_not_running;
+	result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0;
+	if (result < 0) {
+		dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n",
+			result);
+		goto error_setup;
+	}
+	result = i1480->wait_init_done(i1480);	/* wait init'on */
+	if (result < 0) {
+		dev_err(i1480->dev, "MAC fw '%s': Initialization timed out "
+			"(%d)\n", i1480->mac_fw_name, result);
+		goto error_init_timeout;
+	}
+	/* verify we got the right initialization done event */
+	result = -EIO;
+	if (i1480->evt_result != sizeof(*rcebe)) {
+		dev_err(i1480->dev, "MAC fw '%s': initialization event returns "
+			"wrong size (%zd bytes vs %zu needed)\n",
+			i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe));
+		dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32));
+		goto error_size;
+	}
+	if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1,
+			     i1480_EVT_RM_INIT_DONE) < 0) {
+		dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x "
+			"received; expected 0x%02x/%04x/00\n",
+			rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent),
+			rcebe->rceb.bEventContext, i1480_CET_VS1,
+			i1480_EVT_RM_INIT_DONE);
+		goto error_init_timeout;
+	}
+	result = i1480_cmd_reset(i1480);
+	if (result < 0)
+		dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n",
+			i1480->mac_fw_name, result);
+error_fw_not_running:
+error_init_timeout:
+error_size:
+error_setup:
+	d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
+	return result;
+}
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c
new file mode 100644
index 0000000..3b1a87d
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/phy.c
@@ -0,0 +1,203 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * PHY parameters upload
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Code for uploading the PHY parameters to the PHY through the UWB
+ * Radio Control interface.
+ *
+ * We just send the data through the MPI interface using HWA-like
+ * commands and then reset the PHY to make sure it is ok.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/usb/wusb.h>
+#include "i1480-dfu.h"
+
+
+/**
+ * Write a value array to an address of the MPI interface
+ *
+ * @i1480:	Device descriptor
+ * @data:	Data array to write
+ * @size:	Size of the data array
+ * @returns:	0 if ok, < 0 errno code on error.
+ *
+ * The data array is organized into pairs:
+ *
+ * ADDRESS VALUE
+ *
+ * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has
+ * to be a multiple of three.
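+ *
+ * Illustrative example (not taken from a spec): to write value 0xab to
+ * address 0x1234 and 0xcd to 0x1235, @data would carry the six bytes
+ * 12 34 ab 12 35 cd and @size would be 6.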
+ */
+static
+int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
+{
+	int result;
+	struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
+	struct i1480_evt_confirm *reply = i1480->evt_buf;
+
+	BUG_ON(size > 480);
+	result = -ENOMEM;
+	cmd->rccb.bCommandType = i1480_CET_VS1;
+	cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
+	cmd->size = cpu_to_le16(size);
+	memcpy(cmd->data, data, size);
+	reply->rceb.bEventType = i1480_CET_VS1;
+	reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
+	result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
+	if (result < 0)
+		goto out;
+	if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
+			reply->bResultCode);
+		result = -EIO;
+	}
+out:
+	return result;
+}
+
+
+/**
+ * Read a value array from an address of the MPI interface
+ *
+ * @i1480:	Device descriptor
+ * @data:	where to place the read array
+ * @srcaddr:	Where to read from
+ * @size:	Size of the data read array
+ * @returns:	0 if ok, < 0 errno code on error.
+ *
+ * The command data array is organized into pairs ADDR0 ADDR1..., and
+ * the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
+ *
+ * We generate the command array to be a sequential read and then
+ * rearrange the result.
+ *
+ * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
+ *
+ * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount
+ * of values we can read is (512 - sizeof(*reply)) / 3
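+ *
+ * Illustrative example: reading 2 bytes starting at 0x0006 sends the
+ * (page, offset) pairs (0x00, 0x06) and (0x00, 0x07); the reply comes
+ * back as (page, offset, value) triads, whose values are copied into
+ * @data after the consistency checks below.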
+ */
+static
+int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
+{
+	int result;
+	struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
+	struct i1480_evt_mpi_read *reply = i1480->evt_buf;
+	unsigned cnt;
+
+	memset(i1480->cmd_buf, 0x69, 512);
+	memset(i1480->evt_buf, 0x69, 512);
+
+	BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
+	result = -ENOMEM;
+	cmd->rccb.bCommandType = i1480_CET_VS1;
+	cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
+	cmd->size = cpu_to_le16(3*size);
+	for (cnt = 0; cnt < size; cnt++) {
+		cmd->data[cnt].page = (srcaddr + cnt) >> 8;
+		cmd->data[cnt].offset = (srcaddr + cnt) & 0xff;
+	}
+	reply->rceb.bEventType = i1480_CET_VS1;
+	reply->rceb.wEvent = i1480_CMD_MPI_READ;
+	result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
+			sizeof(*reply) + 3*size);
+	if (result < 0)
+		goto out;
+	if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
+			reply->bResultCode);
+		result = -EIO;
+	}
+	for (cnt = 0; cnt < size; cnt++) {
+		if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
+			dev_err(i1480->dev, "MPI-READ: page inconsistency at "
+				"index %u: expected 0x%02x, got 0x%02x\n", cnt,
+				(srcaddr + cnt) >> 8, reply->data[cnt].page);
+		if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff))
+			dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
+				"index %u: expected 0x%02x, got 0x%02x\n", cnt,
+				(srcaddr + cnt) & 0x00ff,
+				reply->data[cnt].offset);
+		data[cnt] = reply->data[cnt].value;
+	}
+	result = 0;
+out:
+	return result;
+}
+
+
+/**
+ * Upload a PHY firmware, wait for it to start
+ *
+ * @i1480:     Device instance; the firmware file named in
+ *             i1480->phy_fw_name is uploaded.
+ *
+ * We assume the MAC fw is up and running. This means we can use the
+ * MPI interface to write the PHY firmware. Once done, we issue an
+ * MBOA Reset, which will force the MAC to reset and reinitialize the
+ * PHY. If that works, we are ready to go.
+ *
+ * Max packet size for the MPI write is 512, so the max buffer is 480
+ * (which gives us 160 three-byte triads of address MSB, address LSB and value).
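+ *
+ * For example (illustrative), a 1000-byte PHY parameter file would be
+ * pushed out as MPI writes of 480, 480 and 40 bytes by the loop below.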
+ */
+int i1480_phy_fw_upload(struct i1480 *i1480)
+{
+	int result;
+	const struct firmware *fw;
+	const char *data_itr, *data_top;
+	const size_t MAX_BLK_SIZE = 480;	/* 160 triads */
+	size_t data_size;
+	u8 phy_stat;
+
+	result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
+	if (result < 0)
+		goto out;
+	/* Loop writing data in chunks as big as possible until done. */
+	for (data_itr = fw->data, data_top = data_itr + fw->size;
+	     data_itr < data_top; data_itr += MAX_BLK_SIZE) {
+		data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
+		result = i1480_mpi_write(i1480, data_itr, data_size);
+		if (result < 0)
+			goto error_mpi_write;
+	}
+	/* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
+	result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
+	if (result < 0) {
+		dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
+		goto error_mpi_status;
+	}
+	if (phy_stat != 0) {
+		result = -ENODEV;
+		dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
+		goto error_phy_status;
+	}
+	dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
+error_phy_status:
+error_mpi_status:
+error_mpi_write:
+	release_firmware(fw);
+	if (result < 0)
+		dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
+			"power cycle device\n", i1480->phy_fw_name, result);
+out:
+	return result;
+}
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
new file mode 100644
index 0000000..98eeeff
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -0,0 +1,500 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * USB SKU firmware upload implementation
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver will prepare the i1480 device to behave as a real
+ * Wireless USB HWA adaptor by uploading the firmware.
+ *
+ * When the device is connected or driver is loaded, i1480_usb_probe()
+ * is called--this will allocate and initialize the device structure,
+ * fill in the pointers to the common functions (read, write,
+ * wait_init_done and cmd for HWA command execution) and once that is
+ * done, call the common firmware uploading routine. Then clean up and
+ * return -ENODEV, as we don't attach to the device.
+ *
+ * The rest are the basic ops we implement that the fw upload code
+ * uses to do its job. All the ops in the common code are i1480->NAME,
+ * the functions are i1480_usb_NAME().
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/wusb-wa.h>
+#include "i1480-dfu.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
+struct i1480_usb {
+	struct i1480 i1480;
+	struct usb_device *usb_dev;
+	struct usb_interface *usb_iface;
+	struct urb *neep_urb;	/* URB for reading from EP1 */
+};
+
+
+static
+void i1480_usb_init(struct i1480_usb *i1480_usb)
+{
+	i1480_init(&i1480_usb->i1480);
+}
+
+
+static
+int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
+{
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+	int result = -ENOMEM;
+
+	i1480_usb->usb_dev = usb_get_dev(usb_dev);	/* bind the USB device */
+	i1480_usb->usb_iface = usb_get_intf(iface);
+	usb_set_intfdata(iface, i1480_usb);		/* Bind the driver to iface0 */
+	i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (i1480_usb->neep_urb == NULL)
+		goto error;
+	return 0;
+
+error:
+	usb_set_intfdata(iface, NULL);
+	usb_put_intf(iface);
+	usb_put_dev(usb_dev);
+	return result;
+}
+
+
+static
+void i1480_usb_destroy(struct i1480_usb *i1480_usb)
+{
+	usb_kill_urb(i1480_usb->neep_urb);
+	usb_free_urb(i1480_usb->neep_urb);
+	usb_set_intfdata(i1480_usb->usb_iface, NULL);
+	usb_put_intf(i1480_usb->usb_iface);
+	usb_put_dev(i1480_usb->usb_dev);
+}
+
+
+/**
+ * Write a buffer to a memory address in the i1480 device
+ *
+ * @i1480:  i1480 instance
+ * @memory_address:
+ *          Address where to write the data buffer to.
+ * @buffer: Buffer to the data
+ * @size:   Size of the buffer [has to be < 512].
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
+ * so we copy the data to the local i1480 buffer before proceeding. In
+ * any case, there is a maximum size we can send per transfer, so the
+ * data is chunked anyway.
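+ *
+ * As can be seen in the usb_control_msg() call below, the vendor
+ * request 0xf0 carries the low 16 bits of @memory_address in wValue and
+ * the high 16 bits in wIndex, with the data chunk in the data stage.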
+ */
+static
+int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
+		    const void *buffer, size_t size)
+{
+	int result = 0;
+	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+	size_t buffer_size, itr = 0;
+
+	d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n",
+		  i1480, memory_address, buffer, size);
+	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
+	while (size > 0) {
+		buffer_size = size < i1480->buf_size ? size : i1480->buf_size;
+		memcpy(i1480->cmd_buf, buffer + itr, buffer_size);
+		result = usb_control_msg(
+			i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
+			0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			cpu_to_le16(memory_address & 0xffff),
+			cpu_to_le16((memory_address >> 16) & 0xffff),
+			i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
+		if (result < 0)
+			break;
+		d_printf(3, i1480->dev,
+			 "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n",
+			 memory_address, result, buffer_size);
+		d_dump(4, i1480->dev, i1480->cmd_buf, result);
+		itr += result;
+		memory_address += result;
+		size -= result;
+	}
+	d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n",
+		i1480, memory_address, buffer, size, result);
+	return result;
+}
+
+
+/**
+ * Read a block [max size 512] of the device's memory to @i1480's buffer.
+ *
+ * @i1480: i1480 instance
+ * @addr:  Address where to read from.
+ * @size:  Size to read. Smaller than or equal to 512.
+ * @returns: >= 0 number of bytes read if ok, < 0 errno code on error.
+ *
+ * NOTE: if the memory address or block is incorrect, you might get a
+ *       stall or a different memory read. Caller has to verify the
+ *       memory address and size passed back in the @neh structure.
+ */
+static
+int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
+{
+	ssize_t result = 0, bytes = 0;
+	size_t itr, read_size = i1480->buf_size;
+	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+
+	d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n",
+		  i1480, addr, size);
+	BUG_ON(size > i1480->buf_size);
+	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
+	BUG_ON(read_size > 512);
+
+	if (addr >= 0x8000d200 && addr < 0x8000d400)	/* Yeah, HW quirk */
+		read_size = 4;
+
+	for (itr = 0; itr < size; itr += read_size) {
+		size_t itr_addr = addr + itr;
+		size_t itr_size = min(read_size, size - itr);
+		result = usb_control_msg(
+			i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
+			0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			cpu_to_le16(itr_addr & 0xffff),
+			cpu_to_le16((itr_addr >> 16) & 0xffff),
+			i1480->cmd_buf + itr, itr_size,
+			100 /* FIXME: arbitrary */);
+		if (result < 0) {
+			dev_err(i1480->dev, "%s: USB read error: %zd\n",
+				__func__, result);
+			goto out;
+		}
+		if (result != itr_size) {
+			dev_err(i1480->dev,
+				"%s: partial read: got only %zd bytes vs %zu expected\n",
+				__func__, result, itr_size);
+			result = -EIO;
+			goto out;
+		}
+		bytes += result;
+	}
+	result = bytes;
+out:
+	d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n",
+		i1480, addr, size, result);
+	if (result > 0)
+		d_dump(4, i1480->dev, i1480->cmd_buf, result);
+	return result;
+}
+
+
+/**
+ * Callback for reads on the notification/event endpoint
+ *
+ * Just enables the completion read handler.
+ */
+static
+void i1480_usb_neep_cb(struct urb *urb)
+{
+	struct i1480 *i1480 = urb->context;
+	struct device *dev = i1480->dev;
+
+	switch (urb->status) {
+	case 0:
+		break;
+	case -ECONNRESET:	/* Not an error, but a controlled situation; */
+	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
+		dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status);
+		break;
+	case -ESHUTDOWN:	/* going away! */
+		dev_dbg(dev, "NEEP: down %d\n", urb->status);
+		break;
+	default:
+		dev_err(dev, "NEEP: unknown status %d\n", urb->status);
+		break;
+	}
+	i1480->evt_result = urb->actual_length;
+	complete(&i1480->evt_complete);
+	return;
+}
+
+
+/**
+ * Wait for the MAC FW to initialize
+ *
+ * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
+ * initializing. Get that notification into i1480->evt_buf; upper layer
+ * will verify it.
+ *
+ * Set i1480->evt_result with the result of getting the event or its
+ * size (if successful).
+ *
+ * Delivers the data directly to i1480->evt_buf
+ */
+static
+int i1480_usb_wait_init_done(struct i1480 *i1480)
+{
+	int result;
+	struct device *dev = i1480->dev;
+	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+	struct usb_endpoint_descriptor *epd;
+
+	d_fnstart(3, dev, "(%p)\n", i1480);
+	init_completion(&i1480->evt_complete);
+	i1480->evt_result = -EINPROGRESS;
+	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
+	usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
+			 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
+			 i1480->evt_buf, i1480->buf_size,
+			 i1480_usb_neep_cb, i1480, epd->bInterval);
+	result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "init done: cannot submit NEEP read: %d\n",
+			result);
+		goto error_submit;
+	}
+	/* Wait for the USB callback to get the data */
+	result = wait_for_completion_interruptible_timeout(
+		&i1480->evt_complete, HZ);
+	if (result <= 0) {
+		result = result == 0 ? -ETIMEDOUT : result;
+		goto error_wait;
+	}
+	usb_kill_urb(i1480_usb->neep_urb);
+	d_fnend(3, dev, "(%p) = 0\n", i1480);
+	return 0;
+
+error_wait:
+	usb_kill_urb(i1480_usb->neep_urb);
+error_submit:
+	i1480->evt_result = result;
+	d_fnend(3, dev, "(%p) = %d\n", i1480, result);
+	return result;
+}
+
+
+/**
+ * Generic function for issuing commands to the i1480
+ *
+ * @i1480:      i1480 instance
+ * @cmd_name:   Name of the command (for error messages)
+ * @cmd_size:   Size of the command, already placed in i1480->cmd_buf
+ * @returns:    >= 0 if the command was issued ok,
+ *              < 0 errno code on error.
+ *
+ * Arms a read on the notification/event endpoint (the reply event is
+ * delivered to i1480->evt_buf by the NEEP callback) and then issues
+ * the command to the device over EP0.
+ */
+static
+int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
+{
+	int result;
+	struct device *dev = i1480->dev;
+	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+	struct usb_endpoint_descriptor *epd;
+	struct uwb_rccb *cmd = i1480->cmd_buf;
+	u8 iface_no;
+
+	d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
+	/* Post a read on the notification & event endpoint */
+	iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
+	usb_fill_int_urb(
+		i1480_usb->neep_urb, i1480_usb->usb_dev,
+		usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
+		i1480->evt_buf, i1480->buf_size,
+		i1480_usb_neep_cb, i1480, epd->bInterval);
+	result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "%s: cannot submit NEEP read: %d\n",
+			cmd_name, result);
+		goto error_submit_ep1;
+	}
+	/* Now post the command on EP0 */
+	result = usb_control_msg(
+		i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
+		WA_EXEC_RC_CMD,
+		USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
+		0, iface_no,
+		cmd, cmd_size,
+		100 /* FIXME: this is totally arbitrary */);
+	if (result < 0) {
+		dev_err(dev, "%s: control request failed: %d\n",
+			cmd_name, result);
+		goto error_submit_ep0;
+	}
+	d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
+		i1480, cmd_name, cmd_size, result);
+	return result;
+
+error_submit_ep0:
+	usb_kill_urb(i1480_usb->neep_urb);
+error_submit_ep1:
+	d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
+		i1480, cmd_name, cmd_size, result);
+	return result;
+}
+
+
+/*
+ * Probe a i1480 device for uploading firmware.
+ *
+ * We attach only to interface #0, which is the radio control interface.
+ */
+static
+int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
+{
+	struct i1480_usb *i1480_usb;
+	struct i1480 *i1480;
+	struct device *dev = &iface->dev;
+	int result;
+
+	result = -ENODEV;
+	if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
+		dev_dbg(dev, "not attaching to iface %d\n",
+			iface->cur_altsetting->desc.bInterfaceNumber);
+		goto error;
+	}
+	if (iface->num_altsetting > 1
+	    && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
+		/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
+		result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
+		if (result < 0)
+			dev_warn(dev,
+				 "can't set altsetting 1 on iface 0: %d\n",
+				 result);
+	}
+
+	result = -ENOMEM;
+	i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
+	if (i1480_usb == NULL) {
+		dev_err(dev, "Unable to allocate instance\n");
+		goto error;
+	}
+	i1480_usb_init(i1480_usb);
+
+	i1480 = &i1480_usb->i1480;
+	i1480->buf_size = 512;
+	i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
+	if (i1480->cmd_buf == NULL) {
+		dev_err(dev, "Cannot allocate transfer buffers\n");
+		result = -ENOMEM;
+		goto error_buf_alloc;
+	}
+	i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
+
+	result = i1480_usb_create(i1480_usb, iface);
+	if (result < 0) {
+		dev_err(dev, "Cannot create instance: %d\n", result);
+		goto error_create;
+	}
+
+	/* setup the fops and upload the firmare */
+	i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
+	i1480->mac_fw_name = "i1480-usb-0.0.bin";
+	i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
+	i1480->phy_fw_name = "i1480-phy-0.0.bin";
+	i1480->dev = &iface->dev;
+	i1480->write = i1480_usb_write;
+	i1480->read = i1480_usb_read;
+	i1480->rc_setup = NULL;
+	i1480->wait_init_done = i1480_usb_wait_init_done;
+	i1480->cmd = i1480_usb_cmd;
+
+	result = i1480_fw_upload(&i1480_usb->i1480);	/* the real thing */
+	if (result >= 0) {
+		usb_reset_device(i1480_usb->usb_dev);
+		result = -ENODEV;	/* we don't want to bind to the iface */
+	}
+	i1480_usb_destroy(i1480_usb);
+error_create:
+	kfree(i1480->cmd_buf);
+error_buf_alloc:
+	kfree(i1480_usb);
+error:
+	return result;
+}
+
+#define i1480_USB_DEV(v, p)				\
+{							\
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE	\
+		 | USB_DEVICE_ID_MATCH_DEV_INFO		\
+		 | USB_DEVICE_ID_MATCH_INT_INFO,	\
+	.idVendor = (v),				\
+	.idProduct = (p),				\
+	.bDeviceClass = 0xff,				\
+	.bDeviceSubClass = 0xff,			\
+	.bDeviceProtocol = 0xff,			\
+	.bInterfaceClass = 0xff,			\
+	.bInterfaceSubClass = 0xff,			\
+	.bInterfaceProtocol = 0xff,			\
+}
+
+
+/** USB device ID's that we handle */
+static struct usb_device_id i1480_usb_id_table[] = {
+	i1480_USB_DEV(0x8086, 0xdf3b),
+	i1480_USB_DEV(0x15a9, 0x0005),
+	i1480_USB_DEV(0x07d1, 0x3802),
+	i1480_USB_DEV(0x050d, 0x305a),
+	i1480_USB_DEV(0x3495, 0x3007),
+	{},
+};
+MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
+
+
+static struct usb_driver i1480_dfu_driver = {
+	.name =		"i1480-dfu-usb",
+	.id_table =	i1480_usb_id_table,
+	.probe =	i1480_usb_probe,
+	.disconnect =	NULL,
+};
+
+
+/*
+ * Initialize the i1480 DFU driver.
+ *
+ * We also need to register our function for guessing event sizes.
+ */
+static int __init i1480_dfu_driver_init(void)
+{
+	return usb_register(&i1480_dfu_driver);
+}
+module_init(i1480_dfu_driver_init);
+
+
+static void __exit i1480_dfu_driver_exit(void)
+{
+	usb_deregister(&i1480_dfu_driver);
+}
+module_exit(i1480_dfu_driver_exit);
+
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
new file mode 100644
index 0000000..7bf8c6f
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -0,0 +1,99 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * Event Size tables for Wired Adaptors
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/uwb.h>
+#include "dfu/i1480-dfu.h"
+
+
+/** Event size table for wEvents 0x00XX */
+static struct uwb_est_entry i1480_est_fd00[] = {
+	/* Anybody expecting this response has to use
+	 * neh->extra_size to specify the real size that will
+	 * come back. */
+	[i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) },
+	[i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) },
+#ifdef i1480_RCEB_EXTENDED
+	[0x09] = {
+		.size = sizeof(struct i1480_rceb),
+		.offset = 1 + offsetof(struct i1480_rceb, wParamLength),
+	},
+#endif
+};
+
+/** Event size table for wEvents 0x01XX */
+static struct uwb_est_entry i1480_est_fd01[] = {
+	[0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) },
+	[0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 },
+	[0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 },
+	[0xff & i1480_EVT_DEV_ID_CHANGE] = {
+		.size = sizeof(struct i1480_rceb) + 2 },
+};
+
+static int i1480_est_init(void)
+{
+	int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
+				      i1480_est_fd00,
+				      ARRAY_SIZE(i1480_est_fd00));
+	if (result < 0) {
+		printk(KERN_ERR "Can't register EST table fd00: %d\n", result);
+		return result;
+	}
+	result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
+				  i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
+	if (result < 0) {
+		printk(KERN_ERR "Can't register EST table fd01: %d\n", result);
+		return result;
+	}
+	return 0;
+}
+module_init(i1480_est_init);
+
+static void i1480_est_exit(void)
+{
+	uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
+			   i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
+	uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
+			   i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
+}
+module_exit(i1480_est_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables");
+MODULE_LICENSE("GPL");
+
+/**
+ * USB device ID's that we handle
+ *
+ * [so we are loaded when this kind of device is connected]
+ */
+static struct usb_device_id i1480_est_id_table[] = {
+	{ USB_DEVICE(0x8086, 0xdf3b), },
+	{ USB_DEVICE(0x8086, 0x0c3b), },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h
new file mode 100644
index 0000000..18a8b0e
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-wlp.h
@@ -0,0 +1,200 @@
+/*
+ * Intel 1480 Wireless UWB Link
+ * WLP specific definitions
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#ifndef __i1480_wlp_h__
+#define __i1480_wlp_h__
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/uwb.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/* New simplified header format? */
+#undef WLP_HDR_FMT_2 		/* FIXME: rename */
+
+/**
+ * Values of the Delivery ID & Type field when PCA or DRP
+ *
+ * The Delivery ID & Type field in the WLP TX header indicates whether
+ * the frame is PCA or DRP. This is done based on the high level bit of
+ * this field.
+ * We use this constant to test if the traffic is PCA or DRP as follows:
+ * if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)
+ * 	this is DRP traffic
+ * else
+ * 	this is PCA traffic
+ */
+enum deliver_id_type_bit {
+	WLP_DRP = 8,
+};
+
+/**
+ * WLP TX header
+ *
+ * Indicates UWB/WLP-specific transmission parameters for a network
+ * packet.
+ */
+struct wlp_tx_hdr {
+	/* dword 0 */
+	struct uwb_dev_addr dstaddr;
+	u8                  key_index;
+	u8                  mac_params;
+	/* dword 1 */
+	u8                  phy_params;
+#ifndef WLP_HDR_FMT_2
+	u8                  reserved;
+	__le16              oui01;              /* FIXME: not so sure if __le16 or u8[2] */
+	/* dword 2 */
+	u8                  oui2;               /*        if all LE, it could be merged */
+	__le16              prid;
+#endif
+} __attribute__((packed));
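+
+/*
+ * Bit packing implied by the accessors below (not taken from a spec
+ * document): mac_params carries the delivery ID & type in bits 0-3, the
+ * ACK policy in bits 4-6 and the RTS/CTS flag in bit 7; phy_params
+ * carries the PHY rate in bits 0-3 and the TX power in bits 4-7.
+ */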
+
+static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr)
+{
+	return hdr->mac_params & 0x0f;
+}
+
+static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr)
+{
+	return (hdr->mac_params >> 4) & 0x07;
+}
+
+static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr)
+{
+	return (hdr->mac_params >> 7) & 0x01;
+}
+
+static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id)
+{
+	hdr->mac_params = (hdr->mac_params & ~0x0f) | id;
+}
+
+static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr,
+					     enum uwb_ack_pol policy)
+{
+	hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4);
+}
+
+static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts)
+{
+	hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7);
+}
+
+static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr)
+{
+	return hdr->phy_params & 0x0f;
+}
+
+static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr)
+{
+	return (hdr->phy_params >> 4) & 0x0f;
+}
+
+static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate)
+{
+	hdr->phy_params = (hdr->phy_params & ~0x0f) | rate;
+}
+
+static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr)
+{
+	hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4);
+}
+
+
+/**
+ * WLP RX header
+ *
+ * Provides UWB/WLP-specific transmission data for a received
+ * network packet.
+ */
+struct wlp_rx_hdr {
+	/* dword 0 */
+	struct uwb_dev_addr dstaddr;
+	struct uwb_dev_addr srcaddr;
+	/* dword 1 */
+	u8 		    LQI;
+	s8		    RSSI;
+	u8		    reserved3;
+#ifndef WLP_HDR_FMT_2
+	u8 		    oui0;
+	/* dword 2 */
+	__le16		    oui12;
+	__le16		    prid;
+#endif
+} __attribute__((packed));
+
+
+/** User configurable options for WLP */
+struct wlp_options {
+	struct mutex mutex; /* access to user configurable options*/
+	struct wlp_tx_hdr def_tx_hdr;	/* default tx hdr */
+	u8 pca_base_priority;
+	u8 bw_alloc; /*index into bw_allocs[] for PCA/DRP reservations*/
+};
+
+
+static inline
+void wlp_options_init(struct wlp_options *options)
+{
+	mutex_init(&options->mutex);
+	wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM);
+	wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1);
+	/* FIXME: default to phy caps */
+	wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480);
+#ifndef WLP_HDR_FMT_2
+	options->def_tx_hdr.prid = cpu_to_le16(0x0000);
+#endif
+}
+
+
+/* sysfs helpers */
+
+extern ssize_t uwb_pca_base_priority_store(struct wlp_options *,
+					   const char *, size_t);
+extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *);
+extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t);
+extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *);
+extern ssize_t uwb_ack_policy_store(struct wlp_options *,
+				    const char *, size_t);
+extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *);
+extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t);
+extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *);
+extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t);
+extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *);
+
+
+/** Simple bandwidth allocation (temporary and too simple) */
+struct wlp_bw_allocs {
+	const char *name;
+	struct {
+		u8 mask, stream;
+	} tx, rx;
+};
+
+
+#endif /* #ifndef __i1480_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile
new file mode 100644
index 0000000..fe6709b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o
+
+i1480u-wlp-objs :=	\
+	lc.o		\
+	netdev.o	\
+	rx.o		\
+	sysfs.o		\
+	tx.o
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
new file mode 100644
index 0000000..5f1b2951
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -0,0 +1,284 @@
+/*
+ * Intel 1480 Wireless UWB Link USB
+ * Header formats, constants, general internal interfaces
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This is not a standard interface.
+ *
+ * FIXME: docs
+ *
+ * i1480u-wlp is pretty simple: two endpoints, one for tx, one for
+ * rx. rx is polled. Network packets (ethernet, whatever) are wrapped
+ * in i1480 TX or RX headers (for sending over the air), and these
+ * packets are wrapped in UNTD headers (for sending to the WLP UWB
+ * controller).
+ *
+ * A UNTD packet (UNTD hdr + i1480 hdr + network packet) cannot be
+ * bigger than i1480u_MAX_FRG_SIZE. When it would be, the i1480 packet
+ * is broken into chunks/packets:
+ *
+ * UNTD-1st.hdr + i1480.hdr + payload
+ * UNTD-next.hdr + payload
+ * ...
+ * UNTD-last.hdr + payload
+ *
+ * so that each packet is smaller than or equal to i1480u_MAX_FRG_SIZE.
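+ * For example (going by the struct sizes below), a first fragment can
+ * carry at most 512 - sizeof(struct untd_hdr_1st) = 504 payload bytes
+ * and each follow-on fragment at most 508.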
+ *
+ * All HW structures and bitmaps are little endian, so we need to play
+ * ugly tricks when defining bitfields. Hoping for the day GCC
+ * implements __attribute__((endian(1234))).
+ *
+ * FIXME: ROADMAP to the whole implementation
+ */
+
+#ifndef __i1480u_wlp_h__
+#define __i1480u_wlp_h__
+
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/uwb.h>		/* struct uwb_rc, struct uwb_notifs_handler */
+#include <linux/wlp.h>
+#include "../i1480-wlp.h"
+
+#undef i1480u_FLOW_CONTROL	/* Enable flow control code */
+
+/**
+ * Basic flow control
+ */
+enum {
+	i1480u_TX_INFLIGHT_MAX = 1000,
+	i1480u_TX_INFLIGHT_THRESHOLD = 100,
+};
+
+/** Maximum size of a transaction that we can tx/rx */
+enum {
+	/* Maximum packet size computed as follows: max UNTD header (8) +
+	 * i1480 RX header (8) + max Ethernet header and payload (4096) +
+	 * Padding added by skb_reserve (2) to make post Ethernet payload
+	 * start on 16 byte boundary*/
+	i1480u_MAX_RX_PKT_SIZE = 4114,
+	i1480u_MAX_FRG_SIZE = 512,
+	i1480u_RX_BUFS = 9,
+};
+
+
+/**
+ * UNTD packet type
+ *
+ * We need to fragment any payload whose UNTD packet is going to be
+ * bigger than i1480u_MAX_FRG_SIZE.
+ */
+enum i1480u_pkt_type {
+	i1480u_PKT_FRAG_1ST = 0x1,
+	i1480u_PKT_FRAG_NXT = 0x0,
+	i1480u_PKT_FRAG_LST = 0x2,
+	i1480u_PKT_FRAG_CMP = 0x3
+};
+enum {
+	i1480u_PKT_NONE = 0x4,
+};
+
+/** USB Network Transfer Descriptor - common */
+struct untd_hdr {
+	u8     type;
+	__le16 len;
+} __attribute__((packed));
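+
+/*
+ * As encoded by the accessors below: bits 0-1 of 'type' carry the
+ * fragment type (enum i1480u_pkt_type) and bit 2 the RX/TX direction.
+ */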
+
+static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr)
+{
+	return hdr->type & 0x03;
+}
+
+static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr)
+{
+	return (hdr->type >> 2) & 0x01;
+}
+
+static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type)
+{
+	hdr->type = (hdr->type & ~0x03) | type;
+}
+
+static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx)
+{
+	hdr->type = (hdr->type & ~0x04) | (rx_tx << 2);
+}
+
+
+/**
+ * USB Network Transfer Descriptor - Complete Packet
+ *
+ * This is for a packet that is smaller (header + payload) than
+ * i1480u_MAX_FRG_SIZE.
+ *
+ * @hdr.len is the size of the payload; the payload doesn't
+ * count this header nor the padding, but includes the size of i1480
+ * header.
+ */
+struct untd_hdr_cmp {
+	struct untd_hdr	hdr;
+	u8		padding;
+} __attribute__((packed));
+
+
+/**
+ * USB Network Transfer Descriptor - First fragment
+ *
+ * @hdr.len is the size of the *whole packet* (excluding UNTD
+ * headers); @fragment_len is the size of the payload (excluding UNTD
+ * headers, but including i1480 headers).
+ */
+struct untd_hdr_1st {
+	struct untd_hdr	hdr;
+	__le16		fragment_len;
+	u8		padding[3];
+} __attribute__((packed));
+
+
+/**
+ * USB Network Transfer Descriptor - Next / Last [Rest]
+ *
+ * @hdr.len is the size of the payload, not including headers.
+ */
+struct untd_hdr_rst {
+	struct untd_hdr	hdr;
+	u8		padding;
+} __attribute__((packed));
+
+
+/**
+ * Transmission context
+ *
+ * Wraps all the stuff needed to track a pending/active tx
+ * operation.
+ */
+struct i1480u_tx {
+	struct list_head list_node;
+	struct i1480u *i1480u;
+	struct urb *urb;
+
+	struct sk_buff *skb;
+	struct wlp_tx_hdr *wlp_tx_hdr;
+
+	void *buf;	/* if NULL, no new buf was used */
+	size_t buf_size;
+};
+
+/**
+ * Basic flow control
+ *
+ * We maintain a basic flow control counter: "count" tracks how many TX
+ * URBs are outstanding and only "max" TX URBs may be outstanding at a
+ * time. When that value is reached the queue is stopped; it is
+ * restarted once the number of outstanding URBs drops back to
+ * "threshold". "restart_count" counts how many times the TX queue had
+ * to be restarted because "max" was exceeded and "threshold" was
+ * reached again; the timestamp "restart_ts" records when the counter
+ * was last queried (see the sysfs handling of the wlp_tx_inflight file).
+ */
+struct i1480u_tx_inflight {
+	atomic_t count;
+	unsigned long max;
+	unsigned long threshold;
+	unsigned long restart_ts;
+	atomic_t restart_count;
+};
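+
+/*
+ * Illustrative example, going by the description and defaults above:
+ * once 1000 TX URBs are in flight the netif queue is stopped, and it is
+ * restarted (bumping restart_count) when completions bring the count
+ * back down to 100.
+ */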
+
+/**
+ * Instance of a i1480u WLP interface
+ *
+ * Keeps references to the USB device that wraps it, as well as to its
+ * interface and the associated UWB host controller. It also keeps a
+ * link to the netdevice for integration into the networking stack.
+ * We maintain separate error histories for the tx and rx endpoints
+ * because the implementation does not rely on locking - having one
+ * shared structure between endpoints may cause problems. Adding locking
+ * to the implementation would cost more than adding a separate structure.
+ */
+struct i1480u {
+	struct usb_device *usb_dev;
+	struct usb_interface *usb_iface;
+	struct net_device *net_dev;
+
+	spinlock_t lock;
+	struct net_device_stats stats;
+
+	/* RX context handling */
+	struct sk_buff *rx_skb;
+	struct uwb_dev_addr rx_srcaddr;
+	size_t rx_untd_pkt_size;
+	struct i1480u_rx_buf {
+		struct i1480u *i1480u;	/* back pointer */
+		struct urb *urb;
+		struct sk_buff *data;	/* i1480u_MAX_RX_PKT_SIZE each */
+	} rx_buf[i1480u_RX_BUFS];	/* N bufs */
+
+	spinlock_t tx_list_lock;	/* TX context */
+	struct list_head tx_list;
+	u8 tx_stream;
+
+	struct stats lqe_stats, rssi_stats;	/* radio statistics */
+
+	/* Options we can set from sysfs */
+	struct wlp_options options;
+	struct uwb_notifs_handler uwb_notifs_handler;
+	struct edc tx_errors;
+	struct edc rx_errors;
+	struct wlp wlp;
+#ifdef i1480u_FLOW_CONTROL
+	struct urb *notif_urb;
+	struct edc notif_edc;		/* error density counter */
+	u8 notif_buffer[1];
+#endif
+	struct i1480u_tx_inflight tx_inflight;
+};
+
+/* Internal interfaces */
+extern void i1480u_rx_cb(struct urb *urb);
+extern int i1480u_rx_setup(struct i1480u *);
+extern void i1480u_rx_release(struct i1480u *);
+extern void i1480u_tx_release(struct i1480u *);
+extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *,
+			     struct uwb_dev_addr *);
+extern void i1480u_stop_queue(struct wlp *);
+extern void i1480u_start_queue(struct wlp *);
+extern int i1480u_sysfs_setup(struct i1480u *);
+extern void i1480u_sysfs_release(struct i1480u *);
+
+/* netdev interface */
+extern int i1480u_open(struct net_device *);
+extern int i1480u_stop(struct net_device *);
+extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
+extern void i1480u_tx_timeout(struct net_device *);
+extern int i1480u_set_config(struct net_device *, struct ifmap *);
+extern struct net_device_stats *i1480u_get_stats(struct net_device *);
+extern int i1480u_change_mtu(struct net_device *, int);
+extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
+
+/* bandwidth allocation callback */
+extern void  i1480u_bw_alloc_cb(struct uwb_rsv *);
+
+/* Sys FS */
+extern struct attribute_group i1480u_wlp_attr_group;
+
+#endif /* #ifndef __i1480u_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
new file mode 100644
index 0000000..737d60c
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -0,0 +1,421 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Driver for the Linux Network stack.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * This implements a very simple network driver for the WLP USB
+ * device that is associated to a UWB (Ultra Wide Band) host.
+ *
+ * This is seen as an interface of a composite device. Once the UWB
+ * host has an association to another WLP capable device, the
+ * networking interface (aka WLP) can start to send packets back and
+ * forth.
+ *
+ * Limitations:
+ *
+ *  - Hand cranked; can't ifup the interface until there is an association
+ *
+ *  - BW allocation very simplistic [see i1480u_mas_set() and callees].
+ *
+ *
+ * ROADMAP:
+ *
+ *   ENTRY POINTS (driver model):
+ *
+ *     i1480u_driver_{exit,init}(): initialization of the driver.
+ *
+ *     i1480u_probe(): called by the driver code when a device
+ *                     matching 'i1480u_id_table' is connected.
+ *
+ *                     This allocs a netdev instance, inits it with
+ *                     i1480u_add(), then calls register_netdev().
+ *         i1480u_init()
+ *         i1480u_add()
+ *
+ *     i1480u_disconnect(): device has been disconnected/module
+ *                          is being removed.
+ *         i1480u_rm()
+ */
+#include <linux/version.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include "i1480u-wlp.h"
+
+
+
+static inline
+void i1480u_init(struct i1480u *i1480u)
+{
+	/* nothing so far... doesn't it suck? */
+	spin_lock_init(&i1480u->lock);
+	INIT_LIST_HEAD(&i1480u->tx_list);
+	spin_lock_init(&i1480u->tx_list_lock);
+	wlp_options_init(&i1480u->options);
+	edc_init(&i1480u->tx_errors);
+	edc_init(&i1480u->rx_errors);
+#ifdef i1480u_FLOW_CONTROL
+	edc_init(&i1480u->notif_edc);
+#endif
+	stats_init(&i1480u->lqe_stats);
+	stats_init(&i1480u->rssi_stats);
+	wlp_init(&i1480u->wlp);
+}
+
+/**
+ * Fill WLP device information structure
+ *
+ * The structure will contain a few character arrays, each ending with a
+ * null terminated string. Each string has to fit (excluding terminating
+ * character) into a specified range obtained from the WLP substack.
+ *
+ * It is still not clear exactly how this device information should be
+ * obtained. Until we find out, we use the USB device descriptor as a
+ * backup; some information elements have intuitive mappings, others do not.
+ */
+static
+void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
+{
+	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+	struct usb_device *usb_dev = i1480u->usb_dev;
+	/* Treat device name and model name the same */
+	if (usb_dev->descriptor.iProduct) {
+		usb_string(usb_dev, usb_dev->descriptor.iProduct,
+			   dev_info->name, sizeof(dev_info->name));
+		usb_string(usb_dev, usb_dev->descriptor.iProduct,
+			   dev_info->model_name, sizeof(dev_info->model_name));
+	}
+	if (usb_dev->descriptor.iManufacturer)
+		usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
+			   dev_info->manufacturer,
+			   sizeof(dev_info->manufacturer));
+	scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
+		  __le16_to_cpu(usb_dev->descriptor.bcdDevice));
+	if (usb_dev->descriptor.iSerialNumber)
+		usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
+			   dev_info->serial, sizeof(dev_info->serial));
+	/* FIXME: where should we obtain category? */
+	dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
+	/* FIXME: Complete OUI and OUIsubdiv attributes */
+}
+
+#ifdef i1480u_FLOW_CONTROL
+/**
+ * Callback for the notification endpoint
+ *
+ * This mostly controls the xon/xoff protocol. In case of hard error,
+ * we stop the queue. If not, we always retry.
+ */
+static
+void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs)
+{
+	struct i1480u *i1480u = urb->context;
+	struct usb_interface *usb_iface = i1480u->usb_iface;
+	struct device *dev = &usb_iface->dev;
+	int result;
+
+	switch (urb->status) {
+	case 0:				/* Got valid data, do xon/xoff */
+		switch (i1480u->notif_buffer[0]) {
+		case 'N':
+			dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies);
+			netif_stop_queue(i1480u->net_dev);
+			break;
+		case 'A':
+			dev_err(dev, "XON STARTING queue at %lu\n", jiffies);
+			netif_start_queue(i1480u->net_dev);
+			break;
+		default:
+			dev_err(dev, "NEP: unknown data 0x%02hhx\n",
+				i1480u->notif_buffer[0]);
+		}
+		break;
+	case -ECONNRESET:		/* Controlled situation ... */
+	case -ENOENT:			/* we killed the URB... */
+		dev_err(dev, "NEP: URB reset/noent %d\n", urb->status);
+		goto error;
+	case -ESHUTDOWN:		/* going away! */
+		dev_err(dev, "NEP: URB down %d\n", urb->status);
+		goto error;
+	default:			/* Retry unless it gets ugly */
+		if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS,
+			    EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "NEP: URB max acceptable errors "
+				"exceeded; resetting device\n");
+			goto error_reset;
+		}
+		dev_err(dev, "NEP: URB error %d\n", urb->status);
+		break;
+	}
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	if (result < 0) {
+		dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n",
+			result);
+		goto error_reset;
+	}
+	return;
+
+error_reset:
+	wlp_reset_all(&i1480u->wlp);
+error:
+	netif_stop_queue(i1480u->net_dev);
+	return;
+}
+#endif
+
+static
+int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
+{
+	int result = -ENODEV;
+	struct wlp *wlp = &i1480u->wlp;
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+	struct net_device *net_dev = i1480u->net_dev;
+	struct uwb_rc *rc;
+	struct uwb_dev *uwb_dev;
+#ifdef i1480u_FLOW_CONTROL
+	struct usb_endpoint_descriptor *epd;
+#endif
+
+	i1480u->usb_dev = usb_get_dev(usb_dev);
+	i1480u->usb_iface = iface;
+	rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev);
+	if (rc == NULL) {
+		dev_err(&iface->dev, "Cannot get associated UWB Radio "
+			"Controller\n");
+		goto out;
+	}
+	wlp->xmit_frame = i1480u_xmit_frame;
+	wlp->fill_device_info = i1480u_fill_device_info;
+	wlp->stop_queue = i1480u_stop_queue;
+	wlp->start_queue = i1480u_start_queue;
+	result = wlp_setup(wlp, rc);
+	if (result < 0) {
+		dev_err(&iface->dev, "Cannot setup WLP\n");
+		goto error_wlp_setup;
+	}
+	result = 0;
+	ether_setup(net_dev);			/* make it an etherdevice */
+	uwb_dev = &rc->uwb_dev;
+	/* FIXME: hookup address change notifications? */
+
+	memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data,
+	       sizeof(net_dev->dev_addr));
+
+	net_dev->hard_header_len = sizeof(struct untd_hdr_cmp)
+		+ sizeof(struct wlp_tx_hdr)
+		+ WLP_DATA_HLEN
+		+ ETH_HLEN;
+	net_dev->mtu = 3500;
+	net_dev->tx_queue_len = 20;		/* FIXME: maybe use 1000? */
+
+/*	net_dev->flags &= ~IFF_BROADCAST;	FIXME: BUG in firmware */
+	/* FIXME: multicast disabled */
+	net_dev->flags &= ~IFF_MULTICAST;
+	net_dev->features &= ~NETIF_F_SG;
+	net_dev->features &= ~NETIF_F_FRAGLIST;
+	/* All NETIF_F_*_CSUM disabled */
+	net_dev->features |= NETIF_F_HIGHDMA;
+	net_dev->watchdog_timeo = 5*HZ;		/* FIXME: a better default? */
+
+	net_dev->open = i1480u_open;
+	net_dev->stop = i1480u_stop;
+	net_dev->hard_start_xmit = i1480u_hard_start_xmit;
+	net_dev->tx_timeout = i1480u_tx_timeout;
+	net_dev->get_stats = i1480u_get_stats;
+	net_dev->set_config = i1480u_set_config;
+	net_dev->change_mtu = i1480u_change_mtu;
+
+#ifdef i1480u_FLOW_CONTROL
+	/* Notification endpoint setup (submitted when we open the device) */
+	i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (i1480u->notif_urb == NULL) {
+		dev_err(&iface->dev, "Unable to allocate notification URB\n");
+		result = -ENOMEM;
+		goto error_urb_alloc;
+	}
+	epd = &iface->cur_altsetting->endpoint[0].desc;
+	usb_fill_int_urb(i1480u->notif_urb, usb_dev,
+			 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+			 i1480u->notif_buffer, sizeof(i1480u->notif_buffer),
+			 i1480u_notif_cb, i1480u, epd->bInterval);
+
+#endif
+
+	i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX;
+	i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
+	i1480u->tx_inflight.restart_ts = jiffies;
+	usb_set_intfdata(iface, i1480u);
+	return result;
+
+#ifdef i1480u_FLOW_CONTROL
+error_urb_alloc:
+#endif
+	wlp_remove(wlp);
+error_wlp_setup:
+	uwb_rc_put(rc);
+out:
+	usb_put_dev(i1480u->usb_dev);
+	return result;
+}
+
+static void i1480u_rm(struct i1480u *i1480u)
+{
+	struct uwb_rc *rc = i1480u->wlp.rc;
+	usb_set_intfdata(i1480u->usb_iface, NULL);
+#ifdef i1480u_FLOW_CONTROL
+	usb_kill_urb(i1480u->notif_urb);
+	usb_free_urb(i1480u->notif_urb);
+#endif
+	wlp_remove(&i1480u->wlp);
+	uwb_rc_put(rc);
+	usb_put_dev(i1480u->usb_dev);
+}
+
+/** Just setup @net_dev's i1480u private data */
+static void i1480u_netdev_setup(struct net_device *net_dev)
+{
+	struct i1480u *i1480u = netdev_priv(net_dev);
+	/* Initialize @i1480u */
+	memset(i1480u, 0, sizeof(*i1480u));
+	i1480u_init(i1480u);
+}
+
+/**
+ * Probe a i1480u interface and register it
+ *
+ * @iface:   USB interface to link to
+ * @id:      USB class/subclass/protocol id
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Does basic housekeeping stuff and then allocs a netdev with space
+ * for the i1480u  data. Initializes, registers in i1480u, registers in
+ * netdev, ready to go.
+ */
+static int i1480u_probe(struct usb_interface *iface,
+			const struct usb_device_id *id)
+{
+	int result;
+	struct net_device *net_dev;
+	struct device *dev = &iface->dev;
+	struct i1480u *i1480u;
+
+	/* Allocate instance [calls i1480u_netdev_setup() on it] */
+	result = -ENOMEM;
+	net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup);
+	if (net_dev == NULL) {
+		dev_err(dev, "no memory for network device instance\n");
+		goto error_alloc_netdev;
+	}
+	SET_NETDEV_DEV(net_dev, dev);
+	i1480u = netdev_priv(net_dev);
+	i1480u->net_dev = net_dev;
+	result = i1480u_add(i1480u, iface);	/* Now setup all the wlp stuff */
+	if (result < 0) {
+		dev_err(dev, "cannot add i1480u device: %d\n", result);
+		goto error_i1480u_add;
+	}
+	result = register_netdev(net_dev);	/* Okey dokey, bring it up */
+	if (result < 0) {
+		dev_err(dev, "cannot register network device: %d\n", result);
+		goto error_register_netdev;
+	}
+	result = i1480u_sysfs_setup(i1480u);
+	if (result < 0)
+		goto error_sysfs_init;
+	return 0;
+
+error_sysfs_init:
+	unregister_netdev(net_dev);
+error_register_netdev:
+	i1480u_rm(i1480u);
+error_i1480u_add:
+	free_netdev(net_dev);
+error_alloc_netdev:
+	return result;
+}
+
+
+/**
+ * Disconnect a i1480u from the system.
+ *
+ * i1480u_stop() has been called before, so all the rx and tx contexts
+ * have been taken down already. Make sure the queue is stopped,
+ * unregister netdev and i1480u, free and kill.
+ */
+static void i1480u_disconnect(struct usb_interface *iface)
+{
+	struct i1480u *i1480u;
+	struct net_device *net_dev;
+
+	i1480u = usb_get_intfdata(iface);
+	net_dev = i1480u->net_dev;
+	netif_stop_queue(net_dev);
+#ifdef i1480u_FLOW_CONTROL
+	usb_kill_urb(i1480u->notif_urb);
+#endif
+	i1480u_sysfs_release(i1480u);
+	unregister_netdev(net_dev);
+	i1480u_rm(i1480u);
+	free_netdev(net_dev);
+}
+
+static struct usb_device_id i1480u_id_table[] = {
+	{
+		.match_flags = USB_DEVICE_ID_MATCH_DEVICE \
+				|  USB_DEVICE_ID_MATCH_DEV_INFO \
+				|  USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor = 0x8086,
+		.idProduct = 0x0c3b,
+		.bDeviceClass = 0xef,
+		.bDeviceSubClass = 0x02,
+		.bDeviceProtocol = 0x02,
+		.bInterfaceClass = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(usb, i1480u_id_table);
+
+static struct usb_driver i1480u_driver = {
+	.name =		KBUILD_MODNAME,
+	.probe =	i1480u_probe,
+	.disconnect =	i1480u_disconnect,
+	.id_table =	i1480u_id_table,
+};
+
+static int __init i1480u_driver_init(void)
+{
+	return usb_register(&i1480u_driver);
+}
+module_init(i1480u_driver_init);
+
+
+static void __exit i1480u_driver_exit(void)
+{
+	usb_deregister(&i1480u_driver);
+}
+module_exit(i1480u_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
new file mode 100644
index 0000000..8802ac4
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -0,0 +1,368 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Driver for the Linux Network stack.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * Implementation of the netdevice linkage (except tx and rx related stuff).
+ *
+ * ROADMAP:
+ *
+ *   ENTRY POINTS (Net device):
+ *
+ *     i1480u_open(): Called when we ifconfig up the interface;
+ *                    associates to a UWB host controller, reserves
+ *                    bandwidth (MAS), sets up RX USB URB and starts
+ *                    the queue.
+ *
+ *     i1480u_stop(): Called when we ifconfig down a interface;
+ *                    reverses _open().
+ *
+ *     i1480u_set_config():
+ */
+
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include "i1480u-wlp.h"
+
+struct i1480u_cmd_set_ip_mas {
+	struct uwb_rccb     rccb;
+	struct uwb_dev_addr addr;
+	u8                  stream;
+	u8                  owner;
+	u8                  type;	/* enum uwb_drp_type */
+	u8                  baMAS[32];
+} __attribute__((packed));
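+
+/*
+ * baMAS is a 32-byte bitmap (256 bits, presumably one per Media Access
+ * Slot in the superframe); i1480u_bw_alloc_cb() passes the reservation's
+ * rsv->mas.bm bitmap through to it via i1480u_set_ip_mas().
+ */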
+
+
+static
+int i1480u_set_ip_mas(
+	struct uwb_rc *rc,
+	const struct uwb_dev_addr *dstaddr,
+	u8 stream, u8 owner, u8 type, unsigned long *mas)
+{
+
+	int result;
+	struct i1480u_cmd_set_ip_mas *cmd;
+	struct uwb_rc_evt_confirm reply;
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_kzalloc;
+	cmd->rccb.bCommandType = 0xfd;
+	cmd->rccb.wCommand = cpu_to_le16(0x000e);
+	cmd->addr = *dstaddr;
+	cmd->stream = stream;
+	cmd->owner = owner;
+	cmd->type = type;
+	if (mas == NULL)
+		memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS));
+	else
+		memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS));
+	reply.rceb.bEventType = 0xfd;
+	reply.rceb.wEvent = cpu_to_le16(0x000e);
+	result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd),
+			    &reply.rceb, sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev,
+			"SET-IP-MAS: command execution failed: %d\n",
+			reply.bResultCode);
+		result = -EIO;
+	}
+error_cmd:
+	kfree(cmd);
+error_kzalloc:
+	return result;
+}
+
+/*
+ * Inform a WLP interface of a MAS reservation
+ *
+ * @rc is assumed refcnted.
+ */
+/* FIXME: detect if remote device is WLP capable? */
+static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc,
+			      u8 stream, u8 owner, u8 type, unsigned long *mas)
+{
+	int result = 0;
+	struct device *dev = &rc->uwb_dev.dev;
+
+	result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner,
+				   type, mas);
+	if (result < 0) {
+		char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE];
+		uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf),
+				   &rc->uwb_dev.dev_addr);
+		uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf),
+				   &uwb_dev->dev_addr);
+		dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n",
+			rcaddrbuf, devaddrbuf, result);
+	}
+	return result;
+}
+
+/**
+ * Called by bandwidth allocator when change occurs in reservation.
+ *
+ * @rsv:     The reservation that is being established, modified, or
+ *           terminated.
+ *
+ * When a reservation is established, modified, or terminated the upper layer
+ * (WLP here) needs to set/update the currently available Media Access Slots
+ * that can be used for IP traffic.
+ *
+ * Our action taken during failure depends on how the reservation is being
+ * changed:
+ * - if reservation is being established we do nothing if we cannot set the
+ *   new MAS to be used
+ * - if reservation is being terminated we revert back to PCA whether the
+ *   SET IP MAS command succeeds or not.
+ */
+void i1480u_bw_alloc_cb(struct uwb_rsv *rsv)
+{
+	int result = 0;
+	struct i1480u *i1480u = rsv->pal_priv;
+	struct device *dev = &i1480u->usb_iface->dev;
+	struct uwb_dev *target_dev = rsv->target.dev;
+	struct uwb_rc *rc = i1480u->wlp.rc;
+	u8 stream = rsv->stream;
+	int type = rsv->type;
+	int is_owner = rsv->owner == &rc->uwb_dev;
+	unsigned long *bmp = rsv->mas.bm;
+
+	dev_err(dev, "WLP callback called - sending set ip mas\n");
+	/* user cannot change options while setting configuration */
+	mutex_lock(&i1480u->options.mutex);
+	switch (rsv->state) {
+	case UWB_RSV_STATE_T_ACCEPTED:
+	case UWB_RSV_STATE_O_ESTABLISHED:
+		result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
+					type, bmp);
+		if (result < 0) {
+			dev_err(dev, "MAS reservation failed: %d\n", result);
+			goto out;
+		}
+		if (is_owner) {
+			wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
+							WLP_DRP | stream);
+			wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0);
+		}
+		break;
+	case UWB_RSV_STATE_NONE:
+		/* revert back to PCA */
+		result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
+					    type, bmp);
+		if (result < 0)
+			dev_err(dev, "MAS reservation failed: %d\n", result);
+		/* Revert to PCA even though SET IP MAS failed. */
+		wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
+						i1480u->options.pca_base_priority);
+		wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1);
+		break;
+	default:
+		dev_err(dev, "unexpected WLP reservation state: %s (%d).\n",
+			uwb_rsv_state_str(rsv->state), rsv->state);
+		break;
+	}
+out:
+	mutex_unlock(&i1480u->options.mutex);
+	return;
+}
+
+/**
+ *
+ * Called on 'ifconfig up'
+ */
+int i1480u_open(struct net_device *net_dev)
+{
+	int result;
+	struct i1480u *i1480u = netdev_priv(net_dev);
+	struct wlp *wlp = &i1480u->wlp;
+	struct uwb_rc *rc;
+	struct device *dev = &i1480u->usb_iface->dev;
+
+	rc = wlp->rc;
+	result = i1480u_rx_setup(i1480u);		/* Alloc RX stuff */
+	if (result < 0)
+		goto error_rx_setup;
+	netif_wake_queue(net_dev);
+#ifdef i1480u_FLOW_CONTROL
+	result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "Can't submit notification URB: %d\n", result);
+		goto error_notif_urb_submit;
+	}
+#endif
+	i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb;
+	i1480u->uwb_notifs_handler.data = i1480u;
+	if (uwb_bg_joined(rc))
+		netif_carrier_on(net_dev);
+	else
+		netif_carrier_off(net_dev);
+	uwb_notifs_register(rc, &i1480u->uwb_notifs_handler);
+	/* Interface is up with an address, now we can create WSS */
+	result = wlp_wss_setup(net_dev, &wlp->wss);
+	if (result < 0) {
+		dev_err(dev, "Can't create WSS: %d. \n", result);
+		goto error_notif_deregister;
+	}
+	return 0;
+error_notif_deregister:
+	uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
+#ifdef i1480u_FLOW_CONTROL
+error_notif_urb_submit:
+#endif
+	netif_stop_queue(net_dev);
+	i1480u_rx_release(i1480u);
+error_rx_setup:
+	return result;
+}
+
+
+/**
+ * Called on 'ifconfig down'
+ */
+int i1480u_stop(struct net_device *net_dev)
+{
+	struct i1480u *i1480u = netdev_priv(net_dev);
+	struct wlp *wlp = &i1480u->wlp;
+	struct uwb_rc *rc = wlp->rc;
+
+	BUG_ON(wlp->rc == NULL);
+	wlp_wss_remove(&wlp->wss);
+	uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
+	netif_carrier_off(net_dev);
+#ifdef i1480u_FLOW_CONTROL
+	usb_kill_urb(i1480u->notif_urb);
+#endif
+	netif_stop_queue(net_dev);
+	i1480u_rx_release(i1480u);
+	i1480u_tx_release(i1480u);
+	return 0;
+}
+
+
+/** Report statistics */
+struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
+{
+	struct i1480u *i1480u = netdev_priv(net_dev);
+	return &i1480u->stats;
+}
+
+
+/**
+ *
+ * Change the interface config--we probably don't have to do anything.
+ */
+int i1480u_set_config(struct net_device *net_dev, struct ifmap *map)
+{
+	int result;
+	struct i1480u *i1480u = netdev_priv(net_dev);
+	BUG_ON(i1480u->wlp.rc == NULL);
+	result = 0;
+	return result;
+}
+
+/**
+ * Change the MTU of the interface
+ */
+int i1480u_change_mtu(struct net_device *net_dev, int mtu)
+{
+	static union {
+		struct wlp_tx_hdr tx;
+		struct wlp_rx_hdr rx;
+	} i1480u_all_hdrs;
+
+	if (mtu < ETH_HLEN)	/* We encap eth frames */
+		return -ERANGE;
+	if (mtu > 4000 - sizeof(i1480u_all_hdrs))
+		return -ERANGE;
+	net_dev->mtu = mtu;
+	return 0;
+}
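+
+/*
+ * Note on the bounds above: anything smaller than an Ethernet header is
+ * rejected (we encapsulate Ethernet frames), and the upper limit keeps the
+ * MTU plus the largest WLP TX/RX header within the 4000 bytes the hardware
+ * is assumed to handle per packet.
+ */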
+
+
+/**
+ * Callback function to handle events from UWB.
+ * When we see other devices we know the carrier is ok;
+ * if we are the only device in the beacon group we set the carrier
+ * state to off.
+ */
+void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev,
+			  enum uwb_notifs event)
+{
+	struct i1480u *i1480u = data;
+	struct net_device *net_dev = i1480u->net_dev;
+	struct device *dev = &i1480u->usb_iface->dev;
+	switch (event) {
+	case UWB_NOTIF_BG_JOIN:
+		netif_carrier_on(net_dev);
+		dev_info(dev, "Link is up\n");
+		break;
+	case UWB_NOTIF_BG_LEAVE:
+		netif_carrier_off(net_dev);
+		dev_info(dev, "Link is down\n");
+		break;
+	default:
+		dev_err(dev, "don't know how to handle event %d from uwb\n",
+				event);
+	}
+}
+
+/**
+ * Stop the network queue
+ *
+ * Allow the WLP substack to stop the network queue. We also set the flow
+ * control
+ * threshold at this time to prevent the flow control from restarting the
+ * queue.
+ *
+ * We are losing the current threshold value here ... FIXME?
+ */
+void i1480u_stop_queue(struct wlp *wlp)
+{
+	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+	struct net_device *net_dev = i1480u->net_dev;
+	i1480u->tx_inflight.threshold = 0;
+	netif_stop_queue(net_dev);
+}
+
+/**
+ * Start the network queue
+ *
+ * Allow the WLP substack to start the network queue. Also re-enable the flow
+ * control to manage the queue again.
+ *
+ * We re-enable the flow control by storing the default threshold in the
+ * flow control threshold. This means that if the user modified the
+ * threshold before the queue was stopped and restarted that information
+ * will be lost. FIXME?
+ */
+void i1480u_start_queue(struct wlp *wlp)
+{
+	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+	struct net_device *net_dev = i1480u->net_dev;
+	i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
+	netif_start_queue(net_dev);
+}
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
new file mode 100644
index 0000000..9fc0353
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -0,0 +1,486 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Driver for the Linux Network stack.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * i1480u's RX handling is simple. i1480u will send the received
+ * network packets broken up in fragments; 1 to N fragments make a
+ * packet, we assemble them together and deliver the packet with netif_rx().
+ *
+ * Because each USB transfer is a *single* fragment (except when the
+ * transfer contains a first fragment), each URB callback thus
+ * handles one or two fragments. So we queue N URBs, each with its own
+ * fragment buffer. When a URB is done, we process it (appending the
+ * fragment buffer to the current skb until the packet is complete). Once
+ * processed, we requeue the URB. There is always a bunch of URBs
+ * ready to take data, so the gap between transfers should be minimal.
+ *
+ * A URB's transfer buffer is the data field of a socket buffer. This
+ * reduces copying, as data can be passed directly to the network layer. If a
+ * complete packet or 1st fragment is received the URB's transfer buffer is
+ * taken away from it and used to send data to the network layer. In this
+ * case a new transfer buffer is allocated to the URB before being requeued.
+ * If a "NEXT" or "LAST" fragment is received, the fragment contents are
+ * appended to the RX packet under construction and the transfer buffer
+ * is reused. To be able to use this buffer to assemble complete packets
+ * we set each buffer's size to that of the MAX ethernet packet that can
+ * be received. There is thus room for improvement in memory usage.
+ *
+ * When the max tx fragment size increases, we should be able to read
+ * data into the skbs directly with very simple code.
+ *
+ * ROADMAP:
+ *
+ *   ENTRY POINTS:
+ *
+ *     i1480u_rx_setup(): setup RX context [from i1480u_open()]
+ *
+ *     i1480u_rx_release(): release RX context [from i1480u_stop()]
+ *
+ *     i1480u_rx_cb(): called when the RX USB URB receives a
+ *                     packet. It removes the header and pushes it up
+ *                     the Linux netdev stack with netif_rx().
+ *
+ *       i1480u_rx_buffer()
+ *         i1480u_drop() and i1480u_fix()
+ *         i1480u_skb_deliver()
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "i1480u-wlp.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
+/**
+ * Setup the RX context
+ *
+ * Each URB is provided with a transfer_buffer that is the data field
+ * of a new socket buffer.
+ */
+int i1480u_rx_setup(struct i1480u *i1480u)
+{
+	int result, cnt;
+	struct device *dev = &i1480u->usb_iface->dev;
+	struct net_device *net_dev = i1480u->net_dev;
+	struct usb_endpoint_descriptor *epd;
+	struct sk_buff *skb;
+
+	/* Alloc RX stuff */
+	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
+	result = -ENOMEM;
+	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
+	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
+		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
+		rx_buf->i1480u = i1480u;
+		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
+		if (!skb) {
+			dev_err(dev,
+				"RX: cannot allocate RX buffer %d\n", cnt);
+			result = -ENOMEM;
+			goto error;
+		}
+		skb->dev = net_dev;
+		skb->ip_summed = CHECKSUM_NONE;
+		skb_reserve(skb, 2);
+		rx_buf->data = skb;
+		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (unlikely(rx_buf->urb == NULL)) {
+			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
+			result = -ENOMEM;
+			goto error;
+		}
+		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
+			  usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
+			  rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
+			  i1480u_rx_cb, rx_buf);
+		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
+		if (unlikely(result < 0)) {
+			dev_err(dev, "RX: cannot submit URB %d: %d\n",
+				cnt, result);
+			goto error;
+		}
+	}
+	return 0;
+
+error:
+	i1480u_rx_release(i1480u);
+	return result;
+}
+
+
+/** Release resources associated to the rx context */
+void i1480u_rx_release(struct i1480u *i1480u)
+{
+	int cnt;
+	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
+		if (i1480u->rx_buf[cnt].data)
+			dev_kfree_skb(i1480u->rx_buf[cnt].data);
+		if (i1480u->rx_buf[cnt].urb) {
+			usb_kill_urb(i1480u->rx_buf[cnt].urb);
+			usb_free_urb(i1480u->rx_buf[cnt].urb);
+		}
+	}
+	if (i1480u->rx_skb != NULL)
+		dev_kfree_skb(i1480u->rx_skb);
+}
+
+static
+void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
+{
+	int cnt;
+	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
+		if (i1480u->rx_buf[cnt].urb)
+			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
+	}
+}
+
+/** Fix an out-of-sequence packet */
+#define i1480u_fix(i1480u, msg...)			\
+do {							\
+	if (printk_ratelimit())				\
+		dev_err(&i1480u->usb_iface->dev, msg);	\
+	dev_kfree_skb_irq(i1480u->rx_skb);		\
+	i1480u->rx_skb = NULL;				\
+	i1480u->rx_untd_pkt_size = 0;			\
+} while (0)
+
+
+/** Drop an out-of-sequence packet */
+#define i1480u_drop(i1480u, msg...)			\
+do {							\
+	if (printk_ratelimit())				\
+		dev_err(&i1480u->usb_iface->dev, msg);	\
+	i1480u->stats.rx_dropped++;			\
+} while (0)
+
+
+
+
+/** Finalizes setting up the SKB and delivers it
+ *
+ * We first pass the incoming frame to the WLP substack for verification. It
+ * may also be a WLP association frame, in which case WLP will take over the
+ * processing. If WLP does not take it over it will still verify it; if the
+ * frame is invalid the skb will be freed by WLP and we will not continue
+ * parsing.
+ */
+static
+void i1480u_skb_deliver(struct i1480u *i1480u)
+{
+	int should_parse;
+	struct net_device *net_dev = i1480u->net_dev;
+	struct device *dev = &i1480u->usb_iface->dev;
+
+	d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
+		 i1480u->rx_skb, i1480u->rx_skb->len);
+	d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
+	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
+					 &i1480u->rx_srcaddr);
+	if (!should_parse)
+		goto out;
+	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
+	d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
+		 i1480u->rx_skb, i1480u->rx_skb->len);
+	d_dump(7, dev, i1480u->rx_skb->data,
+	       i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
+	i1480u->stats.rx_packets++;
+	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
+	net_dev->last_rx = jiffies;
+	/* FIXME: flow control: check netif_rx() retval */
+
+	netif_rx(i1480u->rx_skb);		/* deliver */
+out:
+	i1480u->rx_skb = NULL;
+	i1480u->rx_untd_pkt_size = 0;
+}
+
+
+/**
+ * Process a buffer of data received from the USB RX endpoint
+ *
+ * First fragment arrives with next or last fragment. All other fragments
+ * arrive alone.
+ *
+ * /me hates long functions.
+ */
+static
+void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
+{
+	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
+	size_t untd_hdr_size, untd_frg_size;
+	size_t i1480u_hdr_size;
+	struct wlp_rx_hdr *i1480u_hdr = NULL;
+
+	struct i1480u *i1480u = rx_buf->i1480u;
+	struct sk_buff *skb = rx_buf->data;
+	int size_left = rx_buf->urb->actual_length;
+	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
+	struct untd_hdr *untd_hdr;
+
+	struct net_device *net_dev = i1480u->net_dev;
+	struct device *dev = &i1480u->usb_iface->dev;
+	struct sk_buff *new_skb;
+
+#if 0
+	dev_fnstart(dev,
+		    "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
+	dev_err(dev, "RX packet, %zu bytes\n", size_left);
+	dump_bytes(dev, ptr, size_left);
+#endif
+	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
+
+	while (size_left > 0) {
+		if (pkt_completed) {
+			i1480u_drop(i1480u, "RX: fragment follows completed "
+					 "packet in same buffer. Dropping\n");
+			break;
+		}
+		untd_hdr = ptr;
+		if (size_left < sizeof(*untd_hdr)) {	/*  Check the UNTD header */
+			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
+			goto out;
+		}
+		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
+			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
+			goto out;
+		}
+		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
+		case i1480u_PKT_FRAG_1ST: {
+			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
+			dev_dbg(dev, "1st fragment\n");
+			untd_hdr_size = sizeof(struct untd_hdr_1st);
+			if (i1480u->rx_skb != NULL)
+				i1480u_fix(i1480u, "RX: 1st fragment out of "
+					"sequence! Fixing\n");
+			if (size_left < untd_hdr_size + i1480u_hdr_size) {
+				i1480u_drop(i1480u, "RX: short 1st fragment! "
+					"Dropping\n");
+				goto out;
+			}
+			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
+						 - i1480u_hdr_size;
+			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
+			if (size_left < untd_hdr_size + untd_frg_size) {
+				i1480u_drop(i1480u,
+					    "RX: short payload! Dropping\n");
+				goto out;
+			}
+			i1480u->rx_skb = skb;
+			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
+			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
+			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
+			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
+			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
+			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
+			rx_buf->data = NULL; /* need to create new buffer */
+			break;
+		}
+		case i1480u_PKT_FRAG_NXT: {
+			dev_dbg(dev, "nxt fragment\n");
+			untd_hdr_size = sizeof(struct untd_hdr_rst);
+			if (i1480u->rx_skb == NULL) {
+				i1480u_drop(i1480u, "RX: next fragment out of "
+					    "sequence! Dropping\n");
+				goto out;
+			}
+			if (size_left < untd_hdr_size) {
+				i1480u_drop(i1480u, "RX: short NXT fragment! "
+					    "Dropping\n");
+				goto out;
+			}
+			untd_frg_size = le16_to_cpu(untd_hdr->len);
+			if (size_left < untd_hdr_size + untd_frg_size) {
+				i1480u_drop(i1480u,
+					    "RX: short payload! Dropping\n");
+				goto out;
+			}
+			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
+					ptr + untd_hdr_size, untd_frg_size);
+			break;
+		}
+		case i1480u_PKT_FRAG_LST: {
+			dev_dbg(dev, "Lst fragment\n");
+			untd_hdr_size = sizeof(struct untd_hdr_rst);
+			if (i1480u->rx_skb == NULL) {
+				i1480u_drop(i1480u, "RX: last fragment out of "
+					    "sequence! Dropping\n");
+				goto out;
+			}
+			if (size_left < untd_hdr_size) {
+				i1480u_drop(i1480u, "RX: short LST fragment! "
+					    "Dropping\n");
+				goto out;
+			}
+			untd_frg_size = le16_to_cpu(untd_hdr->len);
+			if (size_left < untd_frg_size + untd_hdr_size) {
+				i1480u_drop(i1480u,
+					    "RX: short payload! Dropping\n");
+				goto out;
+			}
+			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
+					ptr + untd_hdr_size, untd_frg_size);
+			pkt_completed = 1;
+			break;
+		}
+		case i1480u_PKT_FRAG_CMP: {
+			dev_dbg(dev, "cmp fragment\n");
+			untd_hdr_size = sizeof(struct untd_hdr_cmp);
+			if (i1480u->rx_skb != NULL)
+				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
+					   " fragment!\n");
+			if (size_left < untd_hdr_size + i1480u_hdr_size) {
+				i1480u_drop(i1480u, "RX: short CMP fragment! "
+					    "Dropping\n");
+				goto out;
+			}
+			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
+			untd_frg_size = i1480u->rx_untd_pkt_size;
+			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
+				i1480u_drop(i1480u,
+					    "RX: short payload! Dropping\n");
+				goto out;
+			}
+			i1480u->rx_skb = skb;
+			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
+			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
+			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
+			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
+			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
+			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
+			rx_buf->data = NULL;	/* for hand off skb to network stack */
+			pkt_completed = 1;
+			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
+			break;
+		}
+		default:
+			i1480u_drop(i1480u, "RX: unknown packet type %u! "
+				    "Dropping\n", untd_hdr_type(untd_hdr));
+			goto out;
+		}
+		size_left -= untd_hdr_size + untd_frg_size;
+		if (size_left > 0)
+			ptr += untd_hdr_size + untd_frg_size;
+	}
+	if (pkt_completed)
+		i1480u_skb_deliver(i1480u);
+out:
+	/* recreate needed RX buffers*/
+	if (rx_buf->data == NULL) {
+		/* buffer is being used to receive packet, create new */
+		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
+		if (!new_skb) {
+			if (printk_ratelimit())
+				dev_err(dev,
+				"RX: cannot allocate RX buffer\n");
+		} else {
+			new_skb->dev = net_dev;
+			new_skb->ip_summed = CHECKSUM_NONE;
+			skb_reserve(new_skb, 2);
+			rx_buf->data = new_skb;
+		}
+	}
+	return;
+}
+
+
+/**
+ * Called when an RX URB has finished receiving or has found some kind
+ * of error condition.
+ *
+ * LIMITATIONS:
+ *
+ *  - We read USB-transfers, each transfer contains a SINGLE fragment
+ *    (can contain a complete packet, or a 1st, next, or last fragment
+ *    of a packet).
+ *    Looks like a transfer can contain more than one fragment (07/18/06)
+ *
+ *  - Each transfer buffer is the size of the maximum packet size (minus
+ *    headroom), i1480u_MAX_PKT_SIZE - 2
+ *
+ *  - We always read the full USB-transfer, no partials.
+ *
+ *  - Each transfer is read directly into a skb. This skb will be used to
+ *    send data to the upper layers if it is the first fragment or a complete
+ *    packet. In the other cases the data will be copied from the skb to
+ *    another skb that is being prepared for the upper layers from a prev
+ *    first fragment.
+ *
+ * It is simply too much of a pain. Gosh, there should be a unified
+ * SG infrastructure for *everything* [so that I could declare a SG
+ * buffer, pass it to USB for receiving, append some space to it if
+ * I wish, receive more until I have the whole chunk, adapt
+ * pointers on each fragment to remove hardware headers and then
+ * attach that to an skbuff and netif_rx()].
+ */
+void i1480u_rx_cb(struct urb *urb)
+{
+	int result;
+	int do_parse_buffer = 1;
+	struct i1480u_rx_buf *rx_buf = urb->context;
+	struct i1480u *i1480u = rx_buf->i1480u;
+	struct device *dev = &i1480u->usb_iface->dev;
+	unsigned long flags;
+	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
+
+	switch (urb->status) {
+	case 0:
+		break;
+	case -ECONNRESET:	/* Not an error, but a controlled situation; */
+	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
+	case -ESHUTDOWN:	/* going away! */
+		dev_err(dev, "RX URB[%u]: going down %d\n",
+			rx_buf_idx, urb->status);
+		goto error;
+	default:
+		dev_err(dev, "RX URB[%u]: unknown status %d\n",
+			rx_buf_idx, urb->status);
+		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
+					EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "RX: max acceptable errors exceeded,"
+					" resetting device.\n");
+			i1480u_rx_unlink_urbs(i1480u);
+			wlp_reset_all(&i1480u->wlp);
+			goto error;
+		}
+		do_parse_buffer = 0;
+		break;
+	}
+	spin_lock_irqsave(&i1480u->lock, flags);
+	/* chew the data fragments, extract network packets */
+	if (do_parse_buffer) {
+		i1480u_rx_buffer(rx_buf);
+		if (rx_buf->data) {
+			rx_buf->urb->transfer_buffer = rx_buf->data->data;
+			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
+			if (result < 0) {
+				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
+					rx_buf_idx, result);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&i1480u->lock, flags);
+error:
+	return;
+}
+
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
new file mode 100644
index 0000000..a1d8ca6
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
@@ -0,0 +1,408 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Sysfs interfaces
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include <linux/device.h>
+#include "i1480u-wlp.h"
+
+
+/**
+ *
+ * @dev: Class device from the net_device; assumed refcnted.
+ *
+ * Yes, I don't lock--we assume it is refcounted and I am getting a
+ * single byte value that is kind of atomic to read.
+ */
+ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf)
+{
+	return sprintf(buf, "%u\n",
+		       wlp_tx_hdr_phy_rate(&options->def_tx_hdr));
+}
+EXPORT_SYMBOL_GPL(uwb_phy_rate_show);
+
+
+ssize_t uwb_phy_rate_store(struct wlp_options *options,
+			   const char *buf, size_t size)
+{
+	ssize_t result;
+	unsigned rate;
+
+	result = sscanf(buf, "%u\n", &rate);
+	if (result != 1) {
+		result = -EINVAL;
+		goto out;
+	}
+	result = -EINVAL;
+	if (rate >= UWB_PHY_RATE_INVALID)
+		goto out;
+	wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate);
+	result = 0;
+out:
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_phy_rate_store);
+
+
+ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf)
+{
+	return sprintf(buf, "%u\n",
+		       wlp_tx_hdr_rts_cts(&options->def_tx_hdr));
+}
+EXPORT_SYMBOL_GPL(uwb_rts_cts_show);
+
+
+ssize_t uwb_rts_cts_store(struct wlp_options *options,
+			  const char *buf, size_t size)
+{
+	ssize_t result;
+	unsigned value;
+
+	result = sscanf(buf, "%u\n", &value);
+	if (result != 1) {
+		result = -EINVAL;
+		goto out;
+	}
+	result = -EINVAL;
+	wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value);
+	result = 0;
+out:
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_rts_cts_store);
+
+
+ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf)
+{
+	return sprintf(buf, "%u\n",
+		       wlp_tx_hdr_ack_policy(&options->def_tx_hdr));
+}
+EXPORT_SYMBOL_GPL(uwb_ack_policy_show);
+
+
+ssize_t uwb_ack_policy_store(struct wlp_options *options,
+			     const char *buf, size_t size)
+{
+	ssize_t result;
+	unsigned value;
+
+	result = sscanf(buf, "%u\n", &value);
+	if (result != 1 || value > UWB_ACK_B_REQ) {
+		result = -EINVAL;
+		goto out;
+	}
+	wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value);
+	result = 0;
+out:
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_ack_policy_store);
+
+
+/**
+ * Show the PCA base priority.
+ *
+ * We can access without locking, as the value is (for now) orthogonal
+ * to other values.
+ */
+ssize_t uwb_pca_base_priority_show(const struct wlp_options *options,
+				   char *buf)
+{
+	return sprintf(buf, "%u\n",
+		       options->pca_base_priority);
+}
+EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show);
+
+
+/**
+ * Set the PCA base priority.
+ *
+ * We can access without locking, as the value is (for now) orthogonal
+ * to other values.
+ */
+ssize_t uwb_pca_base_priority_store(struct wlp_options *options,
+				    const char *buf, size_t size)
+{
+	ssize_t result = -EINVAL;
+	u8 pca_base_priority;
+
+	result = sscanf(buf, "%hhu\n", &pca_base_priority);
+	if (result != 1) {
+		result = -EINVAL;
+		goto out;
+	}
+	result = -EINVAL;
+	if (pca_base_priority >= 8)
+		goto out;
+	options->pca_base_priority = pca_base_priority;
+	/* Update TX header if we are currently using PCA. */
+	if ((wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0)
+		wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority);
+	result = 0;
+out:
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store);
+
+/**
+ * Show current inflight values
+ *
+ * Will print the current MAX and THRESHOLD values for the basic flow
+ * control. In addition it will report how many times the TX queue needed
+ * to be restarted since the last time this query was made.
+ */
+static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
+				    char *buf)
+{
+	ssize_t result;
+	unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
+	unsigned long restart_count = atomic_read(&inflight->restart_count);
+
+	result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
+			   "#read: threshold max inflight_count restarts "
+			   "seconds restarts/sec\n"
+			   "#write: threshold max\n",
+			   inflight->threshold, inflight->max,
+			   atomic_read(&inflight->count),
+			   restart_count, sec_elapsed,
+			   sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
+	inflight->restart_ts = jiffies;
+	atomic_set(&inflight->restart_count, 0);
+	return result;
+}
+
+static
+ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight,
+				const char *buf, size_t size)
+{
+	unsigned long in_threshold, in_max;
+	ssize_t result;
+	result = sscanf(buf, "%lu %lu", &in_threshold, &in_max);
+	if (result != 2)
+		return -EINVAL;
+	if (in_max <= in_threshold)
+		return -EINVAL;
+	inflight->max = in_max;
+	inflight->threshold = in_threshold;
+	return size;
+}
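+
+/*
+ * Example: writing "10 128" to the wlp_tx_inflight attribute sets the
+ * restart threshold to 10 and the in-flight maximum to 128 URBs; the
+ * write is rejected with -EINVAL unless max is greater than threshold.
+ */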
+/*
+ * Glue (or function adaptors) for accessing info on sysfs
+ *
+ * [we need this indirection because the PCI driver does almost the
+ * same]
+ *
+ * Linux 2.6.21 changed how 'struct netdevice' does attributes (from
+ * having a 'struct class_dev' to having a 'struct device'). That is
+ * quite a pain.
+ *
+ * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE()
+ * create adaptors for extracting the 'struct i1480u' from a 'struct
+ * dev' and calling a function for doing a sysfs operation (as we have
+ * them factorized already). i1480u_ATTR creates the attribute file
+ * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a
+ * class_device_attr_NAME or device_attr_NAME (for group registration).
+ */
+#include <linux/version.h>
+
+#define i1480u_SHOW(name, fn, param)				\
+static ssize_t i1480u_show_##name(struct device *dev,		\
+				  struct device_attribute *attr,\
+				  char *buf)			\
+{								\
+	struct i1480u *i1480u = netdev_priv(to_net_dev(dev));	\
+	return fn(&i1480u->param, buf);				\
+}
+
+#define i1480u_STORE(name, fn, param)				\
+static ssize_t i1480u_store_##name(struct device *dev,		\
+				   struct device_attribute *attr,\
+				   const char *buf, size_t size)\
+{								\
+	struct i1480u *i1480u = netdev_priv(to_net_dev(dev));	\
+	return fn(&i1480u->param, buf, size);			\
+}
+
+#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm,  \
+					     i1480u_show_##name,\
+					     i1480u_store_##name)
+
+#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name,		\
+					S_IRUGO,		\
+					i1480u_show_##name, NULL)
+
+#define i1480u_ATTR_NAME(a) (dev_attr_##a)
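+
+/*
+ * For illustration (a rough sketch, not literal preprocessor output),
+ * i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options) expands to an
+ * adaptor along these lines:
+ *
+ *   static ssize_t i1480u_show_uwb_phy_rate(struct device *dev,
+ *                                           struct device_attribute *attr,
+ *                                           char *buf)
+ *   {
+ *           struct i1480u *i1480u = netdev_priv(to_net_dev(dev));
+ *           return uwb_phy_rate_show(&i1480u->options, buf);
+ *   }
+ */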
+
+
+/*
+ * Sysfs adaptors
+ */
+i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options);
+i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options);
+i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options);
+i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options);
+i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options);
+i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options);
+i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options);
+i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options);
+i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_eda, wlp_eda_show, wlp);
+i1480u_STORE(wlp_eda, wlp_eda_store, wlp);
+i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp);
+i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp);
+i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp);
+i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp);
+i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp);
+i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp);
+i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp);
+i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp);
+i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp);
+i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp);
+i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp);
+i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp);
+i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp);
+i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp);
+i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp);
+i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp);
+i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp);
+i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp);
+i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp);
+i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp);
+i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp);
+i1480u_ATTR_SHOW(wlp_neighborhood);
+
+i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss);
+i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss);
+i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR);
+
+/*
+ * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) over
+ * the last 256 received WLP frames (ECMA-368 13.3).
+ *
+ * [the -7dB that have to be subtracted from the LQI to make the LQE
+ * are already taken into account].
+ */
+i1480u_SHOW(wlp_lqe, stats_show, lqe_stats);
+i1480u_STORE(wlp_lqe, stats_store, lqe_stats);
+i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR);
+
+/*
+ * Show the Receive Signal Strength Indicator averaged over all the
+ * received WLP frames (ECMA-368 13.3). It is still not clear what
+ * this value is, but it seems to be a percentage of the signal strength
+ * at the antenna.
+ */
+i1480u_SHOW(wlp_rssi, stats_show, rssi_stats);
+i1480u_STORE(wlp_rssi, stats_store, rssi_stats);
+i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR);
+
+/**
+ * We maintain a basic flow control counter: "count" tracks how many TX
+ * URBs are outstanding. Only "max" TX URBs are allowed to be outstanding
+ * at once; when that value is reached the queue is stopped. The queue is
+ * restarted when the number of outstanding URBs drops back to
+ * "threshold".
+ */
+i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight);
+i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight);
+i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR);
+
+static struct attribute *i1480u_attrs[] = {
+	&i1480u_ATTR_NAME(uwb_phy_rate).attr,
+	&i1480u_ATTR_NAME(uwb_rts_cts).attr,
+	&i1480u_ATTR_NAME(uwb_ack_policy).attr,
+	&i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
+	&i1480u_ATTR_NAME(wlp_lqe).attr,
+	&i1480u_ATTR_NAME(wlp_rssi).attr,
+	&i1480u_ATTR_NAME(wlp_eda).attr,
+	&i1480u_ATTR_NAME(wlp_uuid).attr,
+	&i1480u_ATTR_NAME(wlp_dev_name).attr,
+	&i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
+	&i1480u_ATTR_NAME(wlp_dev_model_name).attr,
+	&i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
+	&i1480u_ATTR_NAME(wlp_dev_serial).attr,
+	&i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
+	&i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
+	&i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
+	&i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
+	&i1480u_ATTR_NAME(wlp_neighborhood).attr,
+	&i1480u_ATTR_NAME(wss_activate).attr,
+	&i1480u_ATTR_NAME(wlp_tx_inflight).attr,
+	NULL,
+};
+
+static struct attribute_group i1480u_attr_group = {
+	.name = NULL,	/* we want them in the same directory */
+	.attrs = i1480u_attrs,
+};
+
+int i1480u_sysfs_setup(struct i1480u *i1480u)
+{
+	int result;
+	struct device *dev = &i1480u->usb_iface->dev;
+	result = sysfs_create_group(&i1480u->net_dev->dev.kobj,
+				    &i1480u_attr_group);
+	if (result < 0)
+		dev_err(dev, "cannot initialize sysfs attributes: %d\n",
+			result);
+	return result;
+}
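+
+/*
+ * With a NULL group name the attributes defined above appear directly in
+ * the network device's sysfs directory (typically /sys/class/net/<iface>/),
+ * alongside the standard netdev attributes.
+ */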
+
+
+void i1480u_sysfs_release(struct i1480u *i1480u)
+{
+	sysfs_remove_group(&i1480u->net_dev->dev.kobj,
+			   &i1480u_attr_group);
+}
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
new file mode 100644
index 0000000..3426bfb
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -0,0 +1,632 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Deal with TX (massaging data to transmit, handling it)
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Transmission engine. Get an skb, create from that a WLP transmit
+ * context, add a WLP TX header (which we keep prefilled in the
+ * device's instance), fill out the target-specific fields and
+ * fire it.
+ *
+ * ROADMAP:
+ *
+ *   Entry points:
+ *
+ *     i1480u_tx_release(): called by i1480u_disconnect() to release
+ *                          pending tx contexts.
+ *
+ *     i1480u_tx_cb(): callback for TX contexts (USB URBs)
+ *       i1480u_tx_destroy():
+ *
+ *     i1480u_tx_timeout(): called for timeout handling from the
+ *                          network stack.
+ *
+ *     i1480u_hard_start_xmit(): called for transmitting an skb from
+ *                               the network stack. Will interact with WLP
+ *                               substack to verify and prepare frame.
+ *       i1480u_xmit_frame(): actual transmission on hardware
+ *
+ *         i1480u_tx_create()	    Creates TX context
+ *            i1480u_tx_create_1()    For packets in 1 fragment
+ *            i1480u_tx_create_n()    For packets in >1 fragments
+ *
+ * TODO:
+ *
+ * - FIXME: rewrite using usb_sg_*(), add asynch support to
+ *          usb_sg_*(). It might not make too much sense as most of
+ *          the times the MTU will be smaller than one page...
+ */
+
+#include "i1480u-wlp.h"
+#define D_LOCAL 5
+#include <linux/uwb/debug.h>
+
+enum {
+	/* This is only for Next and Last TX packets */
+	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
+		- sizeof(struct untd_hdr_rst),
+};
+
+/** Free resources allocated to a i1480u tx context. */
+static
+void i1480u_tx_free(struct i1480u_tx *wtx)
+{
+	kfree(wtx->buf);
+	if (wtx->skb)
+		dev_kfree_skb_irq(wtx->skb);
+	usb_free_urb(wtx->urb);
+	kfree(wtx);
+}
+
+static
+void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
+	list_del(&wtx->list_node);
+	i1480u_tx_free(wtx);
+	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+}
+
+static
+void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
+{
+	unsigned long flags;
+	struct i1480u_tx *wtx, *next;
+
+	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
+		usb_unlink_urb(wtx->urb);
+	}
+	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+}
+
+
+/**
+ * Callback for a completed tx USB URB.
+ *
+ * TODO:
+ *
+ * - FIXME: recover errors more gracefully
+ * - FIXME: handle NAKs (I don't think they come here) for flow ctl
+ */
+static
+void i1480u_tx_cb(struct urb *urb)
+{
+	struct i1480u_tx *wtx = urb->context;
+	struct i1480u *i1480u = wtx->i1480u;
+	struct net_device *net_dev = i1480u->net_dev;
+	struct device *dev = &i1480u->usb_iface->dev;
+	unsigned long flags;
+
+	switch (urb->status) {
+	case 0:
+		spin_lock_irqsave(&i1480u->lock, flags);
+		i1480u->stats.tx_packets++;
+		i1480u->stats.tx_bytes += urb->actual_length;
+		spin_unlock_irqrestore(&i1480u->lock, flags);
+		break;
+	case -ECONNRESET:	/* Not an error, but a controlled situation; */
+	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
+		dev_dbg(dev, "TX URB: reset/noent %d\n", urb->status);
+		netif_stop_queue(net_dev);
+		break;
+	case -ESHUTDOWN:	/* going away! */
+		dev_dbg(dev, "TX URB: down %d\n", urb->status);
+		netif_stop_queue(net_dev);
+		break;
+	default:
+		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
+		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
+					EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "TX: max acceptable errors exceeded. "
+					"Reset device.\n");
+			netif_stop_queue(net_dev);
+			i1480u_tx_unlink_urbs(i1480u);
+			wlp_reset_all(&i1480u->wlp);
+		}
+		break;
+	}
+	i1480u_tx_destroy(i1480u, wtx);
+	if (atomic_dec_return(&i1480u->tx_inflight.count)
+	    <= i1480u->tx_inflight.threshold
+	    && netif_queue_stopped(net_dev)
+	    && i1480u->tx_inflight.threshold != 0) {
+		if (d_test(2) && printk_ratelimit())
+			d_printf(2, dev, "Restart queue. \n");
+		netif_start_queue(net_dev);
+		atomic_inc(&i1480u->tx_inflight.restart_count);
+	}
+	return;
+}
+
+
+/**
+ * Given a buffer that doesn't fit in a single fragment, create an
+ * scatter/gather structure for delivery to the USB pipe.
+ *
+ * Implements functionality of i1480u_tx_create().
+ *
+ * @wtx:	tx descriptor
+ * @skb:	skb to send
+ * @gfp_mask:	gfp allocation mask
+ * @returns:    Pointer to @wtx if ok, NULL on error.
+ *
+ * Sorry, TOO LONG a function, but breaking it up is kind of hard
+ *
+ * This will break the buffer in chunks smaller than
+ * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
+ * to each:
+ *
+ *   1st header           \
+ *   i1480 tx header      |  fragment 1
+ *   fragment data        /
+ *   nxt header           \  fragment 2
+ *   fragment data        /
+ *   ..
+ *   ..
+ *   last header          \  fragment 3
+ *   last fragment data   /
+ *
+ * This does not fill the i1480 TX header, it is left up to the
+ * caller to do that; you can get it from @wtx->wlp_tx_hdr.
+ *
+ * This function consumes the skb unless there is an error.
+ */
+static
+int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
+		       gfp_t gfp_mask)
+{
+	int result;
+	void *pl;
+	size_t pl_size;
+
+	void *pl_itr, *buf_itr;
+	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
+	struct untd_hdr_1st *untd_hdr_1st;
+	struct wlp_tx_hdr *wlp_tx_hdr;
+	struct untd_hdr_rst *untd_hdr_rst;
+
+	wtx->skb = NULL;
+	pl = skb->data;
+	pl_itr = pl;
+	pl_size = skb->len;
+	pl_size_left = pl_size;	/* payload size */
+	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
+	 * the headers */
+	pl_size_1st = i1480u_MAX_FRG_SIZE
+		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
+	BUG_ON(pl_size_1st > pl_size);
+	pl_size_left -= pl_size_1st;
+	/* The rest have a smaller header (no i1480 TX header). We
+	 * need to break up the payload in blocks smaller than
+	 * i1480u_MAX_PL_SIZE (payload excluding header). */
+	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
+	/* Allocate space for the new buffer. In this new buffer we'll
+	 * place the headers followed by the data fragment, headers,
+	 * data fragments, etc..
+	 */
+	result = -ENOMEM;
+	wtx->buf_size = sizeof(*untd_hdr_1st)
+		+ sizeof(*wlp_tx_hdr)
+		+ frgs * sizeof(*untd_hdr_rst)
+		+ pl_size;
+	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
+	if (wtx->buf == NULL)
+		goto error_buf_alloc;
+
+	buf_itr = wtx->buf;		/* We got the space, let's fill it up */
+	/* Fill 1st fragment */
+	untd_hdr_1st = buf_itr;
+	buf_itr += sizeof(*untd_hdr_1st);
+	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
+	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
+	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
+	untd_hdr_1st->fragment_len =
+		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
+	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
+	/* Set up i1480 header info */
+	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
+	buf_itr += sizeof(*wlp_tx_hdr);
+	/* Copy the first fragment */
+	memcpy(buf_itr, pl_itr, pl_size_1st);
+	pl_itr += pl_size_1st;
+	buf_itr += pl_size_1st;
+
+	/* Now do each remaining fragment */
+	result = -EINVAL;
+	while (pl_size_left > 0) {
+		d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
+			 pl_size_left, buf_itr - wtx->buf);
+		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
+		    > wtx->buf_size) {
+			printk(KERN_ERR "BUG: no space for header\n");
+			goto error_bug;
+		}
+		d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
+			 pl_size_left, buf_itr - wtx->buf);
+		untd_hdr_rst = buf_itr;
+		buf_itr += sizeof(*untd_hdr_rst);
+		if (pl_size_left > i1480u_MAX_PL_SIZE) {
+			frg_pl_size = i1480u_MAX_PL_SIZE;
+			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
+		} else {
+			frg_pl_size = pl_size_left;
+			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
+		}
+		d_printf(5, NULL,
+			 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
+			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
+		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
+		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
+		untd_hdr_rst->padding = 0;
+		if (buf_itr + frg_pl_size - wtx->buf
+		    > wtx->buf_size) {
+			printk(KERN_ERR "BUG: no space for payload\n");
+			goto error_bug;
+		}
+		memcpy(buf_itr, pl_itr, frg_pl_size);
+		buf_itr += frg_pl_size;
+		pl_itr += frg_pl_size;
+		pl_size_left -= frg_pl_size;
+		d_printf(5, NULL,
+			 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
+			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
+	}
+	dev_kfree_skb_irq(skb);
+	return 0;
+
+error_bug:
+	printk(KERN_ERR
+	       "BUG: skb %u bytes\n"
+	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
+	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
+	       skb->len,
+	       frg_pl_size, i1480u_MAX_FRG_SIZE,
+	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
+
+	kfree(wtx->buf);
+error_buf_alloc:
+	return result;
+}
+
+
+/**
+ * Given a buffer that fits in a single fragment, fill out a @wtx
+ * struct for transmitting it down the USB pipe.
+ *
+ * Uses the fact that we have space reserved in front of the skbuff
+ * for hardware headers :]
+ *
+ * This does not fill the i1480 TX header, it is left up to the
+ * caller to do that; you can get it from @wtx->wlp_tx_hdr.
+ *
+ * @wtx:	tx descriptor to fill out
+ * @skb:	skb holding the payload (not consumed)
+ *
+ * This function does not consume the @skb.
+ */
+static
+int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
+		       gfp_t gfp_mask)
+{
+	struct untd_hdr_cmp *untd_hdr_cmp;
+	struct wlp_tx_hdr *wlp_tx_hdr;
+
+	wtx->buf = NULL;
+	wtx->skb = skb;
+	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
+	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
+	wtx->wlp_tx_hdr = wlp_tx_hdr;
+	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
+	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
+
+	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
+	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
+	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
+	untd_hdr_cmp->padding = 0;
+	return 0;
+}
+
+
+/**
+ * Given a skb to transmit, massage it to become palatable for the TX pipe
+ *
+ * This will break the buffer in chunks smaller than
+ * i1480u_MAX_FRG_SIZE and add proper headers to each.
+ *
+ *   1st header           \
+ *   i1480 tx header      |  fragment 1
+ *   fragment data        /
+ *   nxt header           \  fragment 2
+ *   fragment data        /
+ *   ..
+ *   ..
+ *   last header          \  fragment 3
+ *   last fragment data   /
+ *
+ * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
+ *
+ * If the whole packet (plus headers) fits within i1480u_MAX_FRG_SIZE, the
+ * following is composed instead:
+ *
+ *   complete header      \
+ *   i1480 tx header      | single fragment
+ *   packet data          /
+ *
+ * We were going to use s/g support, but because the interface is
+ * synchronous and in the end there is plenty of overhead in doing it, it
+ * didn't seem worth it for data that is going to be smaller than
+ * one page.
+ */
+static
+struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
+				   struct sk_buff *skb, gfp_t gfp_mask)
+{
+	int result;
+	struct usb_endpoint_descriptor *epd;
+	int usb_pipe;
+	unsigned long flags;
+
+	struct i1480u_tx *wtx;
+	const size_t pl_max_size =
+		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
+		- sizeof(struct wlp_tx_hdr);
+
+	wtx = kmalloc(sizeof(*wtx), gfp_mask);
+	if (wtx == NULL)
+		goto error_wtx_alloc;
+	wtx->urb = usb_alloc_urb(0, gfp_mask);
+	if (wtx->urb == NULL)
+		goto error_urb_alloc;
+	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
+	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
+	/* Fits in a single complete packet or need to split? */
+	if (skb->len > pl_max_size) {
+		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
+		if (result < 0)
+			goto error_create;
+		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
+				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
+	} else {
+		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
+		if (result < 0)
+			goto error_create;
+		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
+				  skb->data, skb->len, i1480u_tx_cb, wtx);
+	}
+	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+	list_add(&wtx->list_node, &i1480u->tx_list);
+	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+	return wtx;
+
+error_create:
+	usb_free_urb(wtx->urb);
+error_urb_alloc:
+	kfree(wtx);
+error_wtx_alloc:
+	return NULL;
+}
+
+/**
+ * Actual fragmentation and transmission of frame
+ *
+ * @wlp:  WLP substack data structure
+ * @skb:  To be transmitted
+ * @dst:  Device address of destination
+ * @returns: 0 on success, <0 on failure
+ *
+ * This function can also be called directly (not just from
+ * hard_start_xmit), so we also check here if the interface is up before
+ * sending anything.
+ */
+int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
+		      struct uwb_dev_addr *dst)
+{
+	int result = -ENXIO;
+	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+	struct device *dev = &i1480u->usb_iface->dev;
+	struct net_device *net_dev = i1480u->net_dev;
+	struct i1480u_tx *wtx;
+	struct wlp_tx_hdr *wlp_tx_hdr;
+	static unsigned char dev_bcast[2] = { 0xff, 0xff };
+#if 0
+	int lockup = 50;
+#endif
+
+	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
+		  net_dev);
+	BUG_ON(i1480u->wlp.rc == NULL);
+	if ((net_dev->flags & IFF_UP) == 0)
+		goto out;
+	result = -EBUSY;
+	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
+		if (d_test(2) && printk_ratelimit())
+			d_printf(2, dev, "Max frames in flight, "
+				 "stopping queue.\n");
+		netif_stop_queue(net_dev);
+		goto error_max_inflight;
+	}
+	result = -ENOMEM;
+	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
+	if (unlikely(wtx == NULL)) {
+		if (printk_ratelimit())
+			dev_err(dev, "TX: no memory for WLP TX URB,"
+				" dropping packet (in flight %d)\n",
+				atomic_read(&i1480u->tx_inflight.count));
+		netif_stop_queue(net_dev);
+		goto error_wtx_alloc;
+	}
+	wtx->i1480u = i1480u;
+	/* Fill out the i1480 header; @i1480u->def_tx_hdr read without
+	 * locking. We do so because they are kind of orthogonal to
+	 * each other (and thus not changed in an atomic batch).
+	 * The ETH header is right after the WLP TX header. */
+	wlp_tx_hdr = wtx->wlp_tx_hdr;
+	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
+	wlp_tx_hdr->dstaddr = *dst;
+	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
+	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
+		/* Broadcast message directed to DRP host. Send as best effort
+		 * on PCA. */
+		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
+	}
+
+#if 0
+	dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len);
+	dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
+#endif
+#if 0
+	/* simulates a device lockup after every lockup# packets */
+	if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
+		/* Simulate a dropped transmit interrupt */
+		net_dev->trans_start = jiffies;
+		netif_stop_queue(net_dev);
+		dev_err(dev, "Simulate lockup at %ld\n", jiffies);
+		return result;
+	}
+#endif
+
+	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);		/* Go baby */
+	if (result < 0) {
+		dev_err(dev, "TX: cannot submit URB: %d\n", result);
+		/* We leave the freeing of skb to calling function */
+		wtx->skb = NULL;
+		goto error_tx_urb_submit;
+	}
+	atomic_inc(&i1480u->tx_inflight.count);
+	net_dev->trans_start = jiffies;
+	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+		net_dev, result);
+	return result;
+
+error_tx_urb_submit:
+	i1480u_tx_destroy(i1480u, wtx);
+error_wtx_alloc:
+error_max_inflight:
+out:
+	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+		net_dev, result);
+	return result;
+}
+
+
+/**
+ * Transmit an skb. Called when an skbuff has to be transmitted.
+ *
+ * The skb is first passed to WLP substack to ensure this is a valid
+ * frame. If valid the device address of destination will be filled and
+ * the WLP header prepended to the skb. If this step fails we fake sending
+ * the frame; if we returned an error the network stack would just keep trying.
+ *
+ * Broadcast frames inside a WSS need to be treated specially, as multicast is
+ * not supported. A broadcast frame is sent as unicast to each member of the
+ * WSS - this is done by the WLP substack when it finds a broadcast frame.
+ * So, we test if the WLP substack took over the skb and only transmit it
+ * if it has not (been taken over).
+ *
+ * @net_dev->xmit_lock is held
+ */
+int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+	int result;
+	struct i1480u *i1480u = netdev_priv(net_dev);
+	struct device *dev = &i1480u->usb_iface->dev;
+	struct uwb_dev_addr dst;
+
+	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
+		  net_dev);
+	BUG_ON(i1480u->wlp.rc == NULL);
+	if ((net_dev->flags & IFF_UP) == 0)
+		goto error;
+	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
+	if (result < 0) {
+		dev_err(dev, "WLP verification of TX frame failed (%d). "
+			"Dropping packet.\n", result);
+		goto error;
+	} else if (result == 1) {
+		d_printf(6, dev, "WLP will transmit frame. \n");
+		/* trans_start time will be set when WLP actually transmits
+		 * the frame */
+		goto out;
+	}
+	d_printf(6, dev, "Transmitting frame. \n");
+	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
+	if (result < 0) {
+		dev_err(dev, "Frame TX failed (%d).\n", result);
+		goto error;
+	}
+	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+		net_dev, result);
+	return NETDEV_TX_OK;
+error:
+	dev_kfree_skb_any(skb);
+	i1480u->stats.tx_dropped++;
+out:
+	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+		net_dev, result);
+	return NETDEV_TX_OK;
+}
+
+
+/**
+ * Called when a pkt transmission doesn't complete in a reasonable period
+ * Device reset may sleep - do it outside of interrupt context (delayed)
+ */
+void i1480u_tx_timeout(struct net_device *net_dev)
+{
+	struct i1480u *i1480u = netdev_priv(net_dev);
+
+	wlp_reset_all(&i1480u->wlp);
+}
+
+
+void i1480u_tx_release(struct i1480u *i1480u)
+{
+	unsigned long flags;
+	struct i1480u_tx *wtx, *next;
+	int count = 0, empty;
+
+	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
+		count++;
+		usb_unlink_urb(wtx->urb);
+	}
+	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+	count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */
+	/*
+	 * We don't like this solution too much (dirty as it is), but
+	 * it is cheaper than putting a refcount on each i1480u_tx and
+	 * waiting for all of them to go away...
+	 *
+	 * Called when no more packets can be added to tx_list
+	 * so we can wait for it to become empty.
+	 */
+	while (1) {
+		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+		empty = list_empty(&i1480u->tx_list);
+		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+		if (empty)
+			break;
+		count--;
+		BUG_ON(count == 0);
+		msleep(20);
+	}
+}
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c
new file mode 100644
index 0000000..cf6f3d1
--- /dev/null
+++ b/drivers/uwb/ie.c
@@ -0,0 +1,541 @@
+/*
+ * Ultra Wide Band
+ * Information Element Handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include "uwb-internal.h"
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * uwb_ie_next - get the next IE in a buffer
+ * @ptr: start of the buffer containing the IE data
+ * @len: length of the buffer
+ *
+ * Both @ptr and @len are updated so subsequent calls to uwb_ie_next()
+ * will get the next IE.
+ *
+ * NULL is returned (and @ptr and @len will not be updated) if there
+ * are no more IEs in the buffer or the buffer is too short.
+ */
+struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len)
+{
+	struct uwb_ie_hdr *hdr;
+	size_t ie_len;
+
+	if (*len < sizeof(struct uwb_ie_hdr))
+		return NULL;
+
+	hdr = *ptr;
+	ie_len = sizeof(struct uwb_ie_hdr) + hdr->length;
+
+	if (*len < ie_len)
+		return NULL;
+
+	*ptr += ie_len;
+	*len -= ie_len;
+
+	return hdr;
+}
+EXPORT_SYMBOL_GPL(uwb_ie_next);
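+
+/*
+ * Illustrative sketch only (not part of this driver): walking a buffer
+ * of raw IE data with uwb_ie_next(). @buf and @len are assumed to
+ * describe a caller-provided buffer; element_id and length are the
+ * uwb_ie_hdr fields used throughout this file.
+ *
+ *	void *itr = buf;
+ *	size_t left = len;
+ *	struct uwb_ie_hdr *ie;
+ *
+ *	while ((ie = uwb_ie_next(&itr, &left)) != NULL)
+ *		pr_info("IE 0x%02x, %u payload bytes\n",
+ *			ie->element_id, ie->length);
+ */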
+
+/**
+ * Get the IEs that a radio controller is sending in its beacon
+ *
+ * @uwb_rc:  UWB Radio Controller
+ * @returns: Size read from the system
+ *
+ * We don't need to lock the uwb_rc's mutex because we don't modify
+ * anything. The returned event buffer is owned by the caller: kfree()
+ * it when done (see uwb_rc_print_IEs()), or keep it around as
+ * uwb_rc_ie_setup() does.
+ */
+ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie)
+{
+	ssize_t result;
+	struct device *dev = &uwb_rc->uwb_dev.dev;
+	struct uwb_rccb *cmd = NULL;
+	struct uwb_rceb *reply = NULL;
+	struct uwb_rc_evt_get_ie *get_ie;
+
+	d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie);
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_kzalloc;
+	cmd->bCommandType = UWB_RC_CET_GENERAL;
+	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
+	result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd),
+			     UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
+			     &reply);
+	if (result < 0)
+		goto error_cmd;
+	get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb);
+	if (result < sizeof(*get_ie)) {
+		dev_err(dev, "not enough data returned for decoding GET IE "
+			"(%zu bytes received vs %zu needed)\n",
+			result, sizeof(*get_ie));
+		result = -EINVAL;
+	} else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) {
+		dev_err(dev, "not enough data returned for decoding GET IE "
+			"payload (%zu bytes received vs %zu needed)\n", result,
+			sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength));
+		result = -EINVAL;
+	} else
+		*pget_ie = get_ie;
+error_cmd:
+	kfree(cmd);
+error_kzalloc:
+	d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_get_ie);
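+
+/*
+ * Illustrative sketch only (not part of this driver): reading the IEs
+ * the radio controller is currently beaconing. @rc is assumed to be a
+ * referenced radio controller; see uwb_rc_print_IEs() in lc-rc.c for a
+ * real user of this pattern.
+ *
+ *	struct uwb_rc_evt_get_ie *ie_info;
+ *	ssize_t size;
+ *
+ *	size = uwb_rc_get_ie(rc, &ie_info);
+ *	if (size < 0)
+ *		return size;
+ *	... walk le16_to_cpu(ie_info->wIELength) bytes at ie_info->IEData ...
+ *	kfree(ie_info);
+ */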
+
+
+/*
+ * Given a pointer to an IE, print it in ASCII/hex followed by a new line
+ *
+ * @ie_hdr: pointer to the IE header. Length is in there, and it is
+ *          guaranteed that the ie_hdr->length bytes following it are
+ *          safely accessible.
+ *
+ * @_ctx:   context data passed from uwb_ie_for_each(), a struct uwb_buf_ctx
+ */
+int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
+		    size_t offset, void *_ctx)
+{
+	struct uwb_buf_ctx *ctx = _ctx;
+	const u8 *pl = (void *)(ie_hdr + 1);
+	u8 pl_itr;
+
+	ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes,
+				"%02x %02x ", (unsigned) ie_hdr->element_id,
+				(unsigned) ie_hdr->length);
+	pl_itr = 0;
+	while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size)
+		ctx->bytes += scnprintf(ctx->buf + ctx->bytes,
+					ctx->size - ctx->bytes,
+					"%02x ", (unsigned) pl[pl_itr++]);
+	if (ctx->bytes < ctx->size)
+		ctx->buf[ctx->bytes++] = '\n';
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_ie_dump_hex);
+
+
+/**
+ * Verify that a pointer in a buffer points to a valid IE
+ *
+ * @start: pointer to start of buffer in which IE appears
+ * @itr:   pointer to IE inside buffer that will be verified
+ * @top:   pointer to end of buffer
+ *
+ * @returns: 0 if IE is valid, <0 otherwise
+ *
+ * Verification involves checking that the buffer can contain a
+ * header and the amount of data reported in the IE header can be found in
+ * the buffer.
+ */
+static
+int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start,
+		     const void *itr, const void *top)
+{
+	struct device *dev = &uwb_dev->dev;
+	const struct uwb_ie_hdr *ie_hdr;
+
+	if (top - itr < sizeof(*ie_hdr)) {
+		dev_err(dev, "Bad IE: no data to decode header "
+			"(%zu bytes left vs %zu needed) at offset %zu\n",
+			top - itr, sizeof(*ie_hdr), itr - start);
+		return -EINVAL;
+	}
+	ie_hdr = itr;
+	itr += sizeof(*ie_hdr);
+	if (top - itr < ie_hdr->length) {
+		dev_err(dev, "Bad IE: not enough data for payload "
+			"(%zu bytes left vs %zu needed) at offset %zu\n",
+			top - itr, (size_t)ie_hdr->length,
+			(void *)ie_hdr - start);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+/**
+ * Walk a buffer filled with consecutive IEs
+ *
+ * @uwb_dev: UWB device these IEs belong to (mainly for error messages)
+ *
+ * @fn: function to call with each IE; if it returns 0, we keep
+ *      traversing the buffer. If it returns !0, we'll stop and return
+ *      that value.
+ *
+ * @data: pointer passed to @fn
+ *
+ * @buf: buffer where the consecutive IEs are located
+ *
+ * @size: size of @buf
+ *
+ * Each IE is checked for basic correctness (there is space left for
+ * the header and the payload). If that test is failed, we stop
+ * processing. For every good IE, @fn is called.
+ */
+ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
+			const void *buf, size_t size)
+{
+	ssize_t result = 0;
+	const struct uwb_ie_hdr *ie_hdr;
+	const void *itr = buf, *top = itr + size;
+
+	while (itr < top) {
+		if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0)
+			break;
+		ie_hdr = itr;
+		itr += sizeof(*ie_hdr) + ie_hdr->length;
+		result = fn(uwb_dev, ie_hdr, itr - buf, data);
+		if (result != 0)
+			break;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_ie_for_each);
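+
+/*
+ * Illustrative sketch only (not part of this driver): a callback that
+ * makes uwb_ie_for_each() stop as soon as a given element ID is seen,
+ * using the "return non-zero to stop" convention documented above.
+ *
+ *	static int uwb_ie_find_cb(struct uwb_dev *uwb_dev,
+ *				  const struct uwb_ie_hdr *ie_hdr,
+ *				  size_t offset, void *_id)
+ *	{
+ *		enum uwb_ie *id = _id;
+ *
+ *		return ie_hdr->element_id == *id;
+ *	}
+ *
+ *	enum uwb_ie id = UWB_IDENTIFICATION_IE;
+ *	ssize_t found = uwb_ie_for_each(uwb_dev, uwb_ie_find_cb, &id,
+ *					buf, size);
+ */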
+
+
+/**
+ * Replace all IEs currently being transmitted by a device
+ *
+ * @rc:     UWB radio controller
+ * @cmd:    pointer to the SET-IE command with the IEs to set
+ */
+int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd)
+{
+	int result;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_evt_set_ie reply;
+
+	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply.rceb.wEvent = UWB_RC_CMD_SET_IE;
+	result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb,
+			    sizeof(*cmd) + le16_to_cpu(cmd->wIELength),
+			    &reply.rceb, sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	else if (result != sizeof(reply)) {
+		dev_err(dev, "SET-IE: not enough data to decode reply "
+			"(%d bytes received vs %zu needed)\n",
+			result, sizeof(reply));
+		result = -EIO;
+	} else if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(dev, "SET-IE: command execution failed: %s (%d)\n",
+			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+		result = -EIO;
+	} else
+		result = 0;
+error_cmd:
+	return result;
+}
+
+/**
+ * Determine by IE id if IE is host settable
+ * WUSB 1.0 [8.6.2.8 Table 8.85]
+ *
+ * EXCEPTION:
+ * All but UWB_IE_WLP appear in Table 8.85 of WUSB 1.0. Setting this IE
+ * is required for the WLP substack to perform association with its WSS so
+ * we hope that the WUSB spec will be changed to reflect this.
+ */
+static
+int uwb_rc_ie_is_host_settable(enum uwb_ie element_id)
+{
+	if (element_id == UWB_PCA_AVAILABILITY ||
+	    element_id == UWB_BP_SWITCH_IE ||
+	    element_id == UWB_MAC_CAPABILITIES_IE ||
+	    element_id == UWB_PHY_CAPABILITIES_IE ||
+	    element_id == UWB_APP_SPEC_PROBE_IE ||
+	    element_id == UWB_IDENTIFICATION_IE ||
+	    element_id == UWB_MASTER_KEY_ID_IE ||
+	    element_id == UWB_IE_WLP ||
+	    element_id == UWB_APP_SPEC_IE)
+		return 1;
+	return 0;
+}
+
+
+/**
+ * Extract Host Settable IEs from IE
+ *
+ * @ie_data: pointer to buffer containing all IEs
+ * @size:    size of buffer
+ *
+ * @returns: length of buffer that only includes host settable IEs
+ *
+ * Given a buffer of IEs we move all Host Settable IEs to front of buffer
+ * by overwriting the IEs that are not Host Settable.
+ * Buffer length is adjusted accordingly.
+ */
+static
+ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev,
+				      void *ie_data, size_t size)
+{
+	size_t new_len = size;
+	struct uwb_ie_hdr *ie_hdr;
+	size_t ie_length;
+	void *itr = ie_data, *top = itr + size;
+
+	while (itr < top) {
+		if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0)
+			break;
+		ie_hdr = itr;
+		ie_length = sizeof(*ie_hdr) + ie_hdr->length;
+		if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) {
+			itr += ie_length;
+		} else {
+			memmove(itr, itr + ie_length, top - (itr + ie_length));
+			new_len -= ie_length;
+			top -= ie_length;
+		}
+	}
+	return new_len;
+}
+
+
+/* Initialize the IE management subsystem */
+void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
+{
+	mutex_init(&uwb_rc->ies_mutex);
+}
+
+
+/**
+ * Set up cache for host settable IEs currently being transmitted
+ *
+ * First we just call GET-IE to get the current IEs being transmitted
+ * (or we workaround and pretend we did) and (because the format is
+ * the same) reuse that as the IE cache (with the command prefix, as
+ * explained in 'struct uwb_rc').
+ *
+ * @returns: size of cache created
+ */
+ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc)
+{
+	struct device *dev = &uwb_rc->uwb_dev.dev;
+	ssize_t result;
+	size_t capacity;
+	struct uwb_rc_evt_get_ie *ie_info;
+
+	d_fnstart(3, dev, "(%p)\n", uwb_rc);
+	mutex_lock(&uwb_rc->ies_mutex);
+	result = uwb_rc_get_ie(uwb_rc, &ie_info);
+	if (result < 0)
+		goto error_get_ie;
+	capacity = result;
+	d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result,
+		 (size_t)le16_to_cpu(ie_info->wIELength), ie_info);
+
+	/* Remove IEs that host should not set. */
+	result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev,
+			ie_info->IEData, le16_to_cpu(ie_info->wIELength));
+	if (result < 0)
+		goto error_parse;
+	d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result);
+	uwb_rc->ies = (void *) ie_info;
+	uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL;
+	uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE);
+	uwb_rc->ies_capacity = capacity;
+	d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n",
+		 ie_info, result, capacity);
+	result = 0;
+error_parse:
+error_get_ie:
+	mutex_unlock(&uwb_rc->ies_mutex);
+	d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result);
+	return result;
+}
+
+
+/* Cleanup the whole IE management subsystem */
+void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
+{
+	kfree(uwb_rc->ies);
+	uwb_rc->ies = NULL;
+	uwb_rc->ies_capacity = 0;
+}
+
+
+static
+int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
+	       size_t offset, void *_ctx)
+{
+	size_t *acc_size = _ctx;
+	*acc_size += sizeof(*ie_hdr) + ie_hdr->length;
+	d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size);
+	return 0;
+}
+
+
+/**
+ * Add a new IE to IEs currently being transmitted by device
+ *
+ * @ies: the buffer containing the new IE or IEs to be added to
+ *       the device's beacon. The buffer will be verified for
+ *       consistency (meaning the headers should be right) and
+ *       consistent with the buffer size.
+ * @size: size of @ies (in bytes, total buffer size)
+ * @returns: 0 if ok, <0 errno code on error
+ *
+ * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
+ * after the device sent the first beacon that includes the IEs specified
+ * in the SET IE command. We thus cannot send this command if the device is
+ * not beaconing. Instead, a SET IE command will be sent later right after
+ * we start beaconing.
+ *
+ * Setting an IE on the device will overwrite all current IEs in device. So
+ * we take the current IEs being transmitted by the device, append the
+ * new one, and call SET IE with all the IEs needed.
+ *
+ * The local IE cache will only be updated with the new IE if SET IE
+ * completed successfully.
+ */
+int uwb_rc_ie_add(struct uwb_rc *uwb_rc,
+		  const struct uwb_ie_hdr *ies, size_t size)
+{
+	int result = 0;
+	struct device *dev = &uwb_rc->uwb_dev.dev;
+	struct uwb_rc_cmd_set_ie *new_ies;
+	size_t ies_size, total_size, acc_size = 0;
+
+	if (uwb_rc->ies == NULL)
+		return -ESHUTDOWN;
+	uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size);
+	if (acc_size != size) {
+		dev_err(dev, "BUG: bad IEs, misconstructed headers "
+			"[%zu bytes reported vs %zu calculated]\n",
+			size, acc_size);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	mutex_lock(&uwb_rc->ies_mutex);
+	ies_size = le16_to_cpu(uwb_rc->ies->wIELength);
+	total_size = sizeof(*uwb_rc->ies) + ies_size;
+	if (total_size + size > uwb_rc->ies_capacity) {
+		d_printf(4, dev, "Reallocating IE cache from %p capacity %zu "
+			 "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity,
+			 total_size + size);
+		new_ies = kzalloc(total_size + size, GFP_KERNEL);
+		if (new_ies == NULL) {
+			dev_err(dev, "No memory for adding new IE\n");
+			result = -ENOMEM;
+			goto error_alloc;
+		}
+		memcpy(new_ies, uwb_rc->ies, total_size);
+		uwb_rc->ies_capacity = total_size + size;
+		kfree(uwb_rc->ies);
+		uwb_rc->ies = new_ies;
+		d_printf(4, dev, "New IE cache at %p capacity %zu\n",
+			 uwb_rc->ies, uwb_rc->ies_capacity);
+	}
+	memcpy((void *)uwb_rc->ies + total_size, ies, size);
+	uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size);
+	if (uwb_rc->beaconing != -1) {
+		result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
+		if (result < 0) {
+			dev_err(dev, "Cannot set new IE on device: %d\n",
+				result);
+			uwb_rc->ies->wIELength = cpu_to_le16(ies_size);
+		} else
+			result = 0;
+	}
+	d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n",
+		 le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity,
+		 uwb_rc->ies);
+error_alloc:
+	mutex_unlock(&uwb_rc->ies_mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_ie_add);
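+
+/*
+ * Illustrative sketch only (not part of this driver): adding an
+ * application specific IE to the beacon and removing it again. The
+ * payload bytes are made up for the example; only struct uwb_ie_hdr
+ * and UWB_APP_SPEC_IE come from the UWB headers.
+ *
+ *	struct {
+ *		struct uwb_ie_hdr hdr;
+ *		u8 data[4];
+ *	} __attribute__((packed)) ie = {
+ *		.hdr.element_id = UWB_APP_SPEC_IE,
+ *		.hdr.length = sizeof(ie.data),
+ *		.data = { 0x01, 0x02, 0x03, 0x04 },
+ *	};
+ *
+ *	if (uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie)) < 0)
+ *		...;
+ *	...
+ *	uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
+ */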
+
+
+/*
+ * Remove an IE from internal cache
+ *
+ * We are dealing with our internal IE cache so no need to verify that the
+ * IEs are valid (it has been done already).
+ *
+ * Should be called with ies_mutex held
+ *
+ * We do not break out once an IE is found in the cache. It is currently
+ * possible to have more than one IE with the same ID included in the
+ * beacon. We don't reallocate, we just mark the size smaller.
+ */
+static
+int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove)
+{
+	struct uwb_ie_hdr *ie_hdr;
+	size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength);
+	void *itr = uwb_rc->ies->IEData;
+	void *top = itr + new_len;
+
+	while (itr < top) {
+		ie_hdr = itr;
+		if (ie_hdr->element_id != to_remove) {
+			itr += sizeof(*ie_hdr) + ie_hdr->length;
+		} else {
+			int ie_length;
+			ie_length = sizeof(*ie_hdr) + ie_hdr->length;
+			if (top - itr != ie_length)
+				memmove(itr, itr + ie_length, top - (itr + ie_length));
+			top -= ie_length;
+			new_len -= ie_length;
+		}
+	}
+	uwb_rc->ies->wIELength = cpu_to_le16(new_len);
+	return 0;
+}
+
+
+/**
+ * Remove an IE currently being transmitted by device
+ *
+ * @element_id: id of IE to be removed from device's beacon
+ */
+int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id)
+{
+	struct device *dev = &uwb_rc->uwb_dev.dev;
+	int result;
+
+	if (uwb_rc->ies == NULL)
+		return -ESHUTDOWN;
+	mutex_lock(&uwb_rc->ies_mutex);
+	result = uwb_rc_ie_cache_rm(uwb_rc, element_id);
+	if (result < 0)
+		dev_err(dev, "Cannot remove IE from cache.\n");
+	if (uwb_rc->beaconing != -1) {
+		result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
+		if (result < 0)
+			dev_err(dev, "Cannot set new IE on device.\n");
+	}
+	mutex_unlock(&uwb_rc->ies_mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
new file mode 100644
index 0000000..15f856c
--- /dev/null
+++ b/drivers/uwb/lc-dev.c
@@ -0,0 +1,492 @@
+/*
+ * Ultra Wide Band
+ * Life cycle of devices
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/random.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+
+/* We initialize addresses to 0xff (invalid, as it is bcast) */
+static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
+{
+	memset(&addr->data, 0xff, sizeof(addr->data));
+}
+
+static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
+{
+	memset(&addr->data, 0xff, sizeof(addr->data));
+}
+
+/* @returns !0 if a device @addr is a broadcast address */
+static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr)
+{
+	static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } };
+	return !uwb_dev_addr_cmp(addr, &bcast);
+}
+
+/*
+ * Add callback @new to be called when an event occurs in @rc.
+ */
+int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
+{
+	if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
+		return -ERESTARTSYS;
+	list_add(&new->list_node, &rc->notifs_chain.list);
+	mutex_unlock(&rc->notifs_chain.mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_notifs_register);
+
+/*
+ * Remove event handler (callback)
+ */
+int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
+{
+	if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
+		return -ERESTARTSYS;
+	list_del(&entry->list_node);
+	mutex_unlock(&rc->notifs_chain.mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_notifs_deregister);
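+
+/*
+ * Illustrative sketch only (not part of this driver): listening for
+ * devices coming on and off the air. The .cb and .data fields are the
+ * ones used by uwb_notify() below; the callback is assumed to return
+ * void here (its exact prototype lives in the UWB headers).
+ *
+ *	static void my_uwb_notif(void *data, struct uwb_dev *uwb_dev,
+ *				 enum uwb_notifs event)
+ *	{
+ *		if (event == UWB_NOTIF_ONAIR)
+ *			dev_info(&uwb_dev->dev, "UWB device appeared\n");
+ *	}
+ *
+ *	static struct uwb_notifs_handler my_handler = {
+ *		.cb = my_uwb_notif,
+ *	};
+ *
+ *	uwb_notifs_register(rc, &my_handler);
+ *	...
+ *	uwb_notifs_deregister(rc, &my_handler);
+ */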
+
+/*
+ * Notify all event handlers of a given event on @rc
+ *
+ * We are called with a valid reference to the device, or NULL if the
+ * event is not for a particular device (e.g., a BG join event).
+ */
+void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
+{
+	struct uwb_notifs_handler *handler;
+	if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
+		return;
+	if (!list_empty(&rc->notifs_chain.list)) {
+		list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
+			handler->cb(handler->data, uwb_dev, event);
+		}
+	}
+	mutex_unlock(&rc->notifs_chain.mutex);
+}
+
+/*
+ * Release the backing device of a uwb_dev that has been dynamically allocated.
+ */
+static void uwb_dev_sys_release(struct device *dev)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+	d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev);
+	uwb_bce_put(uwb_dev->bce);
+	d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev);
+	memset(uwb_dev, 0x69, sizeof(*uwb_dev));
+	kfree(uwb_dev);
+	d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev);
+}
+
+/*
+ * Initialize a UWB device instance
+ *
+ * Alloc, zero and call this function.
+ */
+void uwb_dev_init(struct uwb_dev *uwb_dev)
+{
+	mutex_init(&uwb_dev->mutex);
+	device_initialize(&uwb_dev->dev);
+	uwb_dev->dev.release = uwb_dev_sys_release;
+	uwb_dev_addr_init(&uwb_dev->dev_addr);
+	uwb_mac_addr_init(&uwb_dev->mac_addr);
+	bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
+}
+
+static ssize_t uwb_dev_EUI_48_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	char addr[UWB_ADDR_STRSIZE];
+
+	uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
+	return sprintf(buf, "%s\n", addr);
+}
+static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);
+
+static ssize_t uwb_dev_DevAddr_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	char addr[UWB_ADDR_STRSIZE];
+
+	uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
+	return sprintf(buf, "%s\n", addr);
+}
+static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);
+
+/*
+ * Show the BPST of this device.
+ *
+ * Calculated from the receive time of the device's beacon and its
+ * slot number.
+ */
+static ssize_t uwb_dev_BPST_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_beca_e *bce;
+	struct uwb_beacon_frame *bf;
+	u16 bpst;
+
+	bce = uwb_dev->bce;
+	mutex_lock(&bce->mutex);
+	bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
+	bpst = bce->be->wBPSTOffset
+		- (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
+	mutex_unlock(&bce->mutex);
+
+	return sprintf(buf, "%d\n", bpst);
+}
+static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
+
+/*
+ * Show the IEs a device is beaconing
+ *
+ * We need to access the beacon cache, so we just lock it really
+ * quick, print the IEs and unlock.
+ *
+ * We have a reference on the cache entry, so that should be
+ * quite safe.
+ */
+static ssize_t uwb_dev_IEs_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+	return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL);
+
+static ssize_t uwb_dev_LQE_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_beca_e *bce = uwb_dev->bce;
+	size_t result;
+
+	mutex_lock(&bce->mutex);
+	result = stats_show(&uwb_dev->bce->lqe_stats, buf);
+	mutex_unlock(&bce->mutex);
+	return result;
+}
+
+static ssize_t uwb_dev_LQE_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_beca_e *bce = uwb_dev->bce;
+	ssize_t result;
+
+	mutex_lock(&bce->mutex);
+	result = stats_store(&uwb_dev->bce->lqe_stats, buf, size);
+	mutex_unlock(&bce->mutex);
+	return result;
+}
+static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store);
+
+static ssize_t uwb_dev_RSSI_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_beca_e *bce = uwb_dev->bce;
+	size_t result;
+
+	mutex_lock(&bce->mutex);
+	result = stats_show(&uwb_dev->bce->rssi_stats, buf);
+	mutex_unlock(&bce->mutex);
+	return result;
+}
+
+static ssize_t uwb_dev_RSSI_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_beca_e *bce = uwb_dev->bce;
+	ssize_t result;
+
+	mutex_lock(&bce->mutex);
+	result = stats_store(&uwb_dev->bce->rssi_stats, buf, size);
+	mutex_unlock(&bce->mutex);
+	return result;
+}
+static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
+
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_EUI_48.attr,
+	&dev_attr_DevAddr.attr,
+	&dev_attr_BPST.attr,
+	&dev_attr_IEs.attr,
+	&dev_attr_LQE.attr,
+	&dev_attr_RSSI.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.attrs = dev_attrs,
+};
+
+static struct attribute_group *groups[] = {
+	&dev_attr_group,
+	NULL,
+};
+
+/**
+ * Device SYSFS registration
+ */
+static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
+{
+	int result;
+	struct device *dev;
+
+	d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev);
+	BUG_ON(parent_dev == NULL);
+
+	dev = &uwb_dev->dev;
+	/* Device sysfs files are only useful for neighbor devices not
+	   local radio controllers. */
+	if (&uwb_dev->rc->uwb_dev != uwb_dev)
+		dev->groups = groups;
+	dev->parent = parent_dev;
+	dev_set_drvdata(dev, uwb_dev);
+
+	result = device_add(dev);
+	d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result);
+	return result;
+}
+
+
+static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
+{
+	d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev);
+	dev_set_drvdata(&uwb_dev->dev, NULL);
+	device_del(&uwb_dev->dev);
+	d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev);
+}
+
+
+/**
+ * Register and initialize a new UWB device
+ *
+ * Did you call uwb_dev_init() on it?
+ *
+ * @parent_rc: is the parent radio controller who has the link to the
+ *             device. When registering the UWB device that is a UWB
+ *             Radio Controller, we point back to it.
+ *
+ * If registering the device that is part of a radio, caller has set
+ * rc->uwb_dev->dev. Otherwise it is to be left NULL--a new one will
+ * be allocated.
+ */
+int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
+		struct uwb_rc *parent_rc)
+{
+	int result;
+	struct device *dev;
+
+	BUG_ON(uwb_dev == NULL);
+	BUG_ON(parent_dev == NULL);
+	BUG_ON(parent_rc == NULL);
+
+	mutex_lock(&uwb_dev->mutex);
+	dev = &uwb_dev->dev;
+	uwb_dev->rc = parent_rc;
+	result = __uwb_dev_sys_add(uwb_dev, parent_dev);
+	if (result < 0)
+		printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
+		       dev_name(dev), result);
+	mutex_unlock(&uwb_dev->mutex);
+	return result;
+}
+
+
+void uwb_dev_rm(struct uwb_dev *uwb_dev)
+{
+	mutex_lock(&uwb_dev->mutex);
+	__uwb_dev_sys_rm(uwb_dev);
+	mutex_unlock(&uwb_dev->mutex);
+}
+
+
+static
+int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
+{
+	struct uwb_dev *target_uwb_dev = __target_uwb_dev;
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	if (uwb_dev == target_uwb_dev) {
+		uwb_dev_get(uwb_dev);
+		return 1;
+	} else
+		return 0;
+}
+
+
+/**
+ * Given a UWB device descriptor, validate and refcount it
+ *
+ * @returns NULL if the device does not exist or is quiescing; the ptr to
+ *               it otherwise.
+ */
+struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
+{
+	if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev))
+		return uwb_dev;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL_GPL(uwb_dev_try_get);
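+
+/*
+ * Illustrative sketch only (not part of this driver): revalidating a
+ * cached uwb_dev pointer before using it; the reference taken here is
+ * dropped with uwb_dev_put(), as done elsewhere in this file.
+ *
+ *	if (uwb_dev_try_get(rc, uwb_dev) == NULL)
+ *		return -ENODEV;
+ *	... use uwb_dev ...
+ *	uwb_dev_put(uwb_dev);
+ */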
+
+
+/**
+ * Remove a device from the system [grunt for other functions]
+ */
+int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
+{
+	struct device *dev = &uwb_dev->dev;
+	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
+
+	d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc);
+	uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
+	uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
+	dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
+		 macbuf, devbuf,
+		 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a",
+		 rc ? dev_name(rc->uwb_dev.dev.parent) : "");
+	uwb_dev_rm(uwb_dev);
+	uwb_dev_put(uwb_dev);	/* for the creation in _onair() */
+	d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc);
+	return 0;
+}
+
+
+/**
+ * A device went off the air, clean up after it!
+ *
+ * This is called by the UWB Daemon (through the beacon purge function
+ * uwb_bcn_cache_purge) when it is detected that a device has been in
+ * radio silence for a while.
+ *
+ * If this device is actually a local radio controller we don't need
+ * to go through the offair process, as it is not registered as that.
+ *
+ * NOTE: uwb_bcn_cache.mutex is held!
+ */
+void uwbd_dev_offair(struct uwb_beca_e *bce)
+{
+	struct uwb_dev *uwb_dev;
+
+	uwb_dev = bce->uwb_dev;
+	if (uwb_dev) {
+		uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR);
+		__uwb_dev_offair(uwb_dev, uwb_dev->rc);
+	}
+}
+
+
+/**
+ * A device went on the air, start it up!
+ *
+ * This is called by the UWB Daemon when it is detected that a device
+ * has popped up in the radio range of the radio controller.
+ *
+ * It will just create the device, register it, hook it up to its
+ * beacon cache entry and notify listeners that it is on the air.
+ *
+ * NOTE: uwb_beca.mutex is held, bce->mutex is held
+ */
+void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
+{
+	int result;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_dev *uwb_dev;
+	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
+
+	uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr);
+	uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr);
+	uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL);
+	if (uwb_dev == NULL) {
+		dev_err(dev, "new device %s: Cannot allocate memory\n",
+			macbuf);
+		return;
+	}
+	uwb_dev_init(uwb_dev);		/* This sets refcnt to one, we own it */
+	uwb_dev->mac_addr = *bce->mac_addr;
+	uwb_dev->dev_addr = bce->dev_addr;
+	dev_set_name(&uwb_dev->dev, "%s", macbuf);
+	result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
+	if (result < 0) {
+		dev_err(dev, "new device %s: cannot instantiate device\n",
+			macbuf);
+		goto error_dev_add;
+	}
+	/* plug the beacon cache */
+	bce->uwb_dev = uwb_dev;
+	uwb_dev->bce = bce;
+	uwb_bce_get(bce);		/* released in uwb_dev_sys_release() */
+	dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
+		 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
+		 dev_name(rc->uwb_dev.dev.parent));
+	uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR);
+	return;
+
+error_dev_add:
+	kfree(uwb_dev);
+	return;
+}
+
+/**
+ * Iterate over the list of UWB devices, calling a @function on each
+ *
+ * See docs for device_for_each_child()....
+ *
+ * @rc:       radio controller for the devices.
+ * @function: function to call.
+ * @priv:     data to pass to @function.
+ * @returns:  0 if no invocation of @function returned a value different
+ *            from zero; otherwise, that value.
+ */
+int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
+{
+	return device_for_each_child(&rc->uwb_dev.dev, priv, function);
+}
+EXPORT_SYMBOL_GPL(uwb_dev_for_each);
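+
+/*
+ * Illustrative sketch only (not part of this driver): counting the
+ * neighbor devices seen by a radio controller. As with
+ * device_for_each_child(), the callback receives the backing struct
+ * device; use to_uwb_dev() to get at the UWB device itself.
+ *
+ *	static int uwb_count_dev_cb(struct device *dev, void *priv)
+ *	{
+ *		unsigned *count = priv;
+ *
+ *		(*count)++;
+ *		return 0;
+ *	}
+ *
+ *	unsigned count = 0;
+ *	uwb_dev_for_each(rc, uwb_count_dev_cb, &count);
+ */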
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
new file mode 100644
index 0000000..ee5772f
--- /dev/null
+++ b/drivers/uwb/lc-rc.c
@@ -0,0 +1,495 @@
+/*
+ * Ultra Wide Band
+ * Life cycle of radio controllers
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * A UWB radio controller is also a UWB device, so it embeds one...
+ *
+ * List of RCs comes from the 'struct class uwb_rc_class'.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
+#include <linux/usb.h>
+
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+static int uwb_rc_index_match(struct device *dev, void *data)
+{
+	int *index = data;
+	struct uwb_rc *rc = dev_get_drvdata(dev);
+
+	if (rc->index == *index)
+		return 1;
+	return 0;
+}
+
+static struct uwb_rc *uwb_rc_find_by_index(int index)
+{
+	struct device *dev;
+	struct uwb_rc *rc = NULL;
+
+	dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
+	if (dev)
+		rc = dev_get_drvdata(dev);
+	return rc;
+}
+
+static int uwb_rc_new_index(void)
+{
+	int index = 0;
+
+	for (;;) {
+		if (!uwb_rc_find_by_index(index))
+			return index;
+		if (++index < 0)
+			index = 0;
+	}
+}
+
+/**
+ * Release the backing device of a uwb_rc that has been dynamically allocated.
+ */
+static void uwb_rc_sys_release(struct device *dev)
+{
+	struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
+	struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);
+
+	uwb_rc_neh_destroy(rc);
+	uwb_rc_ie_release(rc);
+	d_printf(1, dev, "freed uwb_rc %p\n", rc);
+	kfree(rc);
+}
+
+
+void uwb_rc_init(struct uwb_rc *rc)
+{
+	struct uwb_dev *uwb_dev = &rc->uwb_dev;
+
+	uwb_dev_init(uwb_dev);
+	rc->uwb_dev.dev.class = &uwb_rc_class;
+	rc->uwb_dev.dev.release = uwb_rc_sys_release;
+	uwb_rc_neh_create(rc);
+	rc->beaconing = -1;
+	rc->scan_type = UWB_SCAN_DISABLED;
+	INIT_LIST_HEAD(&rc->notifs_chain.list);
+	mutex_init(&rc->notifs_chain.mutex);
+	uwb_drp_avail_init(rc);
+	uwb_rc_ie_init(rc);
+	uwb_rsv_init(rc);
+	uwb_rc_pal_init(rc);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_init);
+
+
+struct uwb_rc *uwb_rc_alloc(void)
+{
+	struct uwb_rc *rc;
+	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+	if (rc == NULL)
+		return NULL;
+	uwb_rc_init(rc);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_alloc);
+
+static struct attribute *rc_attrs[] = {
+		&dev_attr_mac_address.attr,
+		&dev_attr_scan.attr,
+		&dev_attr_beacon.attr,
+		NULL,
+};
+
+static struct attribute_group rc_attr_group = {
+	.attrs = rc_attrs,
+};
+
+/*
+ * Registration of sysfs specific stuff
+ */
+static int uwb_rc_sys_add(struct uwb_rc *rc)
+{
+	return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
+}
+
+
+static void __uwb_rc_sys_rm(struct uwb_rc *rc)
+{
+	sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
+}
+
+/**
+ * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
+ * @rc:  the radio controller.
+ *
+ * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
+ * then a random locally administered EUI-48 is generated and set on
+ * the device.  Address collisions are sufficiently unlikely
+ * (probability 1/2^40 = 9.1e-13) that they're not checked for.
+ */
+static
+int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
+{
+	int result;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_dev *uwb_dev = &rc->uwb_dev;
+	char devname[UWB_ADDR_STRSIZE];
+	struct uwb_mac_addr addr;
+
+	result = uwb_rc_mac_addr_get(rc, &addr);
+	if (result < 0) {
+		dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
+		return result;
+	}
+
+	if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
+		addr.data[0] = 0x02; /* locally administered and unicast */
+		get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
+
+		result = uwb_rc_mac_addr_set(rc, &addr);
+		if (result < 0) {
+			uwb_mac_addr_print(devname, sizeof(devname), &addr);
+			dev_err(dev, "cannot set EUI-48 address %s: %d\n",
+				devname, result);
+			return result;
+		}
+	}
+	uwb_dev->mac_addr = addr;
+	return 0;
+}
+
+
+
+static int uwb_rc_setup(struct uwb_rc *rc)
+{
+	int result;
+	struct device *dev = &rc->uwb_dev.dev;
+
+	result = uwb_rc_reset(rc);
+	if (result < 0) {
+		dev_err(dev, "cannot reset UWB radio: %d\n", result);
+		goto error;
+	}
+	result = uwb_rc_mac_addr_setup(rc);
+	if (result < 0) {
+		dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
+		goto error;
+	}
+	result = uwb_rc_dev_addr_assign(rc);
+	if (result < 0) {
+		dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
+		goto error;
+	}
+	result = uwb_rc_ie_setup(rc);
+	if (result < 0) {
+		dev_err(dev, "cannot setup IE subsystem: %d\n", result);
+		goto error_ie_setup;
+	}
+	result = uwb_rsv_setup(rc);
+	if (result < 0) {
+		dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
+		goto error_rsv_setup;
+	}
+	uwb_dbg_add_rc(rc);
+	return 0;
+
+error_rsv_setup:
+	uwb_rc_ie_release(rc);
+error_ie_setup:
+error:
+	return result;
+}
+
+
+/**
+ * Register a new UWB radio controller
+ *
+ * Did you call uwb_rc_init() on your rc?
+ *
+ * We assume that this is being called with a > 0 refcount on
+ * it [through ops->{get|put}_device()]. We'll take our own, though.
+ *
+ * @parent_dev is our real device, the one that provides the actual UWB device
+ */
+int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
+{
+	int result;
+	struct device *dev;
+	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
+
+	rc->index = uwb_rc_new_index();
+
+	dev = &rc->uwb_dev.dev;
+	dev_set_name(dev, "uwb%d", rc->index);
+
+	rc->priv = priv;
+
+	result = rc->start(rc);
+	if (result < 0)
+		goto error_rc_start;
+
+	result = uwb_rc_setup(rc);
+	if (result < 0) {
+		dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
+		goto error_rc_setup;
+	}
+
+	result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
+	if (result < 0 && result != -EADDRNOTAVAIL)
+		goto error_dev_add;
+
+	result = uwb_rc_sys_add(rc);
+	if (result < 0) {
+		dev_err(parent_dev, "cannot register UWB radio controller "
+			"dev attributes: %d\n", result);
+		goto error_sys_add;
+	}
+
+	uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
+	uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
+	dev_info(dev,
+		 "new uwb radio controller (mac %s dev %s) on %s %s\n",
+		 macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
+	rc->ready = 1;
+	return 0;
+
+error_sys_add:
+	uwb_dev_rm(&rc->uwb_dev);
+error_dev_add:
+error_rc_setup:
+	rc->stop(rc);
+	uwbd_flush(rc);
+error_rc_start:
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_add);
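+
+/*
+ * Illustrative sketch only (not part of this driver): the registration
+ * sequence a radio controller backend (e.g. an HWA or WHCI driver)
+ * would be expected to follow. The hook names (start, stop, cmd,
+ * filter_event) are the fields this file calls into; their exact
+ * prototypes live in the UWB headers, and the my_* names are made up.
+ *
+ *	struct uwb_rc *rc = uwb_rc_alloc();
+ *	if (rc == NULL)
+ *		return -ENOMEM;
+ *	rc->start = my_rc_start;
+ *	rc->stop = my_rc_stop;
+ *	rc->cmd = my_rc_cmd;
+ *	rc->filter_event = my_rc_filter_event;
+ *
+ *	result = uwb_rc_add(rc, &iface->dev, my_priv);
+ *	if (result < 0)
+ *		goto error;
+ *	...
+ *	uwb_rc_rm(rc);		(on teardown)
+ */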
+
+
+static int uwb_dev_offair_helper(struct device *dev, void *priv)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+	return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
+}
+
+/*
+ * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
+ */
+void uwb_rc_rm(struct uwb_rc *rc)
+{
+	rc->ready = 0;
+
+	uwb_dbg_del_rc(rc);
+	uwb_rsv_cleanup(rc);
+	uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE);
+	if (rc->beaconing >= 0)
+		uwb_rc_beacon(rc, -1, 0);
+	if (rc->scan_type != UWB_SCAN_DISABLED)
+		uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0);
+	uwb_rc_reset(rc);
+
+	rc->stop(rc);
+	uwbd_flush(rc);
+
+	uwb_dev_lock(&rc->uwb_dev);
+	rc->priv = NULL;
+	rc->cmd = NULL;
+	uwb_dev_unlock(&rc->uwb_dev);
+	mutex_lock(&uwb_beca.mutex);
+	uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
+	__uwb_rc_sys_rm(rc);
+	mutex_unlock(&uwb_beca.mutex);
+	uwb_dev_rm(&rc->uwb_dev);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_rm);
+
+static int find_rc_try_get(struct device *dev, void *data)
+{
+	struct uwb_rc *target_rc = data;
+	struct uwb_rc *rc = dev_get_drvdata(dev);
+
+	if (rc == NULL) {
+		WARN_ON(1);
+		return 0;
+	}
+	if (rc == target_rc) {
+		if (rc->ready == 0)
+			return 0;
+		else
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * Given a radio controller descriptor, validate and refcount it
+ *
+ * @returns NULL if the rc does not exist or is quiescing; the ptr to
+ *               it otherwise.
+ */
+struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
+{
+	struct device *dev;
+	struct uwb_rc *rc = NULL;
+
+	dev = class_find_device(&uwb_rc_class, NULL, target_rc,
+				find_rc_try_get);
+	if (dev) {
+		rc = dev_get_drvdata(dev);
+		__uwb_rc_get(rc);
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
+
+/*
+ * RC get for external refcount acquirers...
+ *
+ * Increments the refcount of the device and its backend modules
+ */
+static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
+{
+	if (rc->ready == 0)
+		return NULL;
+	uwb_dev_get(&rc->uwb_dev);
+	return rc;
+}
+
+static int find_rc_grandpa(struct device *dev, void *data)
+{
+	struct device *grandpa_dev = data;
+	struct uwb_rc *rc = dev_get_drvdata(dev);
+
+	if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
+		rc = uwb_rc_get(rc);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * Locate and refcount a radio controller given a common grand-parent
+ *
+ * @grandpa_dev  Pointer to the 'grandparent' device structure.
+ * @returns NULL If the rc does not exist or is quiescing; the ptr to
+ *               it otherwise, properly referenced.
+ *
+ * The Radio Control interface (or the UWB Radio Controller) is always
+ * an interface of a device. The parent is the interface, the
+ * grandparent is the device that encapsulates the interface.
+ *
+ * There is no need to lock around as the "grandpa" would be
+ * refcounted by the target, and to remove the references, the
+ * uwb_rc_class->sem would have to be taken--we hold it, ergo we
+ * should be safe.
+ */
+struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
+{
+	struct device *dev;
+	struct uwb_rc *rc = NULL;
+
+	dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
+				find_rc_grandpa);
+	if (dev)
+		rc = dev_get_drvdata(dev);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
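+
+/*
+ * Illustrative sketch only (not part of this driver): a client driver
+ * bound to another interface of the same device locating the radio
+ * controller through the common "grandparent", and dropping the
+ * reference with uwb_rc_put() when done. @iface is a hypothetical
+ * USB interface of the client.
+ *
+ *	struct uwb_rc *rc;
+ *
+ *	rc = uwb_rc_get_by_grandpa(iface->dev.parent);
+ *	if (rc == NULL)
+ *		return -ENODEV;
+ *	... use rc ...
+ *	uwb_rc_put(rc);
+ */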
+
+/**
+ * Find a radio controller by device address
+ *
+ * @returns the pointer to the radio controller, properly referenced
+ */
+static int find_rc_dev(struct device *dev, void *data)
+{
+	struct uwb_dev_addr *addr = data;
+	struct uwb_rc *rc = dev_get_drvdata(dev);
+
+	if (rc == NULL) {
+		WARN_ON(1);
+		return 0;
+	}
+	if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
+		rc = uwb_rc_get(rc);
+		return 1;
+	}
+	return 0;
+}
+
+struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
+{
+	struct device *dev;
+	struct uwb_rc *rc = NULL;
+
+	dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
+				find_rc_dev);
+	if (dev)
+		rc = dev_get_drvdata(dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
+
+/**
+ * Drop a reference on a radio controller
+ *
+ * This is the version that should be done by entities external to the
+ * UWB Radio Control stack (ie: clients of the API).
+ */
+void uwb_rc_put(struct uwb_rc *rc)
+{
+	__uwb_rc_put(rc);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_put);
+
+/*
+ * Print, in hex, the IEs the radio controller is currently beaconing
+ * into @buf (at most @size bytes).
+ */
+ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size)
+{
+	ssize_t result;
+	struct uwb_rc_evt_get_ie *ie_info;
+	struct uwb_buf_ctx ctx;
+
+	result = uwb_rc_get_ie(uwb_rc, &ie_info);
+	if (result < 0)
+		goto error_get_ie;
+	ctx.buf = buf;
+	ctx.size = size;
+	ctx.bytes = 0;
+	uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx,
+			ie_info->IEData, result - sizeof(*ie_info));
+	result = ctx.bytes;
+	kfree(ie_info);
+error_get_ie:
+	return result;
+}
+
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
new file mode 100644
index 0000000..9b4eb64
--- /dev/null
+++ b/drivers/uwb/neh.c
@@ -0,0 +1,616 @@
+/*
+ * WUSB Wire Adapter: Radio Control Interface (WUSB[8])
+ * Notification and Event Handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI
+ * card delivers a stream of notifications and events to the
+ * notification and event endpoint or area. This code takes care of
+ * getting a buffer with that data, breaking it up in separate
+ * notifications and events and then deliver those.
+ *
+ * Events are answers to commands and they carry a context ID that
+ * associates them to the command. Notifications are that,
+ * notifications, they come out of the blue and have a context ID of
+ * zero. Think of the context ID kind of like a handler. The
+ * uwb_rc_neh_* code deals with managing context IDs.
+ *
+ * This is why you require a handle to operate on a UWB host. When you
+ * open a handle a context ID is assigned to you.
+ *
+ * So, the way it is done is:
+ *
+ * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id)
+ * 2. Issue command [rc->cmd(rc, ...)]
+ * 3. Arm the timeout timer [uwb_rc_neh_arm()]
+ * 4. Release the reference to the neh [uwb_rc_neh_put()]
+ * 5. Wait for the callback
+ * 6. Command result (RCEB) is passed to the callback
+ *
+ * If (2) fails, you should remove the handle [uwb_rc_neh_rm()]
+ * instead of arming the timer.
+ *
+ * Handles are for use in *serialized*, single-threaded code.
+ *
+ * When the notification/event comes, the IRQ handler/endpoint
+ * callback passes the data read to uwb_rc_neh_grok() which will break
+ * it up in a discrete series of events, look up who is listening for
+ * them and execute the pertinent callbacks.
+ *
+ * If the reader detects an error while reading the data stream, call
+ * uwb_rc_neh_error().
+ *
+ * CONSTRAINTS/ASSUMPTIONS:
+ *
+ * - Most notifications/events are small (less than 0.5k), copying
+ *   around is ok.
+ *
+ * - Notifications/events are ALWAYS smaller than PAGE_SIZE
+ *
+ * - Notifications/events always come in a single piece (ie: a buffer
+ *   will always contain entire notifications/events).
+ *
+ * - we cannot know in advance how long each event is (because they
+ *   lack a length field in their header--smart move by the standards
+ *   body, btw). So we need a facility to get the event size given the
+ *   header. This is what the EST code does (notif/Event Size
+ *   Tables), check nest.c--as well, you can associate the size to
+ *   the handle [w/ neh->extra_size()].
+ *
+ * - Most notifications/events are fixed size; only a few are variable
+ *   size (NEST takes care of that).
+ *
+ * - Listeners of events expect them, so they usually provide a
+ *   buffer, as they know the size. Listeners to notifications don't,
+ *   so we allocate their buffers dynamically.
+ */
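+
+/*
+ * Illustrative sketch only (not part of this driver): the command flow
+ * described above, as the core's command helpers are expected to use
+ * it. The exact rc->cmd() prototype lives in the UWB headers; cmd,
+ * cmd_size, my_cb and my_arg are made up, and the GET_IE IDs are just
+ * an example event.
+ *
+ *	neh = uwb_rc_neh_add(rc, cmd, UWB_RC_CET_GENERAL,
+ *			     UWB_RC_CMD_GET_IE, my_cb, my_arg);
+ *	if (IS_ERR(neh))
+ *		return PTR_ERR(neh);
+ *
+ *	result = rc->cmd(rc, cmd, cmd_size);
+ *	if (result < 0) {
+ *		uwb_rc_neh_rm(rc, neh);		(step 2 failed)
+ *		uwb_rc_neh_put(neh);
+ *		return result;
+ *	}
+ *	uwb_rc_neh_arm(rc, neh);
+ *	uwb_rc_neh_put(neh);
+ *	(my_cb() is eventually called with the RCEB, or -ETIMEDOUT)
+ */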
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+
+#include "uwb-internal.h"
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/*
+ * UWB Radio Controller Notification/Event Handle
+ *
+ * Represents an entity waiting for an event coming from the UWB Radio
+ * Controller with a given context id (context) and type (evt_type and
+ * evt). On reception of the notification/event, the callback (cb) is
+ * called with the event.
+ *
+ * If the timer expires before the event is received, the callback is
+ * called with -ETIMEDOUT as the event size.
+ */
+struct uwb_rc_neh {
+	struct kref kref;
+
+	struct uwb_rc *rc;
+	u8 evt_type;
+	__le16 evt;
+	u8 context;
+	uwb_rc_cmd_cb_f cb;
+	void *arg;
+
+	struct timer_list timer;
+	struct list_head list_node;
+};
+
+static void uwb_rc_neh_timer(unsigned long arg);
+
+static void uwb_rc_neh_release(struct kref *kref)
+{
+	struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);
+
+	kfree(neh);
+}
+
+static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
+{
+	kref_get(&neh->kref);
+}
+
+/**
+ * uwb_rc_neh_put - release reference to a neh
+ * @neh: the neh
+ */
+void uwb_rc_neh_put(struct uwb_rc_neh *neh)
+{
+	kref_put(&neh->kref, uwb_rc_neh_release);
+}
+
+
+/**
+ * Assigns @neh a context id from @rc's pool
+ *
+ * @rc:	    UWB Radio Controller descriptor; @rc->neh_lock taken
+ * @neh:    Notification/Event Handle
+ * @returns 0 if context id was assigned ok; < 0 errno on error (if
+ *	    all the context IDs are taken).
+ *
+ * (assumes @rc->neh_lock is held).
+ *
+ * NOTE: WUSB spec reserves context ids 0x00 for notifications and
+ *	 0xff is invalid, so they must not be used. Initialization
+ *	 fills up those two in the bitmap so they are not allocated.
+ *
+ * We spread the allocation around to reduce the possibility of two
+ * consecutively opened @neh's getting the same context ID assigned (to
+ * avoid surprises with late events that timed out a long time ago). So
+ * first we search from where @rc->ctx_roll is, if not found, we
+ * search from zero.
+ */
+static
+int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+	int result;
+	result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
+				    rc->ctx_roll++);
+	if (result < UWB_RC_CTX_MAX)
+		goto found;
+	result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
+	if (result < UWB_RC_CTX_MAX)
+		goto found;
+	return -ENFILE;
+found:
+	set_bit(result, rc->ctx_bm);
+	neh->context = result;
+	return 0;
+}
+
+
+/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */
+static
+void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	if (neh->context == 0)
+		return;
+	if (test_bit(neh->context, rc->ctx_bm) == 0) {
+		dev_err(dev, "context %u not set in bitmap\n",
+			neh->context);
+		WARN_ON(1);
+	}
+	clear_bit(neh->context, rc->ctx_bm);
+	neh->context = 0;
+}
+
+/**
+ * uwb_rc_neh_add - add a neh for a radio controller command
+ * @rc:             the radio controller
+ * @cmd:            the radio controller command
+ * @expected_type:  the type of the expected response event
+ * @expected_event: the expected event ID
+ * @cb:             callback for when the event is received
+ * @arg:            argument for the callback
+ *
+ * Creates a neh and adds it to the list of those waiting for an
+ * event.  A context ID will be assigned to the command.
+ */
+struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
+				  u8 expected_type, u16 expected_event,
+				  uwb_rc_cmd_cb_f cb, void *arg)
+{
+	int result;
+	unsigned long flags;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_neh *neh;
+
+	neh = kzalloc(sizeof(*neh), GFP_KERNEL);
+	if (neh == NULL) {
+		result = -ENOMEM;
+		goto error_kzalloc;
+	}
+
+	kref_init(&neh->kref);
+	INIT_LIST_HEAD(&neh->list_node);
+	init_timer(&neh->timer);
+	neh->timer.function = uwb_rc_neh_timer;
+	neh->timer.data     = (unsigned long)neh;
+
+	neh->rc = rc;
+	neh->evt_type = expected_type;
+	neh->evt = cpu_to_le16(expected_event);
+	neh->cb = cb;
+	neh->arg = arg;
+
+	spin_lock_irqsave(&rc->neh_lock, flags);
+	result = __uwb_rc_ctx_get(rc, neh);
+	if (result >= 0) {
+		cmd->bCommandContext = neh->context;
+		list_add_tail(&neh->list_node, &rc->neh_list);
+		uwb_rc_neh_get(neh);
+	}
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+	if (result < 0)
+		goto error_ctx_get;
+
+	return neh;
+
+error_ctx_get:
+	kfree(neh);
+error_kzalloc:
+	dev_err(dev, "cannot open handle to radio controller: %d\n", result);
+	return ERR_PTR(result);
+}
+
+static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+	del_timer(&neh->timer);
+	__uwb_rc_ctx_put(rc, neh);
+	list_del(&neh->list_node);
+}
+
+/**
+ * uwb_rc_neh_rm - remove a neh.
+ * @rc:  the radio controller
+ * @neh: the neh to remove
+ *
+ * Remove an active neh immediately instead of waiting for the event
+ * (or a time out).
+ */
+void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rc->neh_lock, flags);
+	__uwb_rc_neh_rm(rc, neh);
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+
+	uwb_rc_neh_put(neh);
+}
+
+/**
+ * uwb_rc_neh_arm - arm an event handler timeout timer
+ *
+ * @rc:     UWB Radio Controller
+ * @neh:    Notification/event handler for @rc
+ *
+ * The timer is only armed if the neh is active.
+ */
+void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rc->neh_lock, flags);
+	if (neh->context)
+		mod_timer(&neh->timer,
+			  jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+}
+
+static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
+{
+	(*neh->cb)(neh->rc, neh->arg, rceb, size);
+	uwb_rc_neh_put(neh);
+}
+
+static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
+{
+	return neh->evt_type == rceb->bEventType
+		&& neh->evt == rceb->wEvent
+		&& neh->context == rceb->bEventContext;
+}
+
+/**
+ * Find the neh waiting for a given RC Radio Control Event
+ *
+ * @rc:   UWB Radio Controller
+ * @rceb: Pointer to the RCEB of the received event
+ *
+ * Looks up the neh whose type, event and context ID match @rceb and,
+ * if found, removes it from the list of pending handles so the caller
+ * can complete it. Returns NULL if nobody is waiting for this event.
+ */
+static
+struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
+				     const struct uwb_rceb *rceb)
+{
+	struct uwb_rc_neh *neh = NULL, *h;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rc->neh_lock, flags);
+
+	list_for_each_entry(h, &rc->neh_list, list_node) {
+		if (uwb_rc_neh_match(h, rceb)) {
+			neh = h;
+			break;
+		}
+	}
+
+	if (neh)
+		__uwb_rc_neh_rm(rc, neh);
+
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+
+	return neh;
+}
+
+
+/**
+ * Process notifications coming from the radio control interface
+ *
+ * @rc:    UWB Radio Control Interface descriptor
+ * @rceb:  The received notification/event buffer (ownership passes to
+ *         the queued uwb_event).
+ * @size:  Size of @rceb in bytes, or a negative error code.
+ *
+ * This function is called by the event/notif handling subsystem when
+ * notifications arrive (hwarc_probe() arms a notification/event handle
+ * that calls back this function for every received notification; this
+ * function then will rearm itself).
+ *
+ * Notification data buffers are dynamically allocated by the NEH
+ * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually
+ * allocated is space to contain the notification data.
+ *
+ * Buffers are prefixed with a Radio Control Event Block (RCEB) as
+ * defined by the WUSB Wired-Adapter Radio Control interface. We
+ * just use it for the notification code.
+ *
+ * On each case statement we just transcode the endianness of the
+ * different fields. We declare a pointer to a RCI definition of an
+ * event, and then to a UWB definition of the same event (which are the
+ * same, remember), even if we use different pointers.
+ */
+static
+void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_event *uwb_evt;
+
+	if (size == -ESHUTDOWN)
+		return;
+	if (size < 0) {
+		dev_err(dev, "ignoring event with error code %zd\n",
+			size);
+		return;
+	}
+
+	switch (le16_to_cpu(rceb->wEvent)) {
+		/* Trap some vendor specific events
+		 *
+		 * Do this before allocating anything so nothing is
+		 * leaked when we bail out early.
+		 *
+		 * FIXME: move this to handling in ptc-est, where we
+		 * register a NULL event handler for these two guys
+		 * using the Intel IDs.
+		 */
+	case 0x0103:
+		dev_info(dev, "FIXME: DEVICE ADD\n");
+		return;
+	case 0x0104:
+		dev_info(dev, "FIXME: DEVICE RM\n");
+		return;
+	default:
+		break;
+	}
+
+	uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
+	if (unlikely(uwb_evt == NULL)) {
+		dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
+			rceb->bEventType, le16_to_cpu(rceb->wEvent),
+			rceb->bEventContext);
+		return;
+	}
+	uwb_evt->rc = __uwb_rc_get(rc);	/* will be put by uwbd's uwbd_event_handle() */
+	uwb_evt->ts_jiffies = jiffies;
+	uwb_evt->type = UWB_EVT_TYPE_NOTIF;
+	uwb_evt->notif.size = size;
+	uwb_evt->notif.rceb = rceb;
+
+	uwbd_event_queue(uwb_evt);
+}
+
+static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_neh *neh;
+	struct uwb_rceb *notif;
+
+	if (rceb->bEventContext == 0) {
+		notif = kmalloc(size, GFP_ATOMIC);
+		if (notif) {
+			memcpy(notif, rceb, size);
+			uwb_rc_notif(rc, notif, size);
+		} else
+			dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
+				rceb->bEventType, le16_to_cpu(rceb->wEvent),
+				rceb->bEventContext, size);
+	} else {
+		neh = uwb_rc_neh_lookup(rc, rceb);
+		if (neh)
+			uwb_rc_neh_cb(neh, rceb, size);
+		else
+			dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
+				 rceb->bEventType, le16_to_cpu(rceb->wEvent),
+				 rceb->bEventContext, size);
+	}
+}
+
+/**
+ * Given a buffer with one or more UWB RC events/notifications, break
+ * them up and dispatch them.
+ *
+ * @rc:	      UWB Radio Controller
+ * @buf:      Buffer with the stream of notifications/events
+ * @buf_size: Amount of data in the buffer
+ *
+ * Note each notification/event always starts with a 'struct
+ * uwb_rceb', so the minimum size is 4 bytes.
+ *
+ * The device may pass us events formatted differently than expected.
+ * These are first filtered, potentially creating a new event in a new
+ * memory location. If a new event is created by the filter it is also
+ * freed here.
+ *
+ * For each notif/event, tries to guess the size looking at the EST
+ * tables, then looks for a neh that is waiting for that event and if
+ * found, copies the payload to the neh's buffer and calls it back. If
+ * not, the data is ignored.
+ *
+ * Note that if we can't find a size description in the EST tables, we
+ * still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
+ *
+ * Assumptions:
+ *
+ *   @rc->neh_lock is NOT taken
+ *
+ * We keep track of various sizes here:
+ * size:      the amount of buffer left to process for incoming
+ *            events. This buffer may contain events that are not
+ *            formatted as WHCI.
+ * real_size: the actual space taken by this event in the buffer.
+ *            We need to keep track of the real size of an event to be
+ *            able to advance the buffer correctly.
+ * event_size: the size of the event as expected by the core layer
+ *            [OR] the size of the event after filtering. If the
+ *            filtering created a new event in a new memory location
+ *            then this is effectively the size of the new event buffer.
+ */
+void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	void *itr;
+	struct uwb_rceb *rceb;
+	size_t size, real_size, event_size;
+	int needtofree;
+
+	d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size);
+	d_printf(2, dev, "groking event block: %zu bytes\n", buf_size);
+	itr = buf;
+	size = buf_size;
+	while (size > 0) {
+		if (size < sizeof(*rceb)) {
+			dev_err(dev, "not enough data in event buffer to "
+				"process incoming events (%zu left, minimum is "
+				"%zu)\n", size, sizeof(*rceb));
+			break;
+		}
+
+		rceb = itr;
+		if (rc->filter_event) {
+			needtofree = rc->filter_event(rc, &rceb, size,
+						      &real_size, &event_size);
+			if (needtofree < 0 && needtofree != -ENOANO) {
+				dev_err(dev, "BUG: Unable to filter event "
+					"(0x%02x/%04x/%02x) from "
+					"device. \n", rceb->bEventType,
+					le16_to_cpu(rceb->wEvent),
+					rceb->bEventContext);
+				break;
+			}
+		} else
+			needtofree = -ENOANO;
+		/* do real processing if there was no filtering or the
+		 * filtering didn't act */
+		if (needtofree == -ENOANO) {
+			ssize_t ret = uwb_est_find_size(rc, rceb, size);
+			if (ret < 0)
+				break;
+			if (ret > size) {
+				dev_err(dev, "BUG: hw sent incomplete event "
+					"0x%02x/%04x/%02x (%zd bytes), only got "
+					"%zu bytes. We don't handle that.\n",
+					rceb->bEventType, le16_to_cpu(rceb->wEvent),
+					rceb->bEventContext, ret, size);
+				break;
+			}
+			real_size = event_size = ret;
+		}
+		uwb_rc_neh_grok_event(rc, rceb, event_size);
+
+		if (needtofree == 1)
+			kfree(rceb);
+
+		itr += real_size;
+		size -= real_size;
+		d_printf(2, dev, "consumed %zd bytes, %zu left\n",
+			 event_size, size);
+	}
+	d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
+
+
+/**
+ * The entity that reads from the device notification/event channel has
+ * detected an error.
+ *
+ * @rc:    UWB Radio Controller
+ * @error: Errno error code
+ *
+ */
+void uwb_rc_neh_error(struct uwb_rc *rc, int error)
+{
+	struct uwb_rc_neh *neh, *next;
+	unsigned long flags;
+
+	BUG_ON(error >= 0);
+	spin_lock_irqsave(&rc->neh_lock, flags);
+	list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
+		__uwb_rc_neh_rm(rc, neh);
+		uwb_rc_neh_cb(neh, NULL, error);
+	}
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
+
+
+static void uwb_rc_neh_timer(unsigned long arg)
+{
+	struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+	struct uwb_rc *rc = neh->rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rc->neh_lock, flags);
+	__uwb_rc_neh_rm(rc, neh);
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+
+	uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
+}
+
+/** Initializes the @rc's neh subsystem
+ */
+void uwb_rc_neh_create(struct uwb_rc *rc)
+{
+	spin_lock_init(&rc->neh_lock);
+	INIT_LIST_HEAD(&rc->neh_list);
+	set_bit(0, rc->ctx_bm);		/* 0 is reserved (see [WUSB] table 8-65) */
+	set_bit(0xff, rc->ctx_bm);	/* and 0xff is invalid */
+	rc->ctx_roll = 1;
+}
+
+
+/** Releases the @rc's neh subsystem */
+void uwb_rc_neh_destroy(struct uwb_rc *rc)
+{
+	unsigned long flags;
+	struct uwb_rc_neh *neh, *next;
+
+	spin_lock_irqsave(&rc->neh_lock, flags);
+	list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
+		__uwb_rc_neh_rm(rc, neh);
+		uwb_rc_neh_put(neh);
+	}
+	spin_unlock_irqrestore(&rc->neh_lock, flags);
+}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
new file mode 100644
index 0000000..1afb38e
--- /dev/null
+++ b/drivers/uwb/pal.c
@@ -0,0 +1,91 @@
+/*
+ * UWB PAL support.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+/**
+ * uwb_pal_init - initialize a UWB PAL
+ * @pal: the PAL to initialize
+ */
+void uwb_pal_init(struct uwb_pal *pal)
+{
+	INIT_LIST_HEAD(&pal->node);
+}
+EXPORT_SYMBOL_GPL(uwb_pal_init);
+
+/**
+ * uwb_pal_register - register a UWB PAL
+ * @rc: the radio controller the PAL will be using
+ * @pal: the PAL
+ *
+ * The PAL must be initialized with uwb_pal_init().
+ */
+int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal)
+{
+	int ret;
+
+	if (pal->device) {
+		ret = sysfs_create_link(&pal->device->kobj,
+					&rc->uwb_dev.dev.kobj, "uwb_rc");
+		if (ret < 0)
+			return ret;
+		ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
+					&pal->device->kobj, pal->name);
+		if (ret < 0) {
+			sysfs_remove_link(&pal->device->kobj, "uwb_rc");
+			return ret;
+		}
+	}
+
+	spin_lock(&rc->pal_lock);
+	list_add(&pal->node, &rc->pals);
+	spin_unlock(&rc->pal_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_pal_register);
+
+/**
+ * uwb_pal_register - unregister a UWB PAL
+ * @rc: the radio controller the PAL was using
+ * @pal: the PAL
+ */
+void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal)
+{
+	spin_lock(&rc->pal_lock);
+	list_del(&pal->node);
+	spin_unlock(&rc->pal_lock);
+
+	if (pal->device) {
+		sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
+		sysfs_remove_link(&pal->device->kobj, "uwb_rc");
+	}
+}
+EXPORT_SYMBOL_GPL(uwb_pal_unregister);
+
+/**
+ * uwb_rc_pal_init - initialize the PAL related parts of a radio controller
+ * @rc: the radio controller
+ */
+void uwb_rc_pal_init(struct uwb_rc *rc)
+{
+	spin_lock_init(&rc->pal_lock);
+	INIT_LIST_HEAD(&rc->pals);
+}
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c
new file mode 100644
index 0000000..8de856f
--- /dev/null
+++ b/drivers/uwb/reset.c
@@ -0,0 +1,362 @@
+/*
+ * Ultra Wide Band
+ * UWB basic command support and radio reset
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME:
+ *
+ *  - docs
+ *
+ *  - Now we are serializing (using the uwb_dev->mutex) the command
+ *    execution; it should be parallelized as much as possible some
+ *    day.
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "uwb-internal.h"
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * Command result codes (WUSB1.0[T8-69])
+ */
+static
+const char *__strerror[] = {
+	"success",
+	"failure",
+	"hardware failure",
+	"no more slots",
+	"beacon is too large",
+	"invalid parameter",
+	"unsupported power level",
+	"time out (wa) or invalid ie data (whci)",
+	"beacon size exceeded",
+	"cancelled",
+	"invalid state",
+	"invalid size",
+	"ack not recieved",
+	"no more asie notification",
+};
+
+
+/** Return a string matching the given error code */
+const char *uwb_rc_strerror(unsigned code)
+{
+	if (code == 255)
+		return "time out";
+	if (code >= ARRAY_SIZE(__strerror))
+		return "unknown error";
+	return __strerror[code];
+}
+
+int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
+		     struct uwb_rccb *cmd, size_t cmd_size,
+		     u8 expected_type, u16 expected_event,
+		     uwb_rc_cmd_cb_f cb, void *arg)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_neh *neh;
+	int needtofree = 0;
+	int result;
+
+	uwb_dev_lock(&rc->uwb_dev);	/* Protect against rc->priv being removed */
+	if (rc->priv == NULL) {
+		uwb_dev_unlock(&rc->uwb_dev);
+		return -ESHUTDOWN;
+	}
+
+	if (rc->filter_cmd) {
+		needtofree = rc->filter_cmd(rc, &cmd, &cmd_size);
+		if (needtofree < 0 && needtofree != -ENOANO) {
+			dev_err(dev, "%s: filter error: %d\n",
+				cmd_name, needtofree);
+			uwb_dev_unlock(&rc->uwb_dev);
+			return needtofree;
+		}
+	}
+
+	neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
+	if (IS_ERR(neh)) {
+		result = PTR_ERR(neh);
+		uwb_dev_unlock(&rc->uwb_dev); /* don't leak the lock on error */
+		goto out;
+	}
+
+	result = rc->cmd(rc, cmd, cmd_size);
+	uwb_dev_unlock(&rc->uwb_dev);
+	if (result < 0)
+		uwb_rc_neh_rm(rc, neh);
+	else
+		uwb_rc_neh_arm(rc, neh);
+	uwb_rc_neh_put(neh);
+out:
+	if (needtofree == 1)
+		kfree(cmd);
+	return result < 0 ? result : 0;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_cmd_async);
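As an illustrative sketch only (not part of the patch), a driver that already includes the UWB headers might issue an asynchronous RESET roughly as follows; example_reset_done() and example_issue_async_reset() are hypothetical names, and freeing the command right after the call assumes the controller's ->cmd() op does not keep a reference to the buffer once it returns.

static void example_reset_done(struct uwb_rc *rc, void *arg,
			       struct uwb_rceb *reply, ssize_t reply_size)
{
	/* reply_size < 0 reports an error (e.g. -ETIMEDOUT from the neh
	 * timer); otherwise reply points at the confirmation event. */
	if (reply_size < 0)
		dev_err(&rc->uwb_dev.dev, "async RESET failed: %zd\n",
			reply_size);
}

static int example_issue_async_reset(struct uwb_rc *rc)
{
	struct uwb_rccb *cmd;
	int result;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); /* commands cannot live on the stack */
	if (cmd == NULL)
		return -ENOMEM;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);

	result = uwb_rc_cmd_async(rc, "RESET", cmd, sizeof(*cmd),
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_RESET,
				  example_reset_done, NULL);
	kfree(cmd);
	return result;
}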
+
+struct uwb_rc_cmd_done_params {
+	struct completion completion;
+	struct uwb_rceb *reply;
+	ssize_t reply_size;
+};
+
+static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg,
+			    struct uwb_rceb *reply, ssize_t reply_size)
+{
+	struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg;
+
+	if (reply_size > 0) {
+		if (p->reply)
+			reply_size = min(p->reply_size, reply_size);
+		else
+			p->reply = kmalloc(reply_size, GFP_ATOMIC);
+
+		if (p->reply)
+			memcpy(p->reply, reply, reply_size);
+		else
+			reply_size = -ENOMEM;
+	}
+	p->reply_size = reply_size;
+	complete(&p->completion);
+}
+
+
+/**
+ * Generic function for issuing commands to the Radio Control Interface
+ *
+ * @rc:       UWB Radio Control descriptor
+ * @cmd_name: Name of the command being issued (for error messages)
+ * @cmd:      Pointer to rccb structure containing the command;
+ *            normally you embed this structure as the first member of
+ *            the full command structure.
+ * @cmd_size: Size of the whole command buffer pointed to by @cmd.
+ * @reply:    Pointer to where to store the reply
+ * @reply_size: @reply's size
+ * @expected_type: Expected type in the return event
+ * @expected_event: Expected event code in the return event
+ * @preply:   A pointer to the buffer where the event data was received
+ *            will be stored here. Once done with the data, free it with kfree().
+ *
+ * This function is generic; it works for commands that return a fixed
+ * and known size or for commands that return a variable amount of data.
+ *
+ * If a buffer is provided, that is used, although it could be chopped
+ * to the maximum size of the buffer. If the buffer is NULL, then one
+ * will be allocated in *preply with the whole contents of the reply.
+ *
+ * @rc needs to be referenced
+ */
+static
+ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+		     struct uwb_rccb *cmd, size_t cmd_size,
+		     struct uwb_rceb *reply, size_t reply_size,
+		     u8 expected_type, u16 expected_event,
+		     struct uwb_rceb **preply)
+{
+	ssize_t result = 0;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rc_cmd_done_params params;
+
+	init_completion(&params.completion);
+	params.reply = reply;
+	params.reply_size = reply_size;
+
+	result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
+				  expected_type, expected_event,
+				  uwb_rc_cmd_done, &params);
+	if (result)
+		return result;
+
+	wait_for_completion(&params.completion);
+
+	if (preply)
+		*preply = params.reply;
+
+	if (params.reply_size < 0)
+		dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
+			"reception failed: %d\n", cmd_name,
+			expected_type, expected_event, cmd->bCommandContext,
+			(int)params.reply_size);
+	return params.reply_size;
+}
+
+
+/**
+ * Generic function for issuing commands to the Radio Control Interface
+ *
+ * @rc:       UWB Radio Control descriptor
+ * @cmd_name: Name of the command being issued (for error messages)
+ * @cmd:      Pointer to rccb structure containing the command;
+ *            normally you embed this structure as the first member of
+ *            the full command structure.
+ * @cmd_size: Size of the whole command buffer pointed to by @cmd.
+ * @reply:    Pointer to the beginning of the confirmation event
+ *            buffer. Normally bigger than a 'struct hwarc_rceb'.
+ *            You need to fill out reply->bEventType and reply->wEvent (in
+ *            cpu order) as the function will use them to verify the
+ *            confirmation event.
+ * @reply_size: Size of the reply buffer
+ *
+ * The function checks that the length returned in the reply is at
+ * least as big as @reply_size; if not, it will be deemed an error and
+ * -EIO returned.
+ *
+ * @rc needs to be referenced
+ */
+ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+		   struct uwb_rccb *cmd, size_t cmd_size,
+		   struct uwb_rceb *reply, size_t reply_size)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	ssize_t result;
+
+	result = __uwb_rc_cmd(rc, cmd_name,
+			      cmd, cmd_size, reply, reply_size,
+			      reply->bEventType, reply->wEvent, NULL);
+
+	if (result > 0 && result < reply_size) {
+		dev_err(dev, "%s: not enough data returned for decoding reply "
+			"(%zu bytes received vs at least %zu needed)\n",
+			cmd_name, result, reply_size);
+		result = -EIO;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_cmd);
+
+
+/**
+ * Generic function for issuing commands to the Radio Control
+ * Interface that return an unknown amount of data
+ *
+ * @rc:       UWB Radio Control descriptor
+ * @cmd_name: Name of the command being issued (for error messages)
+ * @cmd:      Pointer to rccb structure containing the command;
+ *            normally you embed this structure as the first member of
+ *            the full command structure.
+ * @cmd_size: Size of the whole command buffer pointed to by @cmd.
+ * @expected_type: Expected type in the return event
+ * @expected_event: Expected event code in the return event
+ * @preply:   A pointer to the buffer where the event data was received
+ *            will be stored here. Once done with the data, free it with kfree().
+ *
+ * The function checks that the length returned in the reply is at
+ * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an
+ * error and -EIO returned.
+ *
+ * @rc needs to be referenced
+ */
+ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
+		    struct uwb_rccb *cmd, size_t cmd_size,
+		    u8 expected_type, u16 expected_event,
+		    struct uwb_rceb **preply)
+{
+	return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0,
+			    expected_type, expected_event, preply);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_vcmd);
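A minimal sketch (not part of the patch) of retrieving a variable sized confirmation with uwb_rc_vcmd(); EXAMPLE_CMD and example_get_blob() are hypothetical, and the sketch assumes the surrounding file already includes the UWB headers.

#define EXAMPLE_CMD 0x00f0	/* hypothetical command code */

static int example_get_blob(struct uwb_rc *rc)
{
	struct uwb_rceb *reply = NULL;
	struct uwb_rccb *cmd;
	ssize_t len;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(EXAMPLE_CMD);

	len = uwb_rc_vcmd(rc, "EXAMPLE", cmd, sizeof(*cmd),
			  UWB_RC_CET_GENERAL, EXAMPLE_CMD, &reply);
	kfree(cmd);
	if (len < 0)
		return len;
	/* reply points to len bytes of confirmation data; kfree() it when done */
	kfree(reply);
	return 0;
}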
+
+
+/**
+ * Reset a UWB Host Controller (and all radio settings)
+ *
+ * @rc:      Host Controller descriptor
+ * @returns: 0 if ok, < 0 errno code on error
+ *
+ * We put the command on kmalloc'ed memory as some arches cannot do
+ * USB from the stack. The reply event is copied from a staging
+ * buffer, so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
+ */
+int uwb_rc_reset(struct uwb_rc *rc)
+{
+	int result = -ENOMEM;
+	struct uwb_rc_evt_confirm reply;
+	struct uwb_rccb *cmd;
+	size_t cmd_size = sizeof(*cmd);
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_kzalloc;
+	cmd->bCommandType = UWB_RC_CET_GENERAL;
+	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
+	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply.rceb.wEvent = UWB_RC_CMD_RESET;
+	result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
+			    &reply.rceb, sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev,
+			"RESET: command execution failed: %s (%d)\n",
+			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+		result = -EIO;
+	}
+error_cmd:
+	kfree(cmd);
+error_kzalloc:
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+int uwbd_msg_handle_reset(struct uwb_event *evt)
+{
+	struct uwb_rc *rc = evt->rc;
+	int ret;
+
+	/* Need to prevent the RC hardware module going away while in
+	   the rc->reset() call. */
+	if (!try_module_get(rc->owner))
+		return 0;
+
+	dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
+	ret = rc->reset(rc);
+	if (ret)
+		dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);
+
+	module_put(rc->owner);
+	return ret;
+}
+
+/**
+ * uwb_rc_reset_all - request a reset of the radio controller and PALs
+ * @rc: the radio controller of the hardware device to be reset.
+ *
+ * The full hardware reset of the radio controller and all the PALs
+ * will be scheduled.
+ */
+void uwb_rc_reset_all(struct uwb_rc *rc)
+{
+	struct uwb_event *evt;
+
+	evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
+	if (unlikely(evt == NULL))
+		return;
+
+	evt->rc = __uwb_rc_get(rc);	/* will be put by uwbd's uwbd_event_handle() */
+	evt->ts_jiffies = jiffies;
+	evt->type = UWB_EVT_TYPE_MSG;
+	evt->message = UWB_EVT_MSG_RESET;
+
+	uwbd_event_queue(evt);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
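A short sketch (not part of the patch): a radio controller driver that detects an unrecoverable error can simply schedule a full reset; example_rc_error() is a hypothetical helper.

static void example_rc_error(struct uwb_rc *rc, int error)
{
	dev_err(&rc->uwb_dev.dev, "fatal hardware error %d, resetting\n",
		error);
	uwb_rc_reset_all(rc);	/* queues a UWB_EVT_MSG_RESET for uwbd */
}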
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
new file mode 100644
index 0000000..bae16204
--- /dev/null
+++ b/drivers/uwb/rsv.c
@@ -0,0 +1,680 @@
+/*
+ * UWB reservation management.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+static void uwb_rsv_timer(unsigned long arg);
+
+static const char *rsv_states[] = {
+	[UWB_RSV_STATE_NONE]          = "none",
+	[UWB_RSV_STATE_O_INITIATED]   = "initiated",
+	[UWB_RSV_STATE_O_PENDING]     = "pending",
+	[UWB_RSV_STATE_O_MODIFIED]    = "modified",
+	[UWB_RSV_STATE_O_ESTABLISHED] = "established",
+	[UWB_RSV_STATE_T_ACCEPTED]    = "accepted",
+	[UWB_RSV_STATE_T_DENIED]      = "denied",
+	[UWB_RSV_STATE_T_PENDING]     = "pending",
+};
+
+static const char *rsv_types[] = {
+	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
+	[UWB_DRP_TYPE_HARD]     = "hard",
+	[UWB_DRP_TYPE_SOFT]     = "soft",
+	[UWB_DRP_TYPE_PRIVATE]  = "private",
+	[UWB_DRP_TYPE_PCA]      = "pca",
+};
+
+/**
+ * uwb_rsv_state_str - return a string for a reservation state
+ * @state: the reservation state.
+ */
+const char *uwb_rsv_state_str(enum uwb_rsv_state state)
+{
+	if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
+		return "unknown";
+	return rsv_states[state];
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
+
+/**
+ * uwb_rsv_type_str - return a string for a reservation type
+ * @type: the reservation type
+ */
+const char *uwb_rsv_type_str(enum uwb_drp_type type)
+{
+	if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
+		return "invalid";
+	return rsv_types[type];
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
+
+static void uwb_rsv_dump(struct uwb_rsv *rsv)
+{
+	struct device *dev = &rsv->rc->uwb_dev.dev;
+	struct uwb_dev_addr devaddr;
+	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
+
+	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
+	if (rsv->target.type == UWB_RSV_TARGET_DEV)
+		devaddr = rsv->target.dev->dev_addr;
+	else
+		devaddr = rsv->target.devaddr;
+	uwb_dev_addr_print(target, sizeof(target), &devaddr);
+
+	dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
+}
+
+/*
+ * Get a free stream index for a reservation.
+ *
+ * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
+ * the stream is allocated from a pool of per-RC stream indexes,
+ * otherwise a unique stream index for the target is selected.
+ */
+static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+	unsigned long *streams_bm;
+	int stream;
+
+	switch (rsv->target.type) {
+	case UWB_RSV_TARGET_DEV:
+		streams_bm = rsv->target.dev->streams;
+		break;
+	case UWB_RSV_TARGET_DEVADDR:
+		streams_bm = rc->uwb_dev.streams;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
+	if (stream >= UWB_NUM_STREAMS)
+		return -EBUSY;
+
+	rsv->stream = stream;
+	set_bit(stream, streams_bm);
+
+	return 0;
+}
+
+static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+	unsigned long *streams_bm;
+
+	switch (rsv->target.type) {
+	case UWB_RSV_TARGET_DEV:
+		streams_bm = rsv->target.dev->streams;
+		break;
+	case UWB_RSV_TARGET_DEVADDR:
+		streams_bm = rc->uwb_dev.streams;
+		break;
+	default:
+		return;
+	}
+
+	clear_bit(rsv->stream, streams_bm);
+}
+
+/*
+ * Generate a MAS allocation with a single row component.
+ */
+static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
+				  int first_mas, int mas_per_zone,
+				  int zs, int ze)
+{
+	struct uwb_mas_bm col;
+	int z;
+
+	bitmap_zero(mas->bm, UWB_NUM_MAS);
+	bitmap_zero(col.bm, UWB_NUM_MAS);
+	bitmap_fill(col.bm, mas_per_zone);
+	bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+
+	for (z = zs; z <= ze; z++) {
+		bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS);
+		bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+	}
+}
+
+/*
+ * Allocate some MAS for this reservation based on current local
+ * availability, the reservation parameters (max_mas, min_mas,
+ * sparsity), and the WiMedia rules for MAS allocations.
+ *
+ * Returns -EBUSY if insufficient free MAS are available.
+ *
+ * FIXME: to simplify this, only safe reservations with a single row
+ * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
+ * problems with the MAS reserved for the BP).
+ *
+ * [ECMA-368] section B.2.
+ */
+static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
+{
+	static const int safe_mas_in_row[UWB_NUM_ZONES] = {
+		8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
+	};
+	int n, r;
+	struct uwb_mas_bm mas;
+	bool found = false;
+
+	/*
+	 * Search all valid safe allocations until either: too few MAS
+	 * are available; or the smallest allocation with sufficient
+	 * MAS is found.
+	 *
+	 * The top of the zones is preferred, so space for larger
+	 * allocations is available in the bottom of the zone (e.g., a
+	 * 15 MAS allocation should start in row 14 leaving space for
+	 * a 120 MAS allocation at row 0).
+	 */
+	for (n = safe_mas_in_row[0]; n >= 1; n--) {
+		int num_mas;
+
+		num_mas = n * (UWB_NUM_ZONES - 1);
+		if (num_mas < rsv->min_mas)
+			break;
+		if (found && num_mas < rsv->max_mas)
+			break;
+
+		for (r = UWB_MAS_PER_ZONE-1;  r >= 0; r--) {
+			if (safe_mas_in_row[r] < n)
+				continue;
+			uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
+			if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found)
+		return -EBUSY;
+
+	bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
+	return 0;
+}
+
+static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
+{
+	int sframes = UWB_MAX_LOST_BEACONS;
+
+	/*
+	 * Multicast reservations can become established within 1
+	 * super frame and should not be terminated if no response is
+	 * received.
+	 */
+	if (rsv->is_multicast) {
+		if (rsv->state == UWB_RSV_STATE_O_INITIATED)
+			sframes = 1;
+		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
+			sframes = 0;
+	}
+
+	rsv->expired = false;
+	if (sframes > 0) {
+		/*
+		 * Add an additional 2 superframes to account for the
+		 * time to send the SET DRP IE command.
+		 */
+		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
+		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
+	} else
+		del_timer(&rsv->timer);
+}
+
+/*
+ * Update a reservation's state, and schedule an update of the
+ * transmitted DRP IEs.
+ */
+static void uwb_rsv_state_update(struct uwb_rsv *rsv,
+				 enum uwb_rsv_state new_state)
+{
+	rsv->state = new_state;
+	rsv->ie_valid = false;
+
+	uwb_rsv_dump(rsv);
+
+	uwb_rsv_stroke_timer(rsv);
+	uwb_rsv_sched_update(rsv->rc);
+}
+
+static void uwb_rsv_callback(struct uwb_rsv *rsv)
+{
+	if (rsv->callback)
+		rsv->callback(rsv);
+}
+
+void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
+{
+	if (rsv->state == new_state) {
+		switch (rsv->state) {
+		case UWB_RSV_STATE_O_ESTABLISHED:
+		case UWB_RSV_STATE_T_ACCEPTED:
+		case UWB_RSV_STATE_NONE:
+			uwb_rsv_stroke_timer(rsv);
+			break;
+		default:
+			/* Expecting a state transition so leave timer
+			   as-is. */
+			break;
+		}
+		return;
+	}
+
+	switch (new_state) {
+	case UWB_RSV_STATE_NONE:
+		uwb_drp_avail_release(rsv->rc, &rsv->mas);
+		uwb_rsv_put_stream(rsv);
+		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
+		uwb_rsv_callback(rsv);
+		break;
+	case UWB_RSV_STATE_O_INITIATED:
+		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
+		break;
+	case UWB_RSV_STATE_O_PENDING:
+		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
+		break;
+	case UWB_RSV_STATE_O_ESTABLISHED:
+		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
+		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+		uwb_rsv_callback(rsv);
+		break;
+	case UWB_RSV_STATE_T_ACCEPTED:
+		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
+		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
+		uwb_rsv_callback(rsv);
+		break;
+	case UWB_RSV_STATE_T_DENIED:
+		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
+		break;
+	default:
+		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
+			uwb_rsv_state_str(new_state), new_state);
+	}
+}
+
+static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
+{
+	struct uwb_rsv *rsv;
+
+	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
+	if (!rsv)
+		return NULL;
+
+	INIT_LIST_HEAD(&rsv->rc_node);
+	INIT_LIST_HEAD(&rsv->pal_node);
+	init_timer(&rsv->timer);
+	rsv->timer.function = uwb_rsv_timer;
+	rsv->timer.data     = (unsigned long)rsv;
+
+	rsv->rc = rc;
+
+	return rsv;
+}
+
+static void uwb_rsv_free(struct uwb_rsv *rsv)
+{
+	uwb_dev_put(rsv->owner);
+	if (rsv->target.type == UWB_RSV_TARGET_DEV)
+		uwb_dev_put(rsv->target.dev);
+	kfree(rsv);
+}
+
+/**
+ * uwb_rsv_create - allocate and initialize a UWB reservation structure
+ * @rc: the radio controller
+ * @cb: callback to use when the reservation completes or terminates
+ * @pal_priv: data private to the PAL to be passed in the callback
+ *
+ * The callback is called when the state of the reservation changes from:
+ *
+ *   - pending to accepted
+ *   - pending to denied
+ *   - accepted to terminated
+ *   - pending to terminated
+ */
+struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
+{
+	struct uwb_rsv *rsv;
+
+	rsv = uwb_rsv_alloc(rc);
+	if (!rsv)
+		return NULL;
+
+	rsv->callback = cb;
+	rsv->pal_priv = pal_priv;
+
+	return rsv;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_create);
+
+void uwb_rsv_remove(struct uwb_rsv *rsv)
+{
+	if (rsv->state != UWB_RSV_STATE_NONE)
+		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+	del_timer_sync(&rsv->timer);
+	list_del(&rsv->rc_node);
+	uwb_rsv_free(rsv);
+}
+
+/**
+ * uwb_rsv_destroy - free a UWB reservation structure
+ * @rsv: the reservation to free
+ *
+ * The reservation will be terminated if it is pending or established.
+ */
+void uwb_rsv_destroy(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+
+	mutex_lock(&rc->rsvs_mutex);
+	uwb_rsv_remove(rsv);
+	mutex_unlock(&rc->rsvs_mutex);
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
+
+/**
+ * uwb_rsv_establish - start a reservation establishment
+ * @rsv: the reservation
+ *
+ * The PAL should fill in @rsv's owner, target, type, max_mas,
+ * min_mas, sparsity and is_multicast fields.  If the target is a
+ * uwb_dev it must be referenced.
+ *
+ * The reservation's callback will be called when the reservation is
+ * accepted, denied or times out.
+ */
+int uwb_rsv_establish(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+	int ret;
+
+	mutex_lock(&rc->rsvs_mutex);
+
+	ret = uwb_rsv_get_stream(rsv);
+	if (ret)
+		goto out;
+
+	ret = uwb_rsv_alloc_mas(rsv);
+	if (ret) {
+		uwb_rsv_put_stream(rsv);
+		goto out;
+	}
+
+	list_add_tail(&rsv->rc_node, &rc->reservations);
+	rsv->owner = &rc->uwb_dev;
+	uwb_dev_get(rsv->owner);
+	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
+out:
+	mutex_unlock(&rc->rsvs_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_establish);
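As a sketch of the intended calling sequence (not part of the patch), mirroring the debugfs command handler in uwb-debug.c: example_rsv_cb(), example_reserve() and the MAS/sparsity numbers are illustrative only, and @peer is assumed to be an already referenced uwb_dev.

static void example_rsv_cb(struct uwb_rsv *rsv)
{
	/* called when the reservation is accepted, denied or terminated */
}

static int example_reserve(struct uwb_rc *rc, struct uwb_dev *peer)
{
	struct uwb_rsv *rsv;
	int ret;

	rsv = uwb_rsv_create(rc, example_rsv_cb, NULL);
	if (rsv == NULL)
		return -ENOMEM;

	rsv->target.type  = UWB_RSV_TARGET_DEV;
	rsv->target.dev   = peer;
	rsv->type         = UWB_DRP_TYPE_HARD;
	rsv->max_mas      = 16;		/* illustrative sizes */
	rsv->min_mas      = 8;
	rsv->sparsity     = 1;
	rsv->is_multicast = false;

	/* uwb_rsv_establish() fills in rsv->owner with the local device */
	ret = uwb_rsv_establish(rsv);
	if (ret)
		uwb_rsv_destroy(rsv);
	return ret;
}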
+
+/**
+ * uwb_rsv_modify - modify an already established reservation
+ * @rsv: the reservation to modify
+ * @max_mas: new maximum MAS to reserve
+ * @min_mas: new minimum MAS to reserve
+ * @sparsity: new sparsity to use
+ *
+ * FIXME: implement this once there are PALs that use it.
+ */
+int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity)
+{
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_modify);
+
+/**
+ * uwb_rsv_terminate - terminate an established reservation
+ * @rsv: the reservation to terminate
+ *
+ * A reservation is terminated by removing the DRP IE from the beacon,
+ * the other end will consider the reservation to be terminated when
+ * it does not see the DRP IE for at least mMaxLostBeacons.
+ *
+ * If applicable, the reference to the target uwb_dev will be released.
+ */
+void uwb_rsv_terminate(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+
+	mutex_lock(&rc->rsvs_mutex);
+
+	uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+
+	mutex_unlock(&rc->rsvs_mutex);
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
+
+/**
+ * uwb_rsv_accept - accept a new reservation from a peer
+ * @rsv:      the reservation
+ * @cb:       call back for reservation changes
+ * @pal_priv: data to be passed in the above call back
+ *
+ * Reservation requests from peers are denied unless a PAL accepts them
+ * by calling this function.
+ */
+void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
+{
+	rsv->callback = cb;
+	rsv->pal_priv = pal_priv;
+	rsv->state    = UWB_RSV_STATE_T_ACCEPTED;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_accept);
+
+/*
+ * Does a received DRP IE belong to this reservation?
+ */
+static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
+			  struct uwb_ie_drp *drp_ie)
+{
+	struct uwb_dev_addr *rsv_src;
+	int stream;
+
+	stream = uwb_ie_drp_stream_index(drp_ie);
+
+	if (rsv->stream != stream)
+		return false;
+
+	switch (rsv->target.type) {
+	case UWB_RSV_TARGET_DEVADDR:
+		return rsv->stream == stream;
+	case UWB_RSV_TARGET_DEV:
+		if (uwb_ie_drp_owner(drp_ie))
+			rsv_src = &rsv->owner->dev_addr;
+		else
+			rsv_src = &rsv->target.dev->dev_addr;
+		return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
+	}
+	return false;
+}
+
+static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
+					  struct uwb_dev *src,
+					  struct uwb_ie_drp *drp_ie)
+{
+	struct uwb_rsv *rsv;
+	struct uwb_pal *pal;
+	enum uwb_rsv_state state;
+
+	rsv = uwb_rsv_alloc(rc);
+	if (!rsv)
+		return NULL;
+
+	rsv->rc          = rc;
+	rsv->owner       = src;
+	uwb_dev_get(rsv->owner);
+	rsv->target.type = UWB_RSV_TARGET_DEV;
+	rsv->target.dev  = &rc->uwb_dev;
+	rsv->type        = uwb_ie_drp_type(drp_ie);
+	rsv->stream      = uwb_ie_drp_stream_index(drp_ie);
+	set_bit(rsv->stream, rsv->owner->streams);
+	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);
+
+	/*
+	 * See if any PALs are interested in this reservation. If not,
+	 * deny the request.
+	 */
+	rsv->state = UWB_RSV_STATE_T_DENIED;
+	spin_lock(&rc->pal_lock);
+	list_for_each_entry(pal, &rc->pals, node) {
+		if (pal->new_rsv)
+			pal->new_rsv(rsv);
+		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
+			break;
+	}
+	spin_unlock(&rc->pal_lock);
+
+	list_add_tail(&rsv->rc_node, &rc->reservations);
+	state = rsv->state;
+	rsv->state = UWB_RSV_STATE_NONE;
+	uwb_rsv_set_state(rsv, state);
+
+	return rsv;
+}
+
+/**
+ * uwb_rsv_find - find a reservation for a received DRP IE.
+ * @rc: the radio controller
+ * @src: source of the DRP IE
+ * @drp_ie: the DRP IE
+ *
+ * If the reservation cannot be found and the DRP IE is from a peer
+ * attempting to establish a new reservation, create a new reservation
+ * and add it to the list.
+ */
+struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
+			     struct uwb_ie_drp *drp_ie)
+{
+	struct uwb_rsv *rsv;
+
+	list_for_each_entry(rsv, &rc->reservations, rc_node) {
+		if (uwb_rsv_match(rsv, src, drp_ie))
+			return rsv;
+	}
+
+	if (uwb_ie_drp_owner(drp_ie))
+		return uwb_rsv_new_target(rc, src, drp_ie);
+
+	return NULL;
+}
+
+/*
+ * Go through all the reservations and check for timeouts and (if
+ * necessary) update their DRP IEs.
+ *
+ * FIXME: look at building the SET_DRP_IE command here rather than
+ * having to rescan the list in uwb_rc_send_all_drp_ie().
+ */
+static bool uwb_rsv_update_all(struct uwb_rc *rc)
+{
+	struct uwb_rsv *rsv, *t;
+	bool ie_updated = false;
+
+	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
+		if (rsv->expired)
+			uwb_drp_handle_timeout(rsv);
+		if (!rsv->ie_valid) {
+			uwb_drp_ie_update(rsv);
+			ie_updated = true;
+		}
+	}
+
+	return ie_updated;
+}
+
+void uwb_rsv_sched_update(struct uwb_rc *rc)
+{
+	queue_work(rc->rsv_workq, &rc->rsv_update_work);
+}
+
+/*
+ * Update DRP IEs and, if necessary, the DRP Availability IE and send
+ * the updated IEs to the radio controller.
+ */
+static void uwb_rsv_update_work(struct work_struct *work)
+{
+	struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work);
+	bool ie_updated;
+
+	mutex_lock(&rc->rsvs_mutex);
+
+	ie_updated = uwb_rsv_update_all(rc);
+
+	if (!rc->drp_avail.ie_valid) {
+		uwb_drp_avail_ie_update(rc);
+		ie_updated = true;
+	}
+
+	if (ie_updated)
+		uwb_rc_send_all_drp_ie(rc);
+
+	mutex_unlock(&rc->rsvs_mutex);
+}
+
+static void uwb_rsv_timer(unsigned long arg)
+{
+	struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+
+	rsv->expired = true;
+	uwb_rsv_sched_update(rsv->rc);
+}
+
+void uwb_rsv_init(struct uwb_rc *rc)
+{
+	INIT_LIST_HEAD(&rc->reservations);
+	mutex_init(&rc->rsvs_mutex);
+	INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
+
+	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
+}
+
+int uwb_rsv_setup(struct uwb_rc *rc)
+{
+	char name[16];
+
+	snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
+	rc->rsv_workq = create_singlethread_workqueue(name);
+	if (rc->rsv_workq == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void uwb_rsv_cleanup(struct uwb_rc *rc)
+{
+	struct uwb_rsv *rsv, *t;
+
+	mutex_lock(&rc->rsvs_mutex);
+	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
+		uwb_rsv_remove(rsv);
+	}
+	mutex_unlock(&rc->rsvs_mutex);
+
+	cancel_work_sync(&rc->rsv_update_work);
+	destroy_workqueue(rc->rsv_workq);
+}
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c
new file mode 100644
index 0000000..2d27074
--- /dev/null
+++ b/drivers/uwb/scan.c
@@ -0,0 +1,133 @@
+/*
+ * Ultra Wide Band
+ * Scanning management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ *
+ * FIXME: docs
+ * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal
+ *        with each other. Currently seems that START_BEACON while
+ *        SCAN_ONLY will cancel the scan, so we need to update the
+ *        state here. Clarification request sent by email on
+ *        10/05/2005.
+ *        10/28/2005 No clear answer heard--maybe we'll hack the API
+ *                   so that when we start beaconing, if the HC is
+ *                   scanning in a mode not compatible with beaconing
+ *                   we just fail.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include "uwb-internal.h"
+
+
+/**
+ * Start/stop scanning in a radio controller
+ *
+ * @rc:      UWB Radio Controller
+ * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
+ * @type:    Type of scanning to do.
+ * @bpst_offset: value at which to start scanning (if type ==
+ *                UWB_SCAN_ONLY_STARTTIME)
+ * @returns: 0 if ok, < 0 errno code on error
+ *
+ * We put the command on kmalloc'ed memory as some arches cannot do
+ * USB from the stack. The reply event is copied from a staging
+ * buffer, so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
+ */
+int uwb_rc_scan(struct uwb_rc *rc,
+		unsigned channel, enum uwb_scan_type type,
+		unsigned bpst_offset)
+{
+	int result;
+	struct uwb_rc_cmd_scan *cmd;
+	struct uwb_rc_evt_confirm reply;
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_kzalloc;
+	mutex_lock(&rc->uwb_dev.mutex);
+	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
+	cmd->bChannelNumber = channel;
+	cmd->bScanState = type;
+	cmd->wStartTime = cpu_to_le16(bpst_offset);
+	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+	reply.rceb.wEvent = UWB_RC_CMD_SCAN;
+	result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd),
+			    &reply.rceb, sizeof(reply));
+	if (result < 0)
+		goto error_cmd;
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+		dev_err(&rc->uwb_dev.dev,
+			"SCAN: command execution failed: %s (%d)\n",
+			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+		result = -EIO;
+		goto error_cmd;
+	}
+	rc->scanning = channel;
+	rc->scan_type = type;
+error_cmd:
+	mutex_unlock(&rc->uwb_dev.mutex);
+	kfree(cmd);
+error_kzalloc:
+	return result;
+}
+
+/*
+ * Print scanning state
+ */
+static ssize_t uwb_rc_scan_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_rc *rc = uwb_dev->rc;
+	ssize_t result;
+
+	mutex_lock(&rc->uwb_dev.mutex);
+	result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type);
+	mutex_unlock(&rc->uwb_dev.mutex);
+	return result;
+}
+
+/*
+ * Start/stop scanning as requested through sysfs
+ */
+static ssize_t uwb_rc_scan_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+	struct uwb_rc *rc = uwb_dev->rc;
+	unsigned channel;
+	unsigned type;
+	unsigned bpst_offset = 0;
+	ssize_t result = -EINVAL;
+
+	result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset);
+	if (result >= 2 && type < UWB_SCAN_TOP)
+		result = uwb_rc_scan(rc, channel, type, bpst_offset);
+
+	return result < 0 ? result : size;
+}
+
+/** Radio Control sysfs interface (declaration) */
+DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
new file mode 100644
index 0000000..2d8d62d
--- /dev/null
+++ b/drivers/uwb/umc-bus.c
@@ -0,0 +1,218 @@
+/*
+ * Bus for UWB Multi-interface Controller capabilities.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/uwb/umc.h>
+#include <linux/pci.h>
+
+static int umc_bus_unbind_helper(struct device *dev, void *data)
+{
+	struct device *parent = data;
+
+	if (dev->parent == parent && dev->driver)
+		device_release_driver(dev);
+	return 0;
+}
+
+/**
+ * umc_controller_reset - reset the whole UMC controller
+ * @umc: the UMC device for the radio controller.
+ *
+ * Drivers will be unbound from all UMC devices belonging to the
+ * controller and then the radio controller will be rebound.  The
+ * radio controller is expected to do a full hardware reset when it is
+ * probed.
+ *
+ * If this is called while a probe() or remove() is in progress it
+ * will return -EAGAIN and not perform the reset.
+ */
+int umc_controller_reset(struct umc_dev *umc)
+{
+	struct device *parent = umc->dev.parent;
+	int ret;
+
+	if (down_trylock(&parent->sem))
+		return -EAGAIN;
+	bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper);
+	ret = device_attach(&umc->dev);
+	if (ret == 1)
+		ret = 0;
+	up(&parent->sem);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(umc_controller_reset);
+
+/**
+ * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device.
+ * @umc_drv: umc driver with match_data pointing to a zero-terminated
+ * table of pci_device_id's.
+ * @umc: umc device whose parent is to be matched.
+ */
+int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
+{
+	const struct pci_device_id *id_table = umc_drv->match_data;
+	struct pci_dev *pci;
+
+	if (umc->dev.parent->bus != &pci_bus_type)
+		return 0;
+
+	pci = to_pci_dev(umc->dev.parent);
+	return pci_match_id(id_table, pci) != NULL;
+}
+EXPORT_SYMBOL_GPL(umc_match_pci_id);
+
+static int umc_bus_rescan_helper(struct device *dev, void *data)
+{
+	int ret = 0;
+
+	if (!dev->driver)
+		ret = device_attach(dev);
+
+	return ret < 0 ? ret : 0;
+}
+
+static void umc_bus_rescan(void)
+{
+	int err;
+
+	/*
+	 * We can't use bus_rescan_devices() here as it deadlocks when
+	 * it tries to retake the dev->parent semaphore.
+	 */
+	err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper);
+	if (err < 0)
+		printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
+		       KBUILD_MODNAME, err);
+}
+
+static int umc_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct umc_dev *umc = to_umc_dev(dev);
+	struct umc_driver *umc_driver = to_umc_driver(drv);
+
+	if (umc->cap_id == umc_driver->cap_id) {
+		if (umc_driver->match)
+			return umc_driver->match(umc_driver, umc);
+		else
+			return 1;
+	}
+	return 0;
+}
+
+static int umc_device_probe(struct device *dev)
+{
+	struct umc_dev *umc;
+	struct umc_driver *umc_driver;
+	int err;
+
+	umc_driver = to_umc_driver(dev->driver);
+	umc = to_umc_dev(dev);
+
+	get_device(dev);
+	err = umc_driver->probe(umc);
+	if (err)
+		put_device(dev);
+	else
+		umc_bus_rescan();
+
+	return err;
+}
+
+static int umc_device_remove(struct device *dev)
+{
+	struct umc_dev *umc;
+	struct umc_driver *umc_driver;
+
+	umc_driver = to_umc_driver(dev->driver);
+	umc = to_umc_dev(dev);
+
+	umc_driver->remove(umc);
+	put_device(dev);
+	return 0;
+}
+
+static int umc_device_suspend(struct device *dev, pm_message_t state)
+{
+	struct umc_dev *umc;
+	struct umc_driver *umc_driver;
+	int err = 0;
+
+	umc = to_umc_dev(dev);
+
+	if (dev->driver) {
+		umc_driver = to_umc_driver(dev->driver);
+		if (umc_driver->suspend)
+			err = umc_driver->suspend(umc, state);
+	}
+	return err;
+}
+
+static int umc_device_resume(struct device *dev)
+{
+	struct umc_dev *umc;
+	struct umc_driver *umc_driver;
+	int err = 0;
+
+	umc = to_umc_dev(dev);
+
+	if (dev->driver) {
+		umc_driver = to_umc_driver(dev->driver);
+		if (umc_driver->resume)
+			err = umc_driver->resume(umc);
+	}
+	return err;
+}
+
+static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct umc_dev *umc = to_umc_dev(dev);
+
+	return sprintf(buf, "0x%02x\n", umc->cap_id);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct umc_dev *umc = to_umc_dev(dev);
+
+	return sprintf(buf, "0x%04x\n", umc->version);
+}
+
+static struct device_attribute umc_dev_attrs[] = {
+	__ATTR_RO(capability_id),
+	__ATTR_RO(version),
+	__ATTR_NULL,
+};
+
+struct bus_type umc_bus_type = {
+	.name		= "umc",
+	.match		= umc_bus_match,
+	.probe		= umc_device_probe,
+	.remove		= umc_device_remove,
+	.suspend        = umc_device_suspend,
+	.resume         = umc_device_resume,
+	.dev_attrs	= umc_dev_attrs,
+};
+EXPORT_SYMBOL_GPL(umc_bus_type);
+
+static int __init umc_bus_init(void)
+{
+	return bus_register(&umc_bus_type);
+}
+module_init(umc_bus_init);
+
+static void __exit umc_bus_exit(void)
+{
+	bus_unregister(&umc_bus_type);
+}
+module_exit(umc_bus_exit);
+
+MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
new file mode 100644
index 0000000..aa44e1c
--- /dev/null
+++ b/drivers/uwb/umc-dev.c
@@ -0,0 +1,104 @@
+/*
+ * UWB Multi-interface Controller device management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb/umc.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+static void umc_device_release(struct device *dev)
+{
+	struct umc_dev *umc = to_umc_dev(dev);
+
+	kfree(umc);
+}
+
+/**
+ * umc_device_create - allocate a child UMC device
+ * @parent: parent of the new UMC device.
+ * @n:      index of the new device.
+ *
+ * The new UMC device will have a bus ID of the parent with '-n'
+ * appended.
+ */
+struct umc_dev *umc_device_create(struct device *parent, int n)
+{
+	struct umc_dev *umc;
+
+	umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL);
+	if (umc) {
+		snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d",
+			 parent->bus_id, n);
+		umc->dev.parent  = parent;
+		umc->dev.bus     = &umc_bus_type;
+		umc->dev.release = umc_device_release;
+
+		umc->dev.dma_mask = parent->dma_mask;
+	}
+	return umc;
+}
+EXPORT_SYMBOL_GPL(umc_device_create);
+
+/**
+ * umc_device_register - register a UMC device
+ * @umc: pointer to the UMC device
+ *
+ * The memory resource for the UMC device is acquired and the device
+ * registered with the system.
+ */
+int umc_device_register(struct umc_dev *umc)
+{
+	int err;
+
+	d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc);
+
+	err = request_resource(umc->resource.parent, &umc->resource);
+	if (err < 0) {
+		dev_err(&umc->dev, "can't allocate resource range "
+			"%016Lx to %016Lx: %d\n",
+			(unsigned long long)umc->resource.start,
+			(unsigned long long)umc->resource.end,
+			err);
+		goto error_request_resource;
+	}
+
+	err = device_register(&umc->dev);
+	if (err < 0)
+		goto error_device_register;
+	d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc);
+	return 0;
+
+error_device_register:
+	release_resource(&umc->resource);
+error_request_resource:
+	d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err);
+	return err;
+}
+EXPORT_SYMBOL_GPL(umc_device_register);
+
+/**
+ * umc_device_unregister - unregister a UMC device
+ * @umc: pointer to the UMC device
+ *
+ * First we unregister the device so the driver can do its resource
+ * release work, and then we release any leftover resources. We take a
+ * ref to the device to make sure it doesn't disappear under our feet.
+ */
+void umc_device_unregister(struct umc_dev *umc)
+{
+	struct device *dev;
+	if (!umc)
+		return;
+	dev = get_device(&umc->dev);
+	d_fnstart(3, dev, "(umc_dev %p)\n", umc);
+	device_unregister(&umc->dev);
+	release_resource(&umc->resource);
+	d_fnend(3, dev, "(umc_dev %p) = void\n", umc);
+	put_device(dev);
+}
+EXPORT_SYMBOL_GPL(umc_device_unregister);
diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c
new file mode 100644
index 0000000..367b5eb8
--- /dev/null
+++ b/drivers/uwb/umc-drv.c
@@ -0,0 +1,31 @@
+/*
+ * UWB Multi-interface Controller driver management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb/umc.h>
+
+int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
+			  const char *mod_name)
+{
+	umc_drv->driver.name     = umc_drv->name;
+	umc_drv->driver.owner    = module;
+	umc_drv->driver.mod_name = mod_name;
+	umc_drv->driver.bus      = &umc_bus_type;
+
+	return driver_register(&umc_drv->driver);
+}
+EXPORT_SYMBOL_GPL(__umc_driver_register);
+
+/**
+ * umc_driver_unregister - unregister a UMC capability driver.
+ * @umc_drv:  pointer to the driver.
+ */
+void umc_driver_unregister(struct umc_driver *umc_drv)
+{
+	driver_unregister(&umc_drv->driver);
+}
+EXPORT_SYMBOL_GPL(umc_driver_unregister);
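A sketch (not part of the patch) of a capability driver built on this bus, using umc_match_pci_id() from umc-bus.c as the ->match hook; EXAMPLE_CAP_ID, the PCI IDs and the example_* functions are all hypothetical.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uwb/umc.h>

#define EXAMPLE_CAP_ID 0x42			/* hypothetical capability ID */

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ },
};

static int example_probe(struct umc_dev *umc)
{
	/* ioremap umc->resource, set up the capability, etc. */
	return 0;
}

static void example_remove(struct umc_dev *umc)
{
	/* undo example_probe() */
}

static struct umc_driver example_umc_driver = {
	.name       = "example",
	.cap_id     = EXAMPLE_CAP_ID,
	.match      = umc_match_pci_id,
	.match_data = example_pci_ids,
	.probe      = example_probe,
	.remove     = example_remove,
};

static int __init example_init(void)
{
	return __umc_driver_register(&example_umc_driver, THIS_MODULE,
				     KBUILD_MODNAME);
}
module_init(example_init);

static void __exit example_exit(void)
{
	umc_driver_unregister(&example_umc_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");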
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
new file mode 100644
index 0000000..6d232c35
--- /dev/null
+++ b/drivers/uwb/uwb-debug.c
@@ -0,0 +1,367 @@
+/*
+ * Ultra Wide Band
+ * Debug support
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+#include <linux/uwb/debug-cmd.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+#include "uwb-internal.h"
+
+void dump_bytes(struct device *dev, const void *_buf, size_t rsize)
+{
+	const char *buf = _buf;
+	char line[32];
+	size_t offset = 0;
+	int cnt, cnt2;
+	for (cnt = 0; cnt < rsize; cnt += 8) {
+		size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8;
+		for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) {
+			offset += scnprintf(line + offset, sizeof(line) - offset,
+					    "%02x ", buf[cnt + cnt2] & 0xff);
+		}
+		if (dev)
+			dev_info(dev, "%s\n", line);
+		else
+			printk(KERN_INFO "%s\n", line);
+	}
+}
+EXPORT_SYMBOL_GPL(dump_bytes);
+
+/*
+ * Debug interface
+ *
+ * Per radio controller debugfs files (in uwb/uwbN/):
+ *
+ * command: Flexible command interface (see <linux/uwb/debug-cmd.h>).
+ *
+ * reservations: information on reservations.
+ *
+ * accept: Set to true (Y or 1) to accept reservation requests from
+ * peers.
+ *
+ * drp_avail: DRP availability information.
+ */
+
+struct uwb_dbg {
+	struct uwb_pal pal;
+
+	u32 accept;
+	struct list_head rsvs;
+
+	struct dentry *root_d;
+	struct dentry *command_f;
+	struct dentry *reservations_f;
+	struct dentry *accept_f;
+	struct dentry *drp_avail_f;
+};
+
+static struct dentry *root_dir;
+
+static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_dev_addr devaddr;
+	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
+
+	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
+	if (rsv->target.type == UWB_RSV_TARGET_DEV)
+		devaddr = rsv->target.dev->dev_addr;
+	else
+		devaddr = rsv->target.devaddr;
+	uwb_dev_addr_print(target, sizeof(target), &devaddr);
+
+	dev_dbg(dev, "debug: rsv %s -> %s: %s\n",
+		owner, target, uwb_rsv_state_str(rsv->state));
+}
+
+static int cmd_rsv_establish(struct uwb_rc *rc,
+			     struct uwb_dbg_cmd_rsv_establish *cmd)
+{
+	struct uwb_mac_addr macaddr;
+	struct uwb_rsv *rsv;
+	struct uwb_dev *target;
+	int ret;
+
+	memcpy(&macaddr, cmd->target, sizeof(macaddr));
+	target = uwb_dev_get_by_macaddr(rc, &macaddr);
+	if (target == NULL)
+		return -ENODEV;
+
+	rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL);
+	if (rsv == NULL) {
+		uwb_dev_put(target);
+		return -ENOMEM;
+	}
+
+	rsv->owner       = &rc->uwb_dev;
+	rsv->target.type = UWB_RSV_TARGET_DEV;
+	rsv->target.dev  = target;
+	rsv->type        = cmd->type;
+	rsv->max_mas     = cmd->max_mas;
+	rsv->min_mas     = cmd->min_mas;
+	rsv->sparsity    = cmd->sparsity;
+
+	ret = uwb_rsv_establish(rsv);
+	if (ret)
+		uwb_rsv_destroy(rsv);
+	else
+		list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
+
+	return ret;
+}
+
+static int cmd_rsv_terminate(struct uwb_rc *rc,
+			     struct uwb_dbg_cmd_rsv_terminate *cmd)
+{
+	struct uwb_rsv *rsv, *found = NULL;
+	int i = 0;
+
+	list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
+		if (i == cmd->index) {
+			found = rsv;
+			break;
+		}
+		i++;	/* advance the index; otherwise only index 0 can match */
+	}
+	if (!found)
+		return -EINVAL;
+
+	list_del(&found->pal_node);
+	uwb_rsv_terminate(found);
+
+	return 0;
+}
+
+static int command_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t command_write(struct file *file, const char __user *buf,
+			 size_t len, loff_t *off)
+{
+	struct uwb_rc *rc = file->private_data;
+	struct uwb_dbg_cmd cmd;
+	int ret;
+
+	if (len != sizeof(struct uwb_dbg_cmd))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, buf, len) != 0)
+		return -EFAULT;
+
+	switch (cmd.type) {
+	case UWB_DBG_CMD_RSV_ESTABLISH:
+		ret = cmd_rsv_establish(rc, &cmd.rsv_establish);
+		break;
+	case UWB_DBG_CMD_RSV_TERMINATE:
+		ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret < 0 ? ret : len;
+}
+
+static struct file_operations command_fops = {
+	.open   = command_open,
+	.write  = command_write,
+	.read   = NULL,
+	.llseek = no_llseek,
+	.owner  = THIS_MODULE,
+};
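An illustrative user-space sketch (not part of the patch) of driving the command file: terminate the first reservation created through this debug interface on uwb0. It assumes debugfs is mounted at /sys/kernel/debug and that <linux/uwb/debug-cmd.h> is available to user space.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/uwb/debug-cmd.h>

int main(void)
{
	struct uwb_dbg_cmd cmd;
	int fd;

	fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);
	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = UWB_DBG_CMD_RSV_TERMINATE;
	cmd.rsv_terminate.index = 0;	/* first reservation in the debug list */

	/* command_write() only accepts exactly sizeof(struct uwb_dbg_cmd) */
	if (write(fd, &cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}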
+
+static int reservations_print(struct seq_file *s, void *p)
+{
+	struct uwb_rc *rc = s->private;
+	struct uwb_rsv *rsv;
+
+	mutex_lock(&rc->rsvs_mutex);
+
+	list_for_each_entry(rsv, &rc->reservations, rc_node) {
+		struct uwb_dev_addr devaddr;
+		char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
+		bool is_owner;
+		char buf[72];
+
+		uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
+		if (rsv->target.type == UWB_RSV_TARGET_DEV) {
+			devaddr = rsv->target.dev->dev_addr;
+			is_owner = &rc->uwb_dev == rsv->owner;
+		} else {
+			devaddr = rsv->target.devaddr;
+			is_owner = true;
+		}
+		uwb_dev_addr_print(target, sizeof(target), &devaddr);
+
+		seq_printf(s, "%c %s -> %s: %s\n",
+			   is_owner ? 'O' : 'T',
+			   owner, target, uwb_rsv_state_str(rsv->state));
+		seq_printf(s, "  stream: %d  type: %s\n",
+			   rsv->stream, uwb_rsv_type_str(rsv->type));
+		bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
+		seq_printf(s, "  %s\n", buf);
+	}
+
+	mutex_unlock(&rc->rsvs_mutex);
+
+	return 0;
+}
+
+static int reservations_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, reservations_print, inode->i_private);
+}
+
+static struct file_operations reservations_fops = {
+	.open    = reservations_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.owner   = THIS_MODULE,
+};
+
+static int drp_avail_print(struct seq_file *s, void *p)
+{
+	struct uwb_rc *rc = s->private;
+	char buf[72];
+
+	bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS);
+	seq_printf(s, "global:  %s\n", buf);
+	bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS);
+	seq_printf(s, "local:   %s\n", buf);
+	bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS);
+	seq_printf(s, "pending: %s\n", buf);
+
+	return 0;
+}
+
+static int drp_avail_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, drp_avail_print, inode->i_private);
+}
+
+static struct file_operations drp_avail_fops = {
+	.open    = drp_avail_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.owner   = THIS_MODULE,
+};
+
+static void uwb_dbg_new_rsv(struct uwb_rsv *rsv)
+{
+	struct uwb_rc *rc = rsv->rc;
+
+	if (rc->dbg->accept)
+		uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL);
+}
+
+/**
+ * uwb_dbg_add_rc - add a debug interface for a radio controller
+ * @rc: the radio controller
+ */
+void uwb_dbg_add_rc(struct uwb_rc *rc)
+{
+	rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL);
+	if (rc->dbg == NULL)
+		return;
+
+	INIT_LIST_HEAD(&rc->dbg->rsvs);
+
+	uwb_pal_init(&rc->dbg->pal);
+	rc->dbg->pal.new_rsv = uwb_dbg_new_rsv;
+	uwb_pal_register(rc, &rc->dbg->pal);
+	if (root_dir) {
+		rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev),
+						     root_dir);
+		rc->dbg->command_f = debugfs_create_file("command", 0200,
+							 rc->dbg->root_d, rc,
+							 &command_fops);
+		rc->dbg->reservations_f = debugfs_create_file("reservations", 0444,
+							      rc->dbg->root_d, rc,
+							      &reservations_fops);
+		rc->dbg->accept_f = debugfs_create_bool("accept", 0644,
+							rc->dbg->root_d,
+							&rc->dbg->accept);
+		rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444,
+							   rc->dbg->root_d, rc,
+							   &drp_avail_fops);
+	}
+}
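+
+/*
+ * For reference (comment added for clarity): with debugfs mounted in
+ * its usual location this creates, per radio controller,
+ *
+ *	/sys/kernel/debug/uwb/<rc device name>/command       (write only)
+ *	/sys/kernel/debug/uwb/<rc device name>/reservations  (read only)
+ *	/sys/kernel/debug/uwb/<rc device name>/accept        (read/write)
+ *	/sys/kernel/debug/uwb/<rc device name>/drp_avail     (read only)
+ */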
+
+/**
+ * uwb_dbg_del_rc - remove a radio controller's debug interface
+ * @rc: the radio controller
+ */
+void uwb_dbg_del_rc(struct uwb_rc *rc)
+{
+	struct uwb_rsv *rsv, *t;
+
+	if (rc->dbg == NULL)
+		return;
+
+	list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
+		uwb_rsv_destroy(rsv);
+	}
+
+	uwb_pal_unregister(rc, &rc->dbg->pal);
+
+	if (root_dir) {
+		debugfs_remove(rc->dbg->drp_avail_f);
+		debugfs_remove(rc->dbg->accept_f);
+		debugfs_remove(rc->dbg->reservations_f);
+		debugfs_remove(rc->dbg->command_f);
+		debugfs_remove(rc->dbg->root_d);
+	}
+}
+
+/**
+ * uwb_dbg_init - initialize the debug interface sub-module
+ */
+void uwb_dbg_init(void)
+{
+	root_dir = debugfs_create_dir("uwb", NULL);
+}
+
+/**
+ * uwb_dbg_exit - clean-up the debug interface sub-module
+ */
+void uwb_dbg_exit(void)
+{
+	debugfs_remove(root_dir);
+}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
new file mode 100644
index 0000000..2ad307d
--- /dev/null
+++ b/drivers/uwb/uwb-internal.h
@@ -0,0 +1,305 @@
+/*
+ * Ultra Wide Band
+ * UWB internal API
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * This contains most of the internal API for UWB. This is stuff used
+ * across the stack that, of course, is of no interest to the rest.
+ *
+ * Some parts might end up going public (like uwb_rc_*())...
+ */
+
+#ifndef __UWB_INTERNAL_H__
+#define __UWB_INTERNAL_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/uwb.h>
+#include <linux/mutex.h>
+
+struct uwb_beca_e;
+
+/* General device API */
+extern void uwb_dev_init(struct uwb_dev *uwb_dev);
+extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *);
+extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
+		       struct uwb_rc *parent_rc);
+extern void uwb_dev_rm(struct uwb_dev *uwb_dev);
+extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *);
+extern void uwbd_dev_offair(struct uwb_beca_e *);
+void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event);
+
+/* General UWB Radio Controller Internal API */
+extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *);
+static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
+{
+	uwb_dev_get(&rc->uwb_dev);
+	return rc;
+}
+
+static inline void __uwb_rc_put(struct uwb_rc *rc)
+{
+	uwb_dev_put(&rc->uwb_dev);
+}
+
+extern int uwb_rc_reset(struct uwb_rc *rc);
+extern int uwb_rc_beacon(struct uwb_rc *rc,
+			 int channel, unsigned bpst_offset);
+extern int uwb_rc_scan(struct uwb_rc *rc,
+		       unsigned channel, enum uwb_scan_type type,
+		       unsigned bpst_offset);
+extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc);
+extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t);
+extern void uwb_rc_ie_init(struct uwb_rc *);
+extern ssize_t uwb_rc_ie_setup(struct uwb_rc *);
+extern void uwb_rc_ie_release(struct uwb_rc *);
+extern int uwb_rc_ie_add(struct uwb_rc *,
+			 const struct uwb_ie_hdr *, size_t);
+extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
+
+extern const char *uwb_rc_strerror(unsigned code);
+
+/*
+ * Time to wait for a response to an RC command.
+ *
+ * Some commands can take a long time to complete; e.g., START_BEACON
+ * may scan for several superframes before joining an existing beacon
+ * group and this can take around 600 ms.
+ */
+#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */
+
+/*
+ * Notification/Event Handlers
+ */
+
+struct uwb_rc_neh;
+
+void uwb_rc_neh_create(struct uwb_rc *rc);
+void uwb_rc_neh_destroy(struct uwb_rc *rc);
+
+struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
+				  u8 expected_type, u16 expected_event,
+				  uwb_rc_cmd_cb_f cb, void *arg);
+void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
+void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
+void uwb_rc_neh_put(struct uwb_rc_neh *neh);
+
+/* Event size tables */
+extern int uwb_est_create(void);
+extern void uwb_est_destroy(void);
+
+
+/*
+ * UWB Events & management daemon
+ */
+
+/**
+ * enum uwb_event_type - types of UWB management daemon events
+ *
+ * The UWB management daemon (uwbd) can receive two types of events:
+ *   UWB_EVT_TYPE_NOTIF - notification from the radio controller.
+ *   UWB_EVT_TYPE_MSG   - a simple message.
+ */
+enum uwb_event_type {
+	UWB_EVT_TYPE_NOTIF,
+	UWB_EVT_TYPE_MSG,
+};
+
+/**
+ * struct uwb_event_notif - an event for a radio controller notification
+ * @size: Size of the buffer (ie: Guaranteed to contain at least
+ *        a full 'struct uwb_rceb')
+ * @rceb: Pointer to a kmalloced() event payload
+ */
+struct uwb_event_notif {
+	size_t size;
+	struct uwb_rceb *rceb;
+};
+
+/**
+ * enum uwb_event_message - an event for a message for asynchronous processing
+ *
+ * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware.
+ */
+enum uwb_event_message {
+	UWB_EVT_MSG_RESET,
+};
+
+/**
+ * UWB Event
+ * @rc:         Radio controller that emitted the event (referenced)
+ * @ts_jiffies: Timestamp, when was it received
+ * @type:       This event's type.
+ */
+struct uwb_event {
+	struct list_head list_node;
+	struct uwb_rc *rc;
+	unsigned long ts_jiffies;
+	enum uwb_event_type type;
+	union {
+		struct uwb_event_notif notif;
+		enum uwb_event_message message;
+	};
+};
+
+extern void uwbd_start(void);
+extern void uwbd_stop(void);
+extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
+extern void uwbd_event_queue(struct uwb_event *);
+void uwbd_flush(struct uwb_rc *rc);
+
+/* UWB event handlers */
+extern int uwbd_evt_handle_rc_beacon(struct uwb_event *);
+extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *);
+extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *);
+extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *);
+extern int uwbd_evt_handle_rc_drp(struct uwb_event *);
+extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *);
+
+int uwbd_msg_handle_reset(struct uwb_event *evt);
+
+
+/*
+ * Address management
+ */
+int uwb_rc_dev_addr_assign(struct uwb_rc *rc);
+int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt);
+
+/*
+ * UWB Beacon Cache
+ *
+ * Each beacon we receive is kept in a cache--when we receive that
+ * beacon consistently, that means there is a new device that we have
+ * to add to the system.
+ */
+
+extern unsigned long beacon_timeout_ms;
+
+/** Beacon cache list */
+struct uwb_beca {
+	struct list_head list;
+	size_t entries;
+	struct mutex mutex;
+};
+
+extern struct uwb_beca uwb_beca;
+
+/**
+ * Beacon cache entry
+ *
+ * @ts_jiffies: last time a beacon was received that refreshed
+ *                   this cache entry.
+ * @uwb_dev: device connected to this beacon. This pointer is not
+ *           safe, you need to get it with uwb_dev_try_get()
+ *
+ * @hits: how many times we have seen this beacon since last time we
+ *        cleared it
+ */
+struct uwb_beca_e {
+	struct mutex mutex;
+	struct kref refcnt;
+	struct list_head node;
+	struct uwb_mac_addr *mac_addr;
+	struct uwb_dev_addr dev_addr;
+	u8 hits;
+	unsigned long ts_jiffies;
+	struct uwb_dev *uwb_dev;
+	struct uwb_rc_evt_beacon *be;
+	struct stats lqe_stats, rssi_stats;	/* radio statistics */
+};
+struct uwb_beacon_frame;
+extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *,
+				 char *, size_t);
+extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *,
+					 struct uwb_beacon_frame *,
+					 unsigned long);
+
+extern void uwb_bce_kfree(struct kref *_bce);
+static inline void uwb_bce_get(struct uwb_beca_e *bce)
+{
+	kref_get(&bce->refcnt);
+}
+static inline void uwb_bce_put(struct uwb_beca_e *bce)
+{
+	kref_put(&bce->refcnt, uwb_bce_kfree);
+}
+extern void uwb_beca_purge(void);
+extern void uwb_beca_release(void);
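+
+/*
+ * Usage sketch (comment added for clarity): code that wants to keep a
+ * cache entry alive after dropping whatever lock was used to find it
+ * pins it with the helpers above:
+ *
+ *	uwb_bce_get(bce);
+ *	... inspect bce->dev_addr, bce->hits, bce->ts_jiffies ...
+ *	uwb_bce_put(bce);
+ *
+ * Note that bce->uwb_dev is not protected by this reference; as the
+ * struct documentation says, it has to be pinned separately (e.g. with
+ * uwb_dev_try_get()).
+ */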
+
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+				       const struct uwb_dev_addr *devaddr);
+struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
+				       const struct uwb_mac_addr *macaddr);
+
+/* -- UWB Sysfs representation */
+extern struct class uwb_rc_class;
+extern struct device_attribute dev_attr_mac_address;
+extern struct device_attribute dev_attr_beacon;
+extern struct device_attribute dev_attr_scan;
+
+/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */
+void uwb_rsv_init(struct uwb_rc *rc);
+int uwb_rsv_setup(struct uwb_rc *rc);
+void uwb_rsv_cleanup(struct uwb_rc *rc);
+
+void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
+void uwb_rsv_remove(struct uwb_rsv *rsv);
+struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
+			     struct uwb_ie_drp *drp_ie);
+void uwb_rsv_sched_update(struct uwb_rc *rc);
+
+void uwb_drp_handle_timeout(struct uwb_rsv *rsv);
+int uwb_drp_ie_update(struct uwb_rsv *rsv);
+void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
+
+void uwb_drp_avail_init(struct uwb_rc *rc);
+int  uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+void uwb_drp_avail_ie_update(struct uwb_rc *rc);
+
+/* -- PAL support */
+void uwb_rc_pal_init(struct uwb_rc *rc);
+
+/* -- Misc */
+
+extern ssize_t uwb_mac_frame_hdr_print(char *, size_t,
+				       const struct uwb_mac_frame_hdr *);
+
+/* -- Debug interface */
+void uwb_dbg_init(void);
+void uwb_dbg_exit(void);
+void uwb_dbg_add_rc(struct uwb_rc *rc);
+void uwb_dbg_del_rc(struct uwb_rc *rc);
+
+/* Workarounds for version specific stuff */
+
+static inline void uwb_dev_lock(struct uwb_dev *uwb_dev)
+{
+	down(&uwb_dev->dev.sem);
+}
+
+static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev)
+{
+	up(&uwb_dev->dev.sem);
+}
+
+#endif /* #ifndef __UWB_INTERNAL_H__ */
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
new file mode 100644
index 0000000..7890841
--- /dev/null
+++ b/drivers/uwb/uwbd.c
@@ -0,0 +1,410 @@
+/*
+ * Ultra Wide Band
+ * Neighborhood Management Daemon
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This daemon takes care of maintaining information that describes the
+ * UWB neighborhood that the radios in this machine can see. It also
+ * keeps tabs on which devices are visible, makes sure each HC sits
+ * on a different channel to avoid interfering, etc.
+ *
+ * Different drivers (radio controller, device, any API in general)
+ * communicate with this daemon through an event queue. Daemon wakes
+ * up, takes a list of events and handles them one by one; handling
+ * function is extracted from a table based on the event's type and
+ * subtype. Events are freed only if the handling function says so.
+ *
+ *   . Lock protecting the event list has to be a spinlock and locked
+ *     with IRQSAVE because it might be called from an interrupt
+ *     context (ie: when events arrive and the notification drops
+ *     down from the ISR).
+ *
+ *   . UWB radio controller drivers queue events to the daemon using
+ *     uwbd_event_queue(). They just get the event, chew it to make it
+ *     look like UWBD likes it and pass it in a buffer allocated with
+ *     uwb_event_alloc().
+ *
+ * EVENTS
+ *
+ * Events have a type, a subtype, a length, some other stuff and the
+ * data blob, which depends on the event. The header is 'struct
+ * uwb_event'; for payloads, see 'struct uwbd_evt_*'.
+ *
+ * EVENT HANDLER TABLES
+ *
+ * To find a handling function for an event, the type is used to index
+ * a subtype-table in the type-table. The subtype-table is indexed
+ * with the subtype to get the function that handles the event. Start
+ * with the main type-table 'uwbd_evt_type_handler'.
+ *
+ * DEVICES
+ *
+ * Devices are created when a bunch of beacons have been received and
+ * it is established that the device has stable radio presence. CREATED
+ * only, not configured. Devices are ONLY configured when an
+ * Application-Specific IE Probe is received, in which the device
+ * declares which Protocol ID it groks. Then the device is CONFIGURED
+ * (and the driver->probe() stuff of the device model is invoked).
+ *
+ * Devices are considered disconnected when a certain number of
+ * beacons are not received in an amount of time.
+ *
+ * Handler functions are normally named uwbd_evt_handle_*().
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+
+/**
+ * UWBD Event handler function signature
+ *
+ * Return !0 if the event should not be freed (ie the handler has
+ * taken ownership of it). 0 means the daemon code will free the
+ * event.
+ *
+ * @evt->rc is already referenced and guaranteed to exist. See
+ * uwb_evt_handle().
+ */
+typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
+
+/**
+ * Properties of a UWBD event
+ *
+ * @handler:    the function that will handle this event
+ * @name:       text name of event
+ */
+struct uwbd_event {
+	uwbd_evt_handler_f handler;
+	const char *name;
+};
+
+/** Table of handlers for and properties of the UWBD Radio Control Events */
+static
+struct uwbd_event uwbd_events[] = {
+	[UWB_RC_EVT_BEACON] = {
+		.handler = uwbd_evt_handle_rc_beacon,
+		.name = "BEACON_RECEIVED"
+	},
+	[UWB_RC_EVT_BEACON_SIZE] = {
+		.handler = uwbd_evt_handle_rc_beacon_size,
+		.name = "BEACON_SIZE_CHANGE"
+	},
+	[UWB_RC_EVT_BPOIE_CHANGE] = {
+		.handler = uwbd_evt_handle_rc_bpoie_change,
+		.name = "BPOIE_CHANGE"
+	},
+	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
+		.handler = uwbd_evt_handle_rc_bp_slot_change,
+		.name = "BP_SLOT_CHANGE"
+	},
+	[UWB_RC_EVT_DRP_AVAIL] = {
+		.handler = uwbd_evt_handle_rc_drp_avail,
+		.name = "DRP_AVAILABILITY_CHANGE"
+	},
+	[UWB_RC_EVT_DRP] = {
+		.handler = uwbd_evt_handle_rc_drp,
+		.name = "DRP"
+	},
+	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
+		.handler = uwbd_evt_handle_rc_dev_addr_conflict,
+		.name = "DEV_ADDR_CONFLICT",
+	},
+};
+
+
+
+struct uwbd_evt_type_handler {
+	const char *name;
+	struct uwbd_event *uwbd_events;
+	size_t size;
+};
+
+#define UWBD_EVT_TYPE_HANDLER(n,a) {		\
+	.name = (n),				\
+	.uwbd_events = (a),			\
+	.size = sizeof(a)/sizeof((a)[0])	\
+}
+
+
+/** Table of handlers for each UWBD Event type. */
+static
+struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = {
+	[UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events)
+};
+
+static const
+size_t uwbd_evt_type_handlers_len =
+	sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]);
+
+static const struct uwbd_event uwbd_message_handlers[] = {
+	[UWB_EVT_MSG_RESET] = {
+		.handler = uwbd_msg_handle_reset,
+		.name = "reset",
+	},
+};
+
+static DEFINE_MUTEX(uwbd_event_mutex);
+
+/**
+ * Handle an URC event passed to the UWB Daemon
+ *
+ * @evt: the event to handle
+ * @returns: 0 if the event can be kfreed, !0 on the contrary
+ *           (somebody else took ownership) [coincidentally, returning
+ *           a <0 errno code will free it :)].
+ *
+ * Looks up the two indirection tables (one for the type, one for the
+ * subtype) to decide which function handles it and then calls the
+ * handler.
+ *
+ * The event structure passed to the event handler has the radio
+ * controller in @evt->rc referenced. The reference will be dropped
+ * once the handler returns, so if it needs it for longer (async),
+ * it'll need to take another one.
+ */
+static
+int uwbd_event_handle_urc(struct uwb_event *evt)
+{
+	struct uwbd_evt_type_handler *type_table;
+	uwbd_evt_handler_f handler;
+	u8 type, context;
+	u16 event;
+
+	type = evt->notif.rceb->bEventType;
+	event = le16_to_cpu(evt->notif.rceb->wEvent);
+	context = evt->notif.rceb->bEventContext;
+
+	if (type >= uwbd_evt_type_handlers_len) {
+		printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type);
+		return -EINVAL;
+	}
+	type_table = &uwbd_evt_type_handlers[type];
+	if (type_table->uwbd_events == NULL) {
+		printk(KERN_ERR "UWBD: event type %u: unknown\n", type);
+		return -EINVAL;
+	}
+	if (event >= type_table->size) {
+		printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n",
+		       type_table->name, event);
+		return -EINVAL;
+	}
+	handler = type_table->uwbd_events[event].handler;
+	if (handler == NULL) {
+		printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event);
+		return -EINVAL;
+	}
+	return (*handler)(evt);
+}
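+
+/*
+ * Worked example of the lookup above (comment added for clarity, based
+ * only on the tables defined in this file): a beacon notification
+ * arrives with bEventType == UWB_RC_CET_GENERAL and wEvent ==
+ * UWB_RC_EVT_BEACON.  The type indexes uwbd_evt_type_handlers[] and
+ * selects the "RC" subtype table (uwbd_events[]); the event then
+ * indexes that table and selects uwbd_evt_handle_rc_beacon(), which is
+ * what finally runs with the event.
+ */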
+
+static void uwbd_event_handle_message(struct uwb_event *evt)
+{
+	struct uwb_rc *rc;
+	int result;
+
+	rc = evt->rc;
+
+	if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
+		dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
+		return;
+	}
+
+	/* If this is a reset event we need to drop the
+	 * uwbd_event_mutex or it deadlocks when the reset handler
+	 * attempts to flush the uwbd events. */
+	if (evt->message == UWB_EVT_MSG_RESET)
+		mutex_unlock(&uwbd_event_mutex);
+
+	result = uwbd_message_handlers[evt->message].handler(evt);
+	if (result < 0)
+		dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
+			uwbd_message_handlers[evt->message].name, result);
+
+	if (evt->message == UWB_EVT_MSG_RESET)
+		mutex_lock(&uwbd_event_mutex);
+}
+
+static void uwbd_event_handle(struct uwb_event *evt)
+{
+	struct uwb_rc *rc;
+	int should_keep;
+
+	rc = evt->rc;
+
+	if (rc->ready) {
+		switch (evt->type) {
+		case UWB_EVT_TYPE_NOTIF:
+			should_keep = uwbd_event_handle_urc(evt);
+			if (should_keep <= 0)
+				kfree(evt->notif.rceb);
+			break;
+		case UWB_EVT_TYPE_MSG:
+			uwbd_event_handle_message(evt);
+			break;
+		default:
+			dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
+			break;
+		}
+	}
+
+	__uwb_rc_put(rc);	/* for the __uwb_rc_get() in uwb_rc_notif_cb() */
+}
+/* The UWB Daemon */
+
+
+/** Daemon's PID: used to decide if we can queue or not */
+static int uwbd_pid;
+/** Daemon's task struct for managing the kthread */
+static struct task_struct *uwbd_task;
+/** Daemon's waitqueue for waiting for new events */
+static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq);
+/** Daemon's list of events; we queue/dequeue here */
+static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list);
+/** Daemon's list lock to protect concurent access */
+static DEFINE_SPINLOCK(uwbd_event_list_lock);
+
+
+/**
+ * UWB Daemon
+ *
+ * Listens to all UWB notifications and takes care to track the state
+ * of the UWB neighborhood for the kernel. When we do a run, we
+ * spinlock, move the list to a private copy and release the
+ * lock. Hold it as little as possible. Not a conflict: it is
+ * guaranteed we own the events in the private list.
+ *
+ * FIXME: should change so we don't have a 1HZ timer all the time, but
+ *        only if there are devices.
+ */
+static int uwbd(void *unused)
+{
+	unsigned long flags;
+	struct list_head list = LIST_HEAD_INIT(list);
+	struct uwb_event *evt, *nxt;
+	int should_stop = 0;
+	while (1) {
+		wait_event_interruptible_timeout(
+			uwbd_wq,
+			!list_empty(&uwbd_event_list)
+			  || (should_stop = kthread_should_stop()),
+			HZ);
+		if (should_stop)
+			break;
+		try_to_freeze();
+
+		mutex_lock(&uwbd_event_mutex);
+		spin_lock_irqsave(&uwbd_event_list_lock, flags);
+		list_splice_init(&uwbd_event_list, &list);
+		spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
+		list_for_each_entry_safe(evt, nxt, &list, list_node) {
+			list_del(&evt->list_node);
+			uwbd_event_handle(evt);
+			kfree(evt);
+		}
+		mutex_unlock(&uwbd_event_mutex);
+
+		uwb_beca_purge();	/* Purge devices that left */
+	}
+	return 0;
+}
+
+
+/** Start the UWB daemon */
+void uwbd_start(void)
+{
+	uwbd_task = kthread_run(uwbd, NULL, "uwbd");
+	if (uwbd_task == NULL)
+		printk(KERN_ERR "UWB: Cannot start management daemon; "
+		       "UWB won't work\n");
+	else
+		uwbd_pid = uwbd_task->pid;
+}
+
+/* Stop the UWB daemon and free any unprocessed events */
+void uwbd_stop(void)
+{
+	unsigned long flags;
+	struct uwb_event *evt, *nxt;
+	kthread_stop(uwbd_task);
+	spin_lock_irqsave(&uwbd_event_list_lock, flags);
+	uwbd_pid = 0;
+	list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
+		if (evt->type == UWB_EVT_TYPE_NOTIF)
+			kfree(evt->notif.rceb);
+		kfree(evt);
+	}
+	spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
+	uwb_beca_release();
+}
+
+/*
+ * Queue an event for the management daemon
+ *
+ * When some lower layer receives an event, it uses this function to
+ * push it forward to the UWB daemon.
+ *
+ * Once you pass the event, you don't own it any more, but the daemon
+ * does. It will uwb_event_free() it when done, so make sure you
+ * uwb_event_alloc()ed it or bad things will happen.
+ *
+ * If the daemon is not running, we just free the event.
+ */
+void uwbd_event_queue(struct uwb_event *evt)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&uwbd_event_list_lock, flags);
+	if (uwbd_pid != 0) {
+		list_add(&evt->list_node, &uwbd_event_list);
+		wake_up_all(&uwbd_wq);
+	} else {
+		__uwb_rc_put(evt->rc);
+		if (evt->type == UWB_EVT_TYPE_NOTIF)
+			kfree(evt->notif.rceb);
+		kfree(evt);
+	}
+	spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
+	return;
+}
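+
+/*
+ * Illustrative sketch (comment only, not functional code in this
+ * patch) of how a producer -- typically the radio controller's
+ * notification path -- hands an event to the daemon, following the
+ * ownership rules documented above.  How the rceb copy is allocated
+ * and how its size is obtained are left out; the names rceb and size
+ * below stand in for whatever the caller has.
+ *
+ *	struct uwb_event *evt;
+ *
+ *	evt = uwb_event_alloc(sizeof(*evt), GFP_ATOMIC);
+ *	if (evt == NULL)
+ *		return;				// drop the notification
+ *	evt->rc = __uwb_rc_get(rc);		// put by uwbd_event_handle()
+ *	evt->ts_jiffies = jiffies;
+ *	evt->type = UWB_EVT_TYPE_NOTIF;
+ *	evt->notif.size = size;			// >= sizeof(struct uwb_rceb)
+ *	evt->notif.rceb = rceb;			// kmalloc()ed, freed by uwbd
+ *	uwbd_event_queue(evt);
+ */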
+
+void uwbd_flush(struct uwb_rc *rc)
+{
+	struct uwb_event *evt, *nxt;
+
+	mutex_lock(&uwbd_event_mutex);
+
+	spin_lock_irq(&uwbd_event_list_lock);
+	list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
+		if (evt->rc == rc) {
+			__uwb_rc_put(rc);
+			list_del(&evt->list_node);
+			if (evt->type == UWB_EVT_TYPE_NOTIF)
+				kfree(evt->notif.rceb);
+			kfree(evt);
+		}
+	}
+	spin_unlock_irq(&uwbd_event_list_lock);
+
+	mutex_unlock(&uwbd_event_mutex);
+}
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
new file mode 100644
index 0000000..1711dea
--- /dev/null
+++ b/drivers/uwb/whc-rc.c
@@ -0,0 +1,520 @@
+/*
+ * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
+ * Radio Control command/event transport to the UWB stack
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Initialize and hook up the Radio Control interface.
+ *
+ * For each device probed, creates an 'struct whcrc' which contains
+ * just the representation of the UWB Radio Controller, and the logic
+ * for reading notifications and passing them to the UWB Core.
+ *
+ * So we initialize all of those, register the UWB Radio Controller
+ * and setup the notification/event handle to pipe the notifications
+ * to the UWB management Daemon.
+ *
+ * Once uwb_rc_add() is called, the UWB stack takes control, resets
+ * the radio and readies the device to take commands from the UWB
+ * API/user-space.
+ *
+ * Note this driver is just a transport driver; the commands are
+ * formed at the UWB stack and given to this driver, which will deliver
+ * them to the hw and transfer the replies/notifications back to the
+ * UWB stack through the UWB daemon (UWBD).
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/uwb.h>
+#include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * Descriptor for an instance of the UWB Radio Control Driver that
+ * attaches to the URC interface of the WHCI PCI card.
+ *
+ * Unless there is a lock specific to the 'data members', all access
+ * is protected by uwb_rc->mutex.
+ */
+struct whcrc {
+	struct umc_dev *umc_dev;
+	struct uwb_rc *uwb_rc;		/* UWB host controller */
+
+	unsigned long area;
+	void __iomem *rc_base;
+	size_t rc_len;
+	spinlock_t irq_lock;
+
+	void *evt_buf, *cmd_buf;
+	dma_addr_t evt_dma_buf, cmd_dma_buf;
+	wait_queue_head_t cmd_wq;
+	struct work_struct event_work;
+};
+
+/**
+ * Execute an UWB RC command on WHCI/RC
+ *
+ * @rc:       Instance of a Radio Controller that is a whcrc
+ * @cmd:      Buffer containing the RCCB and payload to execute
+ * @cmd_size: Size of the command buffer.
+ *
+ * We copy the command into whcrc->cmd_buf (as it is nicely
+ * aligned and physically contiguous) and then press the right keys in
+ * the controller's URCCMD register to get it to read it. We might
+ * have to wait on cmd_wq until the device is ready to accept a new
+ * command.
+ *
+ * NOTE: rc's mutex has to be locked
+ */
+static int whcrc_cmd(struct uwb_rc *uwb_rc,
+	      const struct uwb_rccb *cmd, size_t cmd_size)
+{
+	int result = 0;
+	struct whcrc *whcrc = uwb_rc->priv;
+	struct device *dev = &whcrc->umc_dev->dev;
+	u32 urccmd;
+
+	d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size);
+	might_sleep();
+
+	if (cmd_size >= 4096) {
+		result = -E2BIG;
+		goto error;
+	}
+
+	/*
+	 * If the URC is halted, then the hardware has reset itself.
+	 * Attempt to recover by restarting the device and then return
+	 * an error as it's likely that the current command isn't
+	 * valid for a newly started RC.
+	 */
+	if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
+		dev_err(dev, "requesting reset of halted radio controller\n");
+		uwb_rc_reset_all(uwb_rc);
+		result = -EIO;
+		goto error;
+	}
+
+	result = wait_event_timeout(whcrc->cmd_wq,
+		!(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
+	if (result == 0) {
+		dev_err(dev, "device is not ready to execute commands\n");
+		result = -ETIMEDOUT;
+		goto error;
+	}
+
+	memmove(whcrc->cmd_buf, cmd, cmd_size);
+	le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
+
+	spin_lock(&whcrc->irq_lock);
+	urccmd = le_readl(whcrc->rc_base + URCCMD);
+	urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
+	le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
+		  whcrc->rc_base + URCCMD);
+	spin_unlock(&whcrc->irq_lock);
+
+error:
+	d_fnend(3, dev, "(%p, %p, %zu) = %d\n",
+		uwb_rc, cmd, cmd_size, result);
+	return result;
+}
+
+static int whcrc_reset(struct uwb_rc *rc)
+{
+	struct whcrc *whcrc = rc->priv;
+
+	return umc_controller_reset(whcrc->umc_dev);
+}
+
+/**
+ * Reset event reception mechanism and tell hw we are ready to get more
+ *
+ * We have read all the events in the event buffer, so we are ready to
+ * reset it to the beginning.
+ *
+ * This is only called during initialization or after an event buffer
+ * has been retired.  This means we can be sure that event processing
+ * is disabled and it's safe to update the URCEVTADDR register.
+ *
+ * There's no need to wait for the event processing to start as the
+ * URC will not clear URCCMD_ACTIVE until (internal) event buffer
+ * space is available.
+ */
+static
+void whcrc_enable_events(struct whcrc *whcrc)
+{
+	struct device *dev = &whcrc->umc_dev->dev;
+	u32 urccmd;
+
+	d_fnstart(4, dev, "(whcrc %p)\n", whcrc);
+
+	le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
+
+	spin_lock(&whcrc->irq_lock);
+	urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
+	le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
+	spin_unlock(&whcrc->irq_lock);
+
+	d_fnend(4, dev, "(whcrc %p) = void\n", whcrc);
+}
+
+static void whcrc_event_work(struct work_struct *work)
+{
+	struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
+	struct device *dev = &whcrc->umc_dev->dev;
+	size_t size;
+	u64 urcevtaddr;
+
+	urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
+	size = urcevtaddr & URCEVTADDR_OFFSET_MASK;
+
+	d_printf(3, dev, "received %zu octet event\n", size);
+	d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size);
+
+	uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
+	whcrc_enable_events(whcrc);
+}
+
+/**
+ * Catch interrupts?
+ *
+ * We ack immediately (and expect the hw to do the right thing and
+ * raise another IRQ if things have changed :)
+ */
+static
+irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
+{
+	struct whcrc *whcrc = _whcrc;
+	struct device *dev = &whcrc->umc_dev->dev;
+	u32 urcsts;
+
+	urcsts = le_readl(whcrc->rc_base + URCSTS);
+	if (!(urcsts & URCSTS_INT_MASK))
+		return IRQ_NONE;
+	le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
+
+	d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n",
+		 le_readl(whcrc->rc_base + URCSTS), urcsts);
+
+	if (urcsts & URCSTS_HSE) {
+		dev_err(dev, "host system error -- hardware halted\n");
+		/* FIXME: do something sensible here */
+		goto out;
+	}
+	if (urcsts & URCSTS_ER) {
+		d_printf(3, dev, "ER: event ready\n");
+		schedule_work(&whcrc->event_work);
+	}
+	if (urcsts & URCSTS_RCI) {
+		d_printf(3, dev, "RCI: ready to execute another command\n");
+		wake_up_all(&whcrc->cmd_wq);
+	}
+out:
+	return IRQ_HANDLED;
+}
+
+
+/**
+ * Initialize a UMC RC interface: map regions, get (shared) IRQ
+ */
+static
+int whcrc_setup_rc_umc(struct whcrc *whcrc)
+{
+	int result = 0;
+	struct device *dev = &whcrc->umc_dev->dev;
+	struct umc_dev *umc_dev = whcrc->umc_dev;
+
+	whcrc->area = umc_dev->resource.start;
+	whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
+	result = -EBUSY;
+	if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME)
+	    == NULL) {
+		dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
+			whcrc->rc_len, whcrc->area, result);
+		goto error_request_region;
+	}
+
+	whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
+	if (whcrc->rc_base == NULL) {
+		dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
+			whcrc->rc_len, whcrc->area, result);
+		goto error_ioremap_nocache;
+	}
+
+	result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
+			     KBUILD_MODNAME, whcrc);
+	if (result < 0) {
+		dev_err(dev, "can't allocate IRQ %d: %d\n",
+			umc_dev->irq, result);
+		goto error_request_irq;
+	}
+
+	result = -ENOMEM;
+	whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
+					    &whcrc->cmd_dma_buf, GFP_KERNEL);
+	if (whcrc->cmd_buf == NULL) {
+		dev_err(dev, "Can't allocate cmd transfer buffer\n");
+		goto error_cmd_buffer;
+	}
+
+	whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
+					    &whcrc->evt_dma_buf, GFP_KERNEL);
+	if (whcrc->evt_buf == NULL) {
+		dev_err(dev, "Can't allocate evt transfer buffer\n");
+		goto error_evt_buffer;
+	}
+	d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n",
+		 whcrc->rc_len, whcrc->rc_base, umc_dev->irq);
+	return 0;
+
+error_evt_buffer:
+	dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
+			  whcrc->cmd_dma_buf);
+error_cmd_buffer:
+	free_irq(umc_dev->irq, whcrc);
+error_request_irq:
+	iounmap(whcrc->rc_base);
+error_ioremap_nocache:
+	release_mem_region(whcrc->area, whcrc->rc_len);
+error_request_region:
+	return result;
+}
+
+
+/**
+ * Release RC's UMC resources
+ */
+static
+void whcrc_release_rc_umc(struct whcrc *whcrc)
+{
+	struct umc_dev *umc_dev = whcrc->umc_dev;
+
+	dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
+			  whcrc->evt_dma_buf);
+	dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
+			  whcrc->cmd_dma_buf);
+	free_irq(umc_dev->irq, whcrc);
+	iounmap(whcrc->rc_base);
+	release_mem_region(whcrc->area, whcrc->rc_len);
+}
+
+
+/**
+ * whcrc_start_rc - start a WHCI radio controller
+ * @whcrc: the radio controller to start
+ *
+ * Reset the UMC device, start the radio controller, enable events and
+ * finally enable interrupts.
+ */
+static int whcrc_start_rc(struct uwb_rc *rc)
+{
+	struct whcrc *whcrc = rc->priv;
+	int result = 0;
+	struct device *dev = &whcrc->umc_dev->dev;
+	unsigned long start, duration;
+
+	/* Reset the thing */
+	le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
+	if (d_test(3))
+		start = jiffies;
+	if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
+			  5000, "device to reset at init") < 0) {
+		result = -EBUSY;
+		goto error;
+	} else if (d_test(3)) {
+		duration = jiffies - start;
+		if (duration > msecs_to_jiffies(40))
+			dev_err(dev, "Device took %ums to "
+				     "reset. MAX expected: 40ms\n",
+				     jiffies_to_msecs(duration));
+	}
+
+	/* Set the event buffer, start the controller (enable IRQs later) */
+	le_writel(0, whcrc->rc_base + URCINTR);
+	le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
+	result = -ETIMEDOUT;
+	if (d_test(3))
+		start = jiffies;
+	if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
+			  5000, "device to start") < 0)
+		goto error;
+	if (d_test(3)) {
+		duration = jiffies - start;
+		if (duration > msecs_to_jiffies(40))
+			dev_err(dev, "Device took %ums to start. "
+				     "MAX expected: 40ms\n",
+				     jiffies_to_msecs(duration));
+	}
+	whcrc_enable_events(whcrc);
+	result = 0;
+	le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
+error:
+	return result;
+}
+
+
+/**
+ * whcrc_stop_rc - stop a WHCI radio controller
+ * @whcrc: the radio controller to stop
+ *
+ * Disable interrupts and cancel any pending event processing work
+ * before clearing the Run/Stop bit.
+ */
+static
+void whcrc_stop_rc(struct uwb_rc *rc)
+{
+	struct whcrc *whcrc = rc->priv;
+	struct umc_dev *umc_dev = whcrc->umc_dev;
+
+	le_writel(0, whcrc->rc_base + URCINTR);
+	cancel_work_sync(&whcrc->event_work);
+
+	le_writel(0, whcrc->rc_base + URCCMD);
+	whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
+		      URCSTS_HALTED, 0, 40, "URCSTS.HALTED");
+}
+
+static void whcrc_init(struct whcrc *whcrc)
+{
+	spin_lock_init(&whcrc->irq_lock);
+	init_waitqueue_head(&whcrc->cmd_wq);
+	INIT_WORK(&whcrc->event_work, whcrc_event_work);
+}
+
+/**
+ * Initialize the radio controller.
+ *
+ * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the
+ *       IRQ handler we use that to determine if the hw is ready to
+ *       handle events. Looks like a race condition, but it really is
+ *       not.
+ */
+static
+int whcrc_probe(struct umc_dev *umc_dev)
+{
+	int result;
+	struct uwb_rc *uwb_rc;
+	struct whcrc *whcrc;
+	struct device *dev = &umc_dev->dev;
+
+	d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev);
+	result = -ENOMEM;
+	uwb_rc = uwb_rc_alloc();
+	if (uwb_rc == NULL) {
+		dev_err(dev, "unable to allocate RC instance\n");
+		goto error_rc_alloc;
+	}
+	whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
+	if (whcrc == NULL) {
+		dev_err(dev, "unable to allocate WHC-RC instance\n");
+		goto error_alloc;
+	}
+	whcrc_init(whcrc);
+	whcrc->umc_dev = umc_dev;
+
+	result = whcrc_setup_rc_umc(whcrc);
+	if (result < 0) {
+		dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
+		goto error_setup_rc_umc;
+	}
+	whcrc->uwb_rc = uwb_rc;
+
+	uwb_rc->owner = THIS_MODULE;
+	uwb_rc->cmd   = whcrc_cmd;
+	uwb_rc->reset = whcrc_reset;
+	uwb_rc->start = whcrc_start_rc;
+	uwb_rc->stop  = whcrc_stop_rc;
+
+	result = uwb_rc_add(uwb_rc, dev, whcrc);
+	if (result < 0)
+		goto error_rc_add;
+	umc_set_drvdata(umc_dev, whcrc);
+	d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev);
+	return 0;
+
+error_rc_add:
+	whcrc_release_rc_umc(whcrc);
+error_setup_rc_umc:
+	kfree(whcrc);
+error_alloc:
+	uwb_rc_put(uwb_rc);
+error_rc_alloc:
+	d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result);
+	return result;
+}
+
+/**
+ * Clean up the radio control resources
+ *
+ * When we up the command semaphore, anybody blocked waiting to
+ * execute a command is granted entry; they will then see that the
+ * host is quiescing and up it again (so it chains to the next waiter).
+ * This should not happen in any case, as we can only remove when
+ * there are no handles open...
+ */
+static void whcrc_remove(struct umc_dev *umc_dev)
+{
+	struct whcrc *whcrc = umc_get_drvdata(umc_dev);
+	struct uwb_rc *uwb_rc = whcrc->uwb_rc;
+
+	umc_set_drvdata(umc_dev, NULL);
+	uwb_rc_rm(uwb_rc);
+	whcrc_release_rc_umc(whcrc);
+	kfree(whcrc);
+	uwb_rc_put(uwb_rc);
+	d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc);
+}
+
+/* PCI device ID's that we handle [so it gets loaded] */
+static struct pci_device_id whcrc_id_table[] = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+	{ /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whcrc_id_table);
+
+static struct umc_driver whcrc_driver = {
+	.name   = "whc-rc",
+	.cap_id = UMC_CAP_ID_WHCI_RC,
+	.probe  = whcrc_probe,
+	.remove = whcrc_remove,
+};
+
+static int __init whcrc_driver_init(void)
+{
+	return umc_driver_register(&whcrc_driver);
+}
+module_init(whcrc_driver_init);
+
+static void __exit whcrc_driver_exit(void)
+{
+	umc_driver_unregister(&whcrc_driver);
+}
+module_exit(whcrc_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c
new file mode 100644
index 0000000..3df2388
--- /dev/null
+++ b/drivers/uwb/whci.c
@@ -0,0 +1,269 @@
+/*
+ * WHCI UWB Multi-interface Controller enumerator.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
+
+struct whci_card {
+	struct pci_dev *pci;
+	void __iomem *uwbbase;
+	u8 n_caps;
+	struct umc_dev *devs[0];
+};
+
+
+/* Fix faulty HW :( */
+static
+u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
+{
+	u64 capdata_orig = capdata;
+	struct pci_dev *pci_dev = card->pci;
+	if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
+	    && (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
+	    && pci_dev->class == 0x0d1010) {
+		switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
+			/* WLP capability has 0x100 bytes of aperture */
+		case 0x80:
+			capdata |= 0x40 << 8; break;
+			/* WUSB capability has 0x80 bytes of aperture
+			 * and ID is 1 */
+		case 0x02:
+			capdata &= ~0xffff;
+			capdata |= 0x2001;
+			break;
+		}
+	}
+	if (capdata_orig != capdata)
+		dev_warn(&pci_dev->dev,
+			 "PCI v%04x d%04x c%06x#%02x: "
+			 "corrected capdata from %016Lx to %016Lx\n",
+			 pci_dev->vendor, pci_dev->device, pci_dev->class,
+			 (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
+			 (unsigned long long)capdata_orig,
+			 (unsigned long long)capdata);
+	return capdata;
+}
+
+
+/**
+ * whci_wait_for - wait for a WHCI register to be set
+ *
+ * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
+ */
+int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
+	unsigned long max_ms, const char *tag)
+{
+	unsigned t = 0;
+	u32 val;
+	for (;;) {
+		val = le_readl(reg);
+		if ((val & mask) == result)
+			break;
+		msleep(10);
+		if (t >= max_ms) {
+			dev_err(dev, "timed out waiting for %s ", tag);
+			return -ETIMEDOUT;
+		}
+		t += 10;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(whci_wait_for);
+
+
+/*
+ * NOTE: the capinfo and capdata registers are slightly different
+ *       (size and cap-id fields). So for cap #0, we need to fill
+ *       in. Size comes from the size of the register block
+ *       (statically calculated); cap_id comes from nowhere, we use
+ *       zero, that is reserved, for the radio controller, because
+ *       none was defined at the spec level.
+ */
+static int whci_add_cap(struct whci_card *card, int n)
+{
+	struct umc_dev *umc;
+	u64 capdata;
+	int bar, err;
+
+	umc = umc_device_create(&card->pci->dev, n);
+	if (umc == NULL)
+		return -ENOMEM;
+
+	capdata = le_readq(card->uwbbase + UWBCAPDATA(n));
+
+	bar = UWBCAPDATA_TO_BAR(capdata) << 1;
+
+	capdata = whci_capdata_quirks(card, capdata);
+	/* Capability 0 is the radio controller. Its size is 32
+	 * bytes (WHCI0.95[2.3, T2-9]). */
+	umc->version         = UWBCAPDATA_TO_VERSION(capdata);
+	umc->cap_id          = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata);
+	umc->bar	     = bar;
+	umc->resource.start  = pci_resource_start(card->pci, bar)
+		+ UWBCAPDATA_TO_OFFSET(capdata);
+	umc->resource.end    = umc->resource.start
+		+ (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1;
+	umc->resource.name   = umc->dev.bus_id;
+	umc->resource.flags  = card->pci->resource[bar].flags;
+	umc->resource.parent = &card->pci->resource[bar];
+	umc->irq             = card->pci->irq;
+
+	err = umc_device_register(umc);
+	if (err < 0)
+		goto error;
+	card->devs[n] = umc;
+	return 0;
+
+error:
+	kfree(umc);
+	return err;
+}
+
+static void whci_del_cap(struct whci_card *card, int n)
+{
+	struct umc_dev *umc = card->devs[n];
+
+	if (umc != NULL)
+		umc_device_unregister(umc);
+}
+
+static int whci_n_caps(struct pci_dev *pci)
+{
+	void __iomem *uwbbase;
+	u64 capinfo;
+
+	uwbbase = pci_iomap(pci, 0, 8);
+	if (!uwbbase)
+		return -ENOMEM;
+	capinfo = le_readq(uwbbase + UWBCAPINFO);
+	pci_iounmap(pci, uwbbase);
+
+	return UWBCAPINFO_TO_N_CAPS(capinfo);
+}
+
+static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+	struct whci_card *card;
+	int err, n_caps, n;
+
+	err = pci_enable_device(pci);
+	if (err < 0)
+		goto error;
+	pci_enable_msi(pci);
+	pci_set_master(pci);
+	err = -ENXIO;
+	if (!pci_set_dma_mask(pci, DMA_64BIT_MASK))
+		pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
+	else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK))
+		pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
+	else
+		goto error_dma;
+
+	err = n_caps = whci_n_caps(pci);
+	if (n_caps < 0)
+		goto error_ncaps;
+
+	err = -ENOMEM;
+	card = kzalloc(sizeof(struct whci_card)
+		       + sizeof(struct umc_dev *) * (n_caps + 1),
+		       GFP_KERNEL);
+	if (card == NULL)
+		goto error_kzalloc;
+	card->pci = pci;
+	card->n_caps = n_caps;
+
+	err = -EBUSY;
+	if (!request_mem_region(pci_resource_start(pci, 0),
+				UWBCAPDATA_SIZE(card->n_caps),
+				"whci (capability data)"))
+		goto error_request_memregion;
+	err = -ENOMEM;
+	card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps));
+	if (!card->uwbbase)
+		goto error_iomap;
+
+	/* Add each capability. */
+	for (n = 0; n <= card->n_caps; n++) {
+		err = whci_add_cap(card, n);
+		if (err < 0 && n == 0) {
+			dev_err(&pci->dev, "cannot bind UWB radio controller:"
+				" %d\n", err);
+			goto error_bind;
+		}
+		if (err < 0)
+			dev_warn(&pci->dev, "warning: cannot bind capability "
+				 "#%u: %d\n", n, err);
+	}
+	pci_set_drvdata(pci, card);
+	return 0;
+
+error_bind:
+	pci_iounmap(pci, card->uwbbase);
+error_iomap:
+	release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
+error_request_memregion:
+	kfree(card);
+error_kzalloc:
+error_ncaps:
+error_dma:
+	pci_disable_msi(pci);
+	pci_disable_device(pci);
+error:
+	return err;
+}
+
+static void whci_remove(struct pci_dev *pci)
+{
+	struct whci_card *card = pci_get_drvdata(pci);
+	int n;
+
+	pci_set_drvdata(pci, NULL);
+	/* Unregister each capability in reverse (so the master device
+	 * is unregistered last). */
+	for (n = card->n_caps; n >= 0 ; n--)
+		whci_del_cap(card, n);
+	pci_iounmap(pci, card->uwbbase);
+	release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
+	kfree(card);
+	pci_disable_msi(pci);
+	pci_disable_device(pci);
+}
+
+static struct pci_device_id whci_id_table[] = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+	{ 0 },
+};
+MODULE_DEVICE_TABLE(pci, whci_id_table);
+
+
+static struct pci_driver whci_driver = {
+	.name     = "whci",
+	.id_table = whci_id_table,
+	.probe    = whci_probe,
+	.remove   = whci_remove,
+};
+
+static int __init whci_init(void)
+{
+	return pci_register_driver(&whci_driver);
+}
+
+static void __exit whci_exit(void)
+{
+	pci_unregister_driver(&whci_driver);
+}
+
+module_init(whci_init);
+module_exit(whci_exit);
+
+MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/Makefile b/drivers/uwb/wlp/Makefile
new file mode 100644
index 0000000..c72c11d
--- /dev/null
+++ b/drivers/uwb/wlp/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_UWB_WLP) := wlp.o
+
+wlp-objs :=	\
+	driver.o	\
+	eda.o		\
+	messages.o	\
+	sysfs.o		\
+	txrx.o		\
+	wlp-lc.o	\
+	wss-lc.o
diff --git a/drivers/uwb/wlp/driver.c b/drivers/uwb/wlp/driver.c
new file mode 100644
index 0000000..cb8d699
--- /dev/null
+++ b/drivers/uwb/wlp/driver.c
@@ -0,0 +1,43 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Life cycle of WLP substack
+ *
+ * FIXME: Docs
+ */
+
+#include <linux/module.h>
+
+static int __init wlp_subsys_init(void)
+{
+	return 0;
+}
+module_init(wlp_subsys_init);
+
+static void __exit wlp_subsys_exit(void)
+{
+	return;
+}
+module_exit(wlp_subsys_exit);
+
+MODULE_AUTHOR("Reinette Chatre <reinette.chatre@intel.com>");
+MODULE_DESCRIPTION("WiMedia Logical Link Control Protocol (WLP)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c
new file mode 100644
index 0000000..cdfe8df
--- /dev/null
+++ b/drivers/uwb/wlp/eda.c
@@ -0,0 +1,449 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Ethernet to device address cache
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We need to be able to map ethernet addresses to device addresses
+ * and back because there is not explicit relationship between the eth
+ * addresses used in the ETH frames and the device addresses (no, it
+ * would not have been simpler to force as ETH address the MBOA MAC
+ * address...no, not at all :).
+ *
+ * A device has one MBOA MAC address and one device address. It is possible
+ * for a device to have more than one virtual MAC address (although a
+ * virtual address can be the same as the MBOA MAC address). The device
+ * address is guaranteed to be unique among the devices in the extended
+ * beacon group (see ECMA 17.1.1). We thus use the device address as index
+ * to this cache. We do allow searching based on virtual address as this
+ * is how Ethernet frames will be addressed.
+ *
+ * We need to support virtual EUI-48, although right now the virtual
+ * EUI-48 will always be the same as the MAC SAP address. The EDA cache
+ * entry thus contains a MAC SAP address as well as the virtual address
+ * (used to map the network stack address to a neighbor). When we move
+ * to support more than one virtual MAC on a host then this organization
+ * will have to change. Perhaps a neighbor has a list of WSSs, each with a
+ * tag and virtual EUI-48.
+ *
+ * On data transmission
+ * it is used to determine if the neighbor is connected and what WSS it
+ * belongs to. With this we know what tag to add to the WLP frame. Storing
+ * the WSS in the EDA cache may be overkill because we only support one
+ * WSS. Hopefully we will support more than one WSS at some point.
+ * On data reception it is used to determine the WSS based on
+ * the tag and address of the transmitting neighbor.
+ */
+
+#define D_LOCAL 5
+#include <linux/netdevice.h>
+#include <linux/uwb/debug.h>
+#include <linux/etherdevice.h>
+#include <linux/wlp.h>
+#include "wlp-internal.h"
+
+
+/* FIXME: cache is not purged, only on device close */
+
+/* FIXME: does not scale, change to dynamic array */
+
+/*
+ * Initialize the EDA cache
+ *
+ * Call when the interface is being brought up
+ *
+ * NOTE: Keep it as a separate function as the implementation will
+ *       change and be more complex.
+ */
+void wlp_eda_init(struct wlp_eda *eda)
+{
+	INIT_LIST_HEAD(&eda->cache);
+	spin_lock_init(&eda->lock);
+}
+
+/*
+ * Release the EDA cache
+ *
+ * Called when the interface is brought down
+ */
+void wlp_eda_release(struct wlp_eda *eda)
+{
+	unsigned long flags;
+	struct wlp_eda_node *itr, *next;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
+		list_del(&itr->list_node);
+		kfree(itr);
+	}
+	spin_unlock_irqrestore(&eda->lock, flags);
+}
+
+/*
+ * Add an address mapping
+ *
+ * @returns 0 if ok, < 0 errno code on error
+ *
+ * An address mapping is initially created when the neighbor device is seen
+ * for the first time (it is "onair"). At this time the neighbor is not
+ * connected or associated with a WSS so we only populate the Ethernet and
+ * Device address fields.
+ *
+ */
+int wlp_eda_create_node(struct wlp_eda *eda,
+			const unsigned char eth_addr[ETH_ALEN],
+			const struct uwb_dev_addr *dev_addr)
+{
+	int result = 0;
+	struct wlp_eda_node *itr;
+	unsigned long flags;
+
+	BUG_ON(dev_addr == NULL || eth_addr == NULL);
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry(itr, &eda->cache, list_node) {
+		if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+			printk(KERN_ERR "EDA cache already contains entry "
+			       "for neighbor %02x:%02x\n",
+			       dev_addr->data[1], dev_addr->data[0]);
+			result = -EEXIST;
+			goto out_unlock;
+		}
+	}
+	itr = kzalloc(sizeof(*itr), GFP_ATOMIC);
+	if (itr != NULL) {
+		memcpy(itr->eth_addr, eth_addr, sizeof(itr->eth_addr));
+		itr->dev_addr = *dev_addr;
+		list_add(&itr->list_node, &eda->cache);
+	} else
+		result = -ENOMEM;
+out_unlock:
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+
+/*
+ * Remove entry from EDA cache
+ *
+ * This is done when the device goes off air.
+ */
+void wlp_eda_rm_node(struct wlp_eda *eda, const struct uwb_dev_addr *dev_addr)
+{
+	struct wlp_eda_node *itr, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
+		if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+			list_del(&itr->list_node);
+			kfree(itr);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&eda->lock, flags);
+}
+
+/*
+ * Update an address mapping
+ *
+ * @returns 0 if ok, < 0 errno code on error
+ */
+int wlp_eda_update_node(struct wlp_eda *eda,
+			const struct uwb_dev_addr *dev_addr,
+			struct wlp_wss *wss,
+			const unsigned char virt_addr[ETH_ALEN],
+			const u8 tag, const enum wlp_wss_connect state)
+{
+	int result = -ENOENT;
+	struct wlp_eda_node *itr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry(itr, &eda->cache, list_node) {
+		if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+			/* Found it, update it */
+			itr->wss = wss;
+			memcpy(itr->virt_addr, virt_addr,
+			       sizeof(itr->virt_addr));
+			itr->tag = tag;
+			itr->state = state;
+			result = 0;
+			goto out_unlock;
+		}
+	}
+	/* Not found */
+out_unlock:
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+
+/*
+ * Update only state field of an address mapping
+ *
+ * @returns 0 if ok, < 0 errno code on error
+ */
+int wlp_eda_update_node_state(struct wlp_eda *eda,
+			      const struct uwb_dev_addr *dev_addr,
+			      const enum wlp_wss_connect state)
+{
+	int result = -ENOENT;
+	struct wlp_eda_node *itr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry(itr, &eda->cache, list_node) {
+		if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+			/* Found it, update it */
+			itr->state = state;
+			result = 0;
+			goto out_unlock;
+		}
+	}
+	/* Not found */
+out_unlock:
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+
+/*
+ * Return contents of EDA cache entry
+ *
+ * @dev_addr: index to EDA cache
+ * @eda_entry: pointer to where contents of EDA cache will be copied
+ */
+int wlp_copy_eda_node(struct wlp_eda *eda, struct uwb_dev_addr *dev_addr,
+		      struct wlp_eda_node *eda_entry)
+{
+	int result = -ENOENT;
+	struct wlp_eda_node *itr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry(itr, &eda->cache, list_node) {
+		if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+			*eda_entry = *itr;
+			result = 0;
+			goto out_unlock;
+		}
+	}
+	/* Not found */
+out_unlock:
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+
+/*
+ * Execute function for every element in the cache
+ *
+ * @function: function to execute on element of cache (must be atomic)
+ * @priv:     private data of function
+ * @returns:  result of first function that failed, or last function
+ *            executed if no function failed.
+ *
+ * Stop executing when function returns error for any element in cache.
+ *
+ * IMPORTANT: We are using a spinlock here: the function executed on each
+ * element has to be atomic.
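+ *
+ * The callback is invoked as function(wlp, entry, priv) while eda->lock is
+ * held, so it must not sleep. As an illustrative sketch (wlp_eda_count_node
+ * is a hypothetical helper, not defined in this file), a caller could do:
+ *
+ *	static int wlp_eda_count_node(struct wlp *wlp,
+ *				      struct wlp_eda_node *entry, void *priv)
+ *	{
+ *		(*(unsigned *)priv)++;
+ *		return 0;
+ *	}
+ *
+ *	unsigned count = 0;
+ *	wlp_eda_for_each(&wlp->eda, wlp_eda_count_node, &count);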
+ */
+int wlp_eda_for_each(struct wlp_eda *eda, wlp_eda_for_each_f function,
+		     void *priv)
+{
+	int result = 0;
+	struct wlp *wlp = container_of(eda, struct wlp, eda);
+	struct wlp_eda_node *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry(entry, &eda->cache, list_node) {
+		result = (*function)(wlp, entry, priv);
+		if (result < 0)
+			break;
+	}
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+
+/*
+ * Execute function for single element in the cache (return dev addr)
+ *
+ * @virt_addr: index into EDA cache used to determine which element to
+ *             execute the function on
+ * @dev_addr: device address of element in cache will be returned using
+ *            @dev_addr
+ * @function: function to execute on element of cache (must be atomic)
+ * @priv:     private data of function
+ * @returns:  result of function
+ *
+ * IMPORTANT: We are using a spinlock here: the function executed on the
+ * element has to be atomic.
+ */
+int wlp_eda_for_virtual(struct wlp_eda *eda,
+			const unsigned char virt_addr[ETH_ALEN],
+			struct uwb_dev_addr *dev_addr,
+			wlp_eda_for_each_f function,
+			void *priv)
+{
+	int result = 0;
+	struct wlp *wlp = container_of(eda, struct wlp, eda);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_eda_node *itr;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&eda->lock, flags);
+	list_for_each_entry(itr, &eda->cache, list_node) {
+		if (!memcmp(itr->virt_addr, virt_addr,
+			   sizeof(itr->virt_addr))) {
+			d_printf(6, dev, "EDA: looking for "
+			       "%02x:%02x:%02x:%02x:%02x:%02x hit %02x:%02x "
+			       "wss %p tag 0x%02x state %u\n",
+			       virt_addr[0], virt_addr[1],
+			       virt_addr[2], virt_addr[3],
+			       virt_addr[4], virt_addr[5],
+			       itr->dev_addr.data[1],
+			       itr->dev_addr.data[0], itr->wss,
+			       itr->tag, itr->state);
+			result = (*function)(wlp, itr, priv);
+			*dev_addr = itr->dev_addr;
+			found = 1;
+			break;
+		} else
+			d_printf(6, dev, "EDA: looking for "
+			       "%02x:%02x:%02x:%02x:%02x:%02x "
+			       "against "
+			       "%02x:%02x:%02x:%02x:%02x:%02x miss\n",
+			       virt_addr[0], virt_addr[1],
+			       virt_addr[2], virt_addr[3],
+			       virt_addr[4], virt_addr[5],
+			       itr->virt_addr[0], itr->virt_addr[1],
+			       itr->virt_addr[2], itr->virt_addr[3],
+			       itr->virt_addr[4], itr->virt_addr[5]);
+	}
+	if (!found) {
+		if (printk_ratelimit())
+			dev_err(dev, "EDA: Eth addr %02x:%02x:%02x"
+				":%02x:%02x:%02x not found.\n",
+				virt_addr[0], virt_addr[1],
+				virt_addr[2], virt_addr[3],
+				virt_addr[4], virt_addr[5]);
+		result = -ENODEV;
+	}
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+
+static const char *__wlp_wss_connect_state[] = { "WLP_WSS_UNCONNECTED",
+					  "WLP_WSS_CONNECTED",
+					  "WLP_WSS_CONNECT_FAILED",
+};
+
+static const char *wlp_wss_connect_state_str(unsigned id)
+{
+	if (id >= ARRAY_SIZE(__wlp_wss_connect_state))
+		return "unknown WSS connection state";
+	return __wlp_wss_connect_state[id];
+}
+
+/*
+ * View EDA cache from user space
+ *
+ * A debugging feature to give the user visibility into the EDA cache. Also
+ * used to display the members of a WSS to the user (called from
+ * wlp_wss_members_show()).
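+ *
+ * The output is one header line followed by one line per cache entry, for
+ * example (values are purely illustrative):
+ *
+ *	#eth_addr dev_addr wss_ptr tag state virt_addr
+ *	00:16:ea:ab:cd:ef 34:12 ffff88003a1f2000 0x01 WLP_WSS_CONNECTED 00:16:ea:ab:cd:ef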
+ */
+ssize_t wlp_eda_show(struct wlp *wlp, char *buf)
+{
+	ssize_t result = 0;
+	struct wlp_eda_node *entry;
+	unsigned long flags;
+	struct wlp_eda *eda = &wlp->eda;
+	spin_lock_irqsave(&eda->lock, flags);
+	result = scnprintf(buf, PAGE_SIZE, "#eth_addr dev_addr wss_ptr "
+			   "tag state virt_addr\n");
+	list_for_each_entry(entry, &eda->cache, list_node) {
+		result += scnprintf(buf + result, PAGE_SIZE - result,
+				    "%02x:%02x:%02x:%02x:%02x:%02x %02x:%02x "
+				    "%p 0x%02x %s "
+				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
+				    entry->eth_addr[0], entry->eth_addr[1],
+				    entry->eth_addr[2], entry->eth_addr[3],
+				    entry->eth_addr[4], entry->eth_addr[5],
+				    entry->dev_addr.data[1],
+				    entry->dev_addr.data[0], entry->wss,
+				    entry->tag,
+				    wlp_wss_connect_state_str(entry->state),
+				    entry->virt_addr[0], entry->virt_addr[1],
+				    entry->virt_addr[2], entry->virt_addr[3],
+				    entry->virt_addr[4], entry->virt_addr[5]);
+		if (result >= PAGE_SIZE)
+			break;
+	}
+	spin_unlock_irqrestore(&eda->lock, flags);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_eda_show);
+
+/*
+ * Add new EDA cache entry based on user input in sysfs
+ *
+ * Should only be used for debugging.
+ *
+ * The WSS is assumed to be the only WSS supported. This needs to be
+ * redesigned when we support more than one WSS.
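+ *
+ * The expected input matches the sscanf() format below: Ethernet address,
+ * device address, tag and state, for example (values are purely
+ * illustrative):
+ *
+ *	00:16:ea:ab:cd:ef 34:12 01 1
+ *
+ * A line containing only the Ethernet address selects the (not yet
+ * implemented) entry removal path.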
+ */
+ssize_t wlp_eda_store(struct wlp *wlp, const char *buf, size_t size)
+{
+	ssize_t result;
+	struct wlp_eda *eda = &wlp->eda;
+	u8 eth_addr[6];
+	struct uwb_dev_addr dev_addr;
+	u8 tag;
+	unsigned state;
+
+	result = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx "
+			"%02hhx:%02hhx %02hhx %u\n",
+			&eth_addr[0], &eth_addr[1],
+			&eth_addr[2], &eth_addr[3],
+			&eth_addr[4], &eth_addr[5],
+			&dev_addr.data[1], &dev_addr.data[0], &tag, &state);
+	switch (result) {
+	case 6: /* no dev addr specified -- remove entry NOT IMPLEMENTED */
+		/*result = wlp_eda_rm(eda, eth_addr, &dev_addr);*/
+		result = -ENOSYS;
+		break;
+	case 10:
+		state = state >= 1 ? 1 : 0;
+		result = wlp_eda_create_node(eda, eth_addr, &dev_addr);
+		if (result < 0 && result != -EEXIST)
+			goto error;
+		/* Set virtual addr to be same as MAC */
+		result = wlp_eda_update_node(eda, &dev_addr, &wlp->wss,
+					     eth_addr, tag, state);
+		if (result < 0)
+			goto error;
+		break;
+	default: /* bad format */
+		result = -EINVAL;
+	}
+error:
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_eda_store);
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
new file mode 100644
index 0000000..a64cb82
--- /dev/null
+++ b/drivers/uwb/wlp/messages.c
@@ -0,0 +1,1946 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * Message construction and parsing
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/wlp.h>
+#define D_LOCAL 6
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+static
+const char *__wlp_assoc_frame[] = {
+	[WLP_ASSOC_D1] = "WLP_ASSOC_D1",
+	[WLP_ASSOC_D2] = "WLP_ASSOC_D2",
+	[WLP_ASSOC_M1] = "WLP_ASSOC_M1",
+	[WLP_ASSOC_M2] = "WLP_ASSOC_M2",
+	[WLP_ASSOC_M3] = "WLP_ASSOC_M3",
+	[WLP_ASSOC_M4] = "WLP_ASSOC_M4",
+	[WLP_ASSOC_M5] = "WLP_ASSOC_M5",
+	[WLP_ASSOC_M6] = "WLP_ASSOC_M6",
+	[WLP_ASSOC_M7] = "WLP_ASSOC_M7",
+	[WLP_ASSOC_M8] = "WLP_ASSOC_M8",
+	[WLP_ASSOC_F0] = "WLP_ASSOC_F0",
+	[WLP_ASSOC_E1] = "WLP_ASSOC_E1",
+	[WLP_ASSOC_E2] = "WLP_ASSOC_E2",
+	[WLP_ASSOC_C1] = "WLP_ASSOC_C1",
+	[WLP_ASSOC_C2] = "WLP_ASSOC_C2",
+	[WLP_ASSOC_C3] = "WLP_ASSOC_C3",
+	[WLP_ASSOC_C4] = "WLP_ASSOC_C4",
+};
+
+static const char *wlp_assoc_frame_str(unsigned id)
+{
+	if (id >= ARRAY_SIZE(__wlp_assoc_frame))
+		return "unknown association frame";
+	return __wlp_assoc_frame[id];
+}
+
+static const char *__wlp_assc_error[] = {
+	"none",
+	"Authenticator Failure",
+	"Rogue activity suspected",
+	"Device busy",
+	"Setup Locked",
+	"Registrar not ready",
+	"Invalid WSS selection",
+	"Message timeout",
+	"Enrollment session timeout",
+	"Device password invalid",
+	"Unsupported version",
+	"Internal error",
+	"Undefined error",
+	"Numeric comparison failure",
+	"Waiting for user input",
+};
+
+static const char *wlp_assc_error_str(unsigned id)
+{
+	if (id >= ARRAY_SIZE(__wlp_assc_error))
+		return "unknown WLP association error";
+	return __wlp_assc_error[id];
+}
+
+static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type,
+				    size_t len)
+{
+	hdr->type = cpu_to_le16(type);
+	hdr->length = cpu_to_le16(len);
+}
+
+/*
+ * Populate fields of a constant sized attribute
+ *
+ * @returns: total size of attribute including size of new value
+ *
+ * We have two instances of this function (wlp_pset and wlp_set): one takes
+ * the value as a parameter, the other takes a pointer to the value as
+ * parameter. They thus only differ in how the value is assigned to the
+ * attribute.
+ *
+ * We use sizeof(*attr) - sizeof(struct wlp_attr_hdr) instead of
+ * sizeof(type) so that the same code works for structures that contain
+ * 8-bit enum values as well as for pointer types.
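+ *
+ * For example, wlp_set(u8, WLP_ATTR_WLP_VER, version) below expands to a
+ * function roughly equivalent to (debug statements omitted):
+ *
+ *	static size_t wlp_set_version(struct wlp_attr_version *attr, u8 value)
+ *	{
+ *		wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WLP_VER,
+ *				 sizeof(*attr) - sizeof(struct wlp_attr_hdr));
+ *		attr->version = value;
+ *		return sizeof(*attr);
+ *	}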
+ */
+#define wlp_set(type, type_code, name)					\
+static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value)	\
+{									\
+	d_fnstart(6, NULL, "(attribute %p)\n", attr);			\
+	wlp_set_attr_hdr(&attr->hdr, type_code,				\
+			 sizeof(*attr) - sizeof(struct wlp_attr_hdr));	\
+	attr->name = value;						\
+	d_dump(6, NULL, attr, sizeof(*attr));				\
+	d_fnend(6, NULL, "(attribute %p)\n", attr);			\
+	return sizeof(*attr);						\
+}
+
+#define wlp_pset(type, type_code, name)					\
+static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value)	\
+{									\
+	d_fnstart(6, NULL, "(attribute %p)\n", attr);			\
+	wlp_set_attr_hdr(&attr->hdr, type_code,				\
+			 sizeof(*attr) - sizeof(struct wlp_attr_hdr));	\
+	attr->name = *value;						\
+	d_dump(6, NULL, attr, sizeof(*attr));				\
+	d_fnend(6, NULL, "(attribute %p)\n", attr);			\
+	return sizeof(*attr);						\
+}
+
+/**
+ * Populate fields of a variable attribute
+ *
+ * @returns: total size of attribute including size of new value
+ *
+ * Provided with a pointer to the memory area reserved for the
+ * attribute structure, the field is populated with the value. The
+ * reserved memory has to contain enough space for the value.
+ */
+#define wlp_vset(type, type_code, name)					\
+static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value,	\
+				size_t len)				\
+{									\
+	d_fnstart(6, NULL, "(attribute %p)\n", attr);			\
+	wlp_set_attr_hdr(&attr->hdr, type_code, len);			\
+	memcpy(attr->name, value, len);					\
+	d_dump(6, NULL, attr, sizeof(*attr) + len);			\
+	d_fnend(6, NULL, "(attribute %p)\n", attr);			\
+	return sizeof(*attr) + len;					\
+}
+
+wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name)
+wlp_vset(char *, WLP_ATTR_MANUF, manufacturer)
+wlp_set(enum wlp_assoc_type, WLP_ATTR_MSG_TYPE, msg_type)
+wlp_vset(char *, WLP_ATTR_MODEL_NAME, model_name)
+wlp_vset(char *, WLP_ATTR_MODEL_NR, model_nr)
+wlp_vset(char *, WLP_ATTR_SERIAL, serial)
+wlp_vset(char *, WLP_ATTR_WSS_NAME, wss_name)
+wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_E, uuid_e)
+wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_R, uuid_r)
+wlp_pset(struct wlp_uuid *, WLP_ATTR_WSSID, wssid)
+wlp_pset(struct wlp_dev_type *, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
+/*wlp_pset(struct wlp_dev_type *, WLP_ATTR_SEC_DEV_TYPE, sec_dev_type)*/
+wlp_set(u8, WLP_ATTR_WLP_VER, version)
+wlp_set(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
+wlp_set(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
+wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
+wlp_set(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
+wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_BCAST, wss_bcast)
+wlp_pset(struct wlp_nonce *, WLP_ATTR_ENRL_NONCE, enonce)
+wlp_pset(struct wlp_nonce *, WLP_ATTR_REG_NONCE, rnonce)
+wlp_set(u8, WLP_ATTR_WSS_TAG, wss_tag)
+wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_VIRT, wss_virt)
+
+/**
+ * Fill in the WSS information attributes
+ *
+ * We currently support only one WSS; this function assumes that and
+ * populates only one WSS information attribute.
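+ *
+ * The WSS information attribute value consists of, in order: WSSID, WSS
+ * name, accept enrollment, secure status and the WSS broadcast address
+ * (see the wlp_set_*() calls below).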
+ */
+static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr,
+			       struct wlp_wss *wss)
+{
+	size_t datalen;
+	void *ptr = attr->wss_info;
+	size_t used = sizeof(*attr);
+	d_fnstart(6, NULL, "(attribute %p)\n", attr);
+	datalen = sizeof(struct wlp_wss_info) + strlen(wss->name);
+	wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen);
+	used = wlp_set_wssid(ptr, &wss->wssid);
+	used += wlp_set_wss_name(ptr + used, wss->name, strlen(wss->name));
+	used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll);
+	used += wlp_set_wss_sec_status(ptr + used, wss->secure_status);
+	used += wlp_set_wss_bcast(ptr + used, &wss->bcast);
+	d_dump(6, NULL, attr, sizeof(*attr) + datalen);
+	d_fnend(6, NULL, "(attribute %p, used %d)\n",
+		attr, (int)(sizeof(*attr) + used));
+	return sizeof(*attr) + used;
+}
+
+/**
+ * Verify attribute header
+ *
+ * @hdr:     Pointer to attribute header that will be verified.
+ * @type:    Expected attribute type.
+ * @len:     Expected length of attribute value (excluding header).
+ *
+ * Most attribute values have a known length even though they carry a
+ * length field. This function uses that knowledge to verify that the
+ * length field matches the expected value.
+ */
+static int wlp_check_attr_hdr(struct wlp *wlp, struct wlp_attr_hdr *hdr,
+		       enum wlp_attr_type type, unsigned len)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+
+	if (le16_to_cpu(hdr->type) != type) {
+		dev_err(dev, "WLP: unexpected header type. Expected "
+			"%u, got %u.\n", type, le16_to_cpu(hdr->type));
+		return -EINVAL;
+	}
+	if (le16_to_cpu(hdr->length) != len) {
+		dev_err(dev, "WLP: unexpected length in header. Expected "
+			"%u, got %u.\n", len, le16_to_cpu(hdr->length));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * Check if header of WSS information attribute is valid
+ *
+ * @returns: length of WSS attributes (value of length attribute field) if
+ *             valid WSS information attribute found
+ *           -ENODATA if no WSS information attribute found
+ *           -EIO if another error occurred
+ *
+ * The WSS information attribute is optional. The function is provided
+ * with a pointer to data that could _potentially_ be a WSS information
+ * attribute. If a valid WSS information attribute is found it returns the
+ * value of the attribute's length field, if no WSS information attribute
+ * is found it returns -ENODATA, and another error is returned if it is a
+ * WSS information attribute but some parsing failure occurred.
+ */
+static int wlp_check_wss_info_attr_hdr(struct wlp *wlp,
+				       struct wlp_attr_hdr *hdr, size_t buflen)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	size_t len;
+	int result = 0;
+
+	if (buflen < sizeof(*hdr)) {
+		dev_err(dev, "WLP: Not enough space in buffer to parse"
+			" WSS information attribute header.\n");
+		result = -EIO;
+		goto out;
+	}
+	if (le16_to_cpu(hdr->type) != WLP_ATTR_WSS_INFO) {
+		/* WSS information is optional */
+		result = -ENODATA;
+		goto out;
+	}
+	len = le16_to_cpu(hdr->length);
+	if (buflen < sizeof(*hdr) + len) {
+		dev_err(dev, "WLP: Not enough space in buffer to parse "
+			"variable data. Got %d, expected %d.\n",
+			(int)buflen, (int)(sizeof(*hdr) + len));
+		result = -EIO;
+		goto out;
+	}
+	result = len;
+out:
+	return result;
+}
+
+
+/**
+ * Get value of attribute from fixed size attribute field.
+ *
+ * @attr:    Pointer to attribute field.
+ * @value:   Pointer to variable in which attribute value will be placed.
+ * @buflen:  Size of buffer in which attribute field (including header)
+ *           can be found.
+ * @returns: Amount of given buffer consumed by parsing for this attribute.
+ *
+ * The size and type of the value is known by the type of the attribute.
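+ *
+ * For example, wlp_get(u8, WLP_ATTR_WLP_VER, version) below expands to
+ * wlp_get_version(), which validates the header via wlp_check_attr_hdr()
+ * and, on success, copies attr->version into *value and returns
+ * sizeof(*attr).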
+ */
+#define wlp_get(type, type_code, name)					\
+ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr,	\
+		      type *value, ssize_t buflen)			\
+{									\
+	struct device *dev = &wlp->rc->uwb_dev.dev;			\
+	if (buflen < 0)							\
+		return -EINVAL;						\
+	if (buflen < sizeof(*attr)) {					\
+		dev_err(dev, "WLP: Not enough space in buffer to parse"	\
+			" attribute field. Need %d, received %zu\n",	\
+			(int)sizeof(*attr), buflen);			\
+		return -EIO;						\
+	}								\
+	if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code,		\
+			       sizeof(attr->name)) < 0) {		\
+		dev_err(dev, "WLP: Header verification failed. \n");	\
+		return -EINVAL;						\
+	}								\
+	*value = attr->name;						\
+	return sizeof(*attr);						\
+}
+
+#define wlp_get_sparse(type, type_code, name) \
+	static wlp_get(type, type_code, name)
+
+/**
+ * Get value of attribute from variable sized attribute field.
+ *
+ * @max:     The maximum size of this attribute. This value is dictated by
+ *           the maximum value from the WLP specification.
+ *
+ * @attr:    Pointer to attribute field.
+ * @value:   Pointer to variable that will contain the value. The memory
+ *           must already have been allocated for this value.
+ * @buflen:  Size of buffer in which attribute field (including header)
+ *           can be found.
+ * @returns: Amount of given buffer consumed by parsing for this attribute.
+ */
+#define wlp_vget(type_val, type_code, name, max)			\
+static ssize_t wlp_get_##name(struct wlp *wlp,				\
+			      struct wlp_attr_##name *attr,		\
+			      type_val *value, ssize_t buflen)		\
+{									\
+	struct device *dev = &wlp->rc->uwb_dev.dev;			\
+	size_t len;							\
+	if (buflen < 0)							\
+		return -EINVAL;						\
+	if (buflen < sizeof(*attr)) {					\
+		dev_err(dev, "WLP: Not enough space in buffer to parse"	\
+			" header.\n");					\
+		return -EIO;						\
+	}								\
+	if (le16_to_cpu(attr->hdr.type) != type_code) {			\
+		dev_err(dev, "WLP: Unexpected attribute type. Got %u, "	\
+			"expected %u.\n", le16_to_cpu(attr->hdr.type),	\
+			type_code);					\
+		return -EINVAL;						\
+	}								\
+	len = le16_to_cpu(attr->hdr.length);				\
+	if (len > max) {						\
+		dev_err(dev, "WLP: Attribute larger than maximum "	\
+			"allowed. Received %zu, max is %d.\n", len,	\
+			(int)max);					\
+		return -EFBIG;						\
+	}								\
+	if (buflen < sizeof(*attr) + len) {				\
+		dev_err(dev, "WLP: Not enough space in buffer to parse "\
+			"variable data.\n");				\
+		return -EIO;						\
+	}								\
+	memcpy(value, (void *) attr + sizeof(*attr), len);		\
+	return sizeof(*attr) + len;					\
+}
+
+wlp_get(u8, WLP_ATTR_WLP_VER, version)
+wlp_get_sparse(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
+wlp_get_sparse(struct wlp_dev_type, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
+wlp_get_sparse(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
+wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_E, uuid_e)
+wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_R, uuid_r)
+wlp_get(struct wlp_uuid, WLP_ATTR_WSSID, wssid)
+wlp_get_sparse(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
+wlp_get_sparse(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
+wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_BCAST, wss_bcast)
+wlp_get_sparse(u8, WLP_ATTR_WSS_TAG, wss_tag)
+wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_VIRT, wss_virt)
+wlp_get_sparse(struct wlp_nonce, WLP_ATTR_ENRL_NONCE, enonce)
+wlp_get_sparse(struct wlp_nonce, WLP_ATTR_REG_NONCE, rnonce)
+
+/* The buffers for the device info attributes can be found in the
+ * wlp_device_info struct. These buffers contain one byte more than the
+ * max allowed by the spec - this is done to be able to add the
+ * terminating \0 for user display. This terminating byte is not required
+ * in the actual attribute field (because it has a length field) so the
+ * maximum allowed for this value is one less than its size in the
+ * structure.
+ */
+wlp_vget(char, WLP_ATTR_WSS_NAME, wss_name,
+	 FIELD_SIZEOF(struct wlp_wss, name) - 1)
+wlp_vget(char, WLP_ATTR_DEV_NAME, dev_name,
+	 FIELD_SIZEOF(struct wlp_device_info, name) - 1)
+wlp_vget(char, WLP_ATTR_MANUF, manufacturer,
+	 FIELD_SIZEOF(struct wlp_device_info, manufacturer) - 1)
+wlp_vget(char, WLP_ATTR_MODEL_NAME, model_name,
+	 FIELD_SIZEOF(struct wlp_device_info, model_name) - 1)
+wlp_vget(char, WLP_ATTR_MODEL_NR, model_nr,
+	 FIELD_SIZEOF(struct wlp_device_info, model_nr) - 1)
+wlp_vget(char, WLP_ATTR_SERIAL, serial,
+	 FIELD_SIZEOF(struct wlp_device_info, serial) - 1)
+
+/**
+ * Retrieve WSS Name, Accept enroll, Secure status, Broadcast from WSS info
+ *
+ * @attr: pointer to WSS name attribute in WSS information attribute field
+ * @info: structure that will be populated with data from WSS information
+ *        field (WSS name, Accept enroll, secure status, broadcast address)
+ * @buflen: size of buffer
+ *
+ * Although the WSSID attribute forms part of the WSS info attribute it is
+ * retrieved separately and stored in a different location.
+ */
+static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp,
+				      struct wlp_attr_hdr *attr,
+				      struct wlp_wss_tmp_info *info,
+				      ssize_t buflen)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	void *ptr = attr;
+	size_t used = 0;
+	ssize_t result = -EINVAL;
+
+	d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n");
+	result = wlp_get_wss_name(wlp, ptr, info->name, buflen);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS name from "
+			"WSS info in D2 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n");
+	result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll,
+				     buflen - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain accepting "
+			"enrollment from WSS info in D2 message.\n");
+		goto error_parse;
+	}
+	if (info->accept_enroll != 0 && info->accept_enroll != 1) {
+		dev_err(dev, "WLP: invalid value for accepting "
+			"enrollment in D2 message.\n");
+		result = -EINVAL;
+		goto error_parse;
+	}
+	used += result;
+	d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n");
+	result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status,
+					buflen - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain secure "
+			"status from WSS info in D2 message.\n");
+		goto error_parse;
+	}
+	if (info->sec_status != 0 && info->sec_status != 1) {
+		dev_err(dev, "WLP: invalid value for secure "
+			"status in D2 message.\n");
+		result = -EINVAL;
+		goto error_parse;
+	}
+	used += result;
+	d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n");
+	result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast,
+				   buflen - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain broadcast "
+			"address from WSS info in D2 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = used;
+error_parse:
+	return result;
+}
+
+/**
+ * Create a new WSSID entry for the neighbor, allocate temporary storage
+ *
+ * Each neighbor can have many WSS active. We maintain a list of WSSIDs
+ * advertised by neighbor. During discovery we also cache information about
+ * these WSS in temporary storage.
+ *
+ * The temporary storage will be removed after it has been used (e.g.
+ * displayed to the user); the wssid element will be removed from the list
+ * when the neighbor is rediscovered or when it disappears.
+ */
+static struct wlp_wssid_e *wlp_create_wssid_e(struct wlp *wlp,
+					      struct wlp_neighbor_e *neighbor)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_wssid_e *wssid_e;
+
+	wssid_e = kzalloc(sizeof(*wssid_e), GFP_KERNEL);
+	if (wssid_e == NULL) {
+		dev_err(dev, "WLP: unable to allocate memory "
+			"for WSS information.\n");
+		goto error_alloc;
+	}
+	wssid_e->info = kzalloc(sizeof(struct wlp_wss_tmp_info), GFP_KERNEL);
+	if (wssid_e->info == NULL) {
+		dev_err(dev, "WLP: unable to allocate memory "
+			"for temporary WSS information.\n");
+		kfree(wssid_e);
+		wssid_e = NULL;
+		goto error_alloc;
+	}
+	list_add(&wssid_e->node, &neighbor->wssid);
+error_alloc:
+	return wssid_e;
+}
+
+/**
+ * Parse WSS information attribute
+ *
+ * @attr: pointer to WSS information attribute header
+ * @buflen: size of buffer in which WSS information attribute appears
+ * @wssid: will place wssid from WSS info attribute in this location
+ * @wss_info: will place other information from WSS information attribute
+ * in this location
+ *
+ * Memory for @wssid and @wss_info must be allocated by the caller.
+ */
+static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr,
+				size_t buflen, struct wlp_uuid *wssid,
+				struct wlp_wss_tmp_info *wss_info)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	ssize_t result;
+	size_t len;
+	size_t used = 0;
+	void *ptr;
+
+	result = wlp_check_wss_info_attr_hdr(wlp, (struct wlp_attr_hdr *)attr,
+					     buflen);
+	if (result < 0)
+		goto out;
+	len = result;
+	used = sizeof(*attr);
+	ptr = attr;
+	d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n");
+	result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n");
+		goto out;
+	}
+	used += result;
+	result = wlp_get_wss_info_attrs(wlp, ptr + used, wss_info,
+					buflen - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS information "
+			"from WSS information attributes. \n");
+		goto out;
+	}
+	used += result;
+	if (len + sizeof(*attr) != used) {
+		dev_err(dev, "WLP: Amount of data parsed does not "
+			"match length field. Parsed %zu, length "
+			"field %zu. \n", used, len);
+		result = -EINVAL;
+		goto out;
+	}
+	result = used;
+	d_printf(6, dev, "WLP: Successfully parsed WLP information "
+		 "attribute. used %zu bytes\n", used);
+out:
+	return result;
+}
+
+/**
+ * Retrieve WSS info from association frame
+ *
+ * @attr:     pointer to WSS information attribute
+ * @neighbor: ptr to neighbor being discovered, NULL if enrollment in
+ *            progress
+ * @wss:      ptr to WSS being enrolled in, NULL if discovery in progress
+ * @buflen:   size of buffer in which WSS information appears
+ *
+ * The WSS information attribute appears in the D2 association message.
+ * This message is used in two ways: to discover all neighbors or to enroll
+ * into a WSS activated by a neighbor. During discovery we only want to
+ * store the WSS info in a cache, to be deleted right after it has been
+ * used (eg. displayed to the user). During enrollment we store the WSS
+ * information for the lifetime of enrollment.
+ *
+ * During discovery we are interested in all WSS information, during
+ * enrollment we are only interested in the WSS being enrolled in. Even so,
+ * when in enrollment we keep parsing the message after finding the WSS of
+ * interest; this simplifies the calling routine in that it can be sure
+ * that all WSS information attributes have been parsed out of the message.
+ *
+ * The association frame is processed with nbmutex held, so the list access
+ * is safe.
+ */
+static ssize_t wlp_get_all_wss_info(struct wlp *wlp,
+				    struct wlp_attr_wss_info *attr,
+				    struct wlp_neighbor_e *neighbor,
+				    struct wlp_wss *wss, ssize_t buflen)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	size_t used = 0;
+	ssize_t result = -EINVAL;
+	struct wlp_attr_wss_info *cur;
+	struct wlp_uuid wssid;
+	struct wlp_wss_tmp_info wss_info;
+	unsigned enroll; /* 0 - discovery to cache, 1 - enrollment */
+	struct wlp_wssid_e *wssid_e;
+	char buf[WLP_WSS_UUID_STRSIZE];
+
+	d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n",
+		  wlp, attr, neighbor, wss, (int)buflen);
+	if (buflen < 0)
+		goto out;
+
+	if (neighbor != NULL && wss == NULL)
+		enroll = 0; /* discovery */
+	else if (wss != NULL && neighbor == NULL)
+		enroll = 1; /* enrollment */
+	else
+		goto out;
+
+	cur = attr;
+	while (buflen - used > 0) {
+		memset(&wss_info, 0, sizeof(wss_info));
+		cur = (void *)cur + used;
+		result = wlp_get_wss_info(wlp, cur, buflen - used, &wssid,
+					  &wss_info);
+		if (result == -ENODATA) {
+			result = used;
+			goto out;
+		} else if (result < 0) {
+			dev_err(dev, "WLP: Unable to parse WSS information "
+				"from WSS information attribute. \n");
+			result = -EINVAL;
+			goto error_parse;
+		}
+		if (enroll && !memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
+			if (wss_info.accept_enroll != 1) {
+				dev_err(dev, "WLP: Requested WSS does "
+					"not accept enrollment.\n");
+				result = -EINVAL;
+				goto out;
+			}
+			memcpy(wss->name, wss_info.name, sizeof(wss->name));
+			wss->bcast = wss_info.bcast;
+			wss->secure_status = wss_info.sec_status;
+			wss->accept_enroll = wss_info.accept_enroll;
+			wss->state = WLP_WSS_STATE_PART_ENROLLED;
+			wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
+			d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n",
+				 buf);
+		} else {
+			wssid_e = wlp_create_wssid_e(wlp, neighbor);
+			if (wssid_e == NULL) {
+				dev_err(dev, "WLP: Cannot create new WSSID "
+					"entry for neighbor %02x:%02x.\n",
+					neighbor->uwb_dev->dev_addr.data[1],
+					neighbor->uwb_dev->dev_addr.data[0]);
+				result = -ENOMEM;
+				goto out;
+			}
+			wssid_e->wssid = wssid;
+			*wssid_e->info = wss_info;
+		}
+		used += result;
+	}
+	result = used;
+error_parse:
+	if (result < 0 && !enroll) /* this was a discovery */
+		wlp_remove_neighbor_tmp_info(neighbor);
+out:
+	d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, "
+		"result %d \n", wlp, attr, neighbor, wss, (int)buflen,
+		(int)result);
+	return result;
+}
+
+/**
+ * Parse WSS information attributes into cache for discovery
+ *
+ * @attr: the first WSS information attribute in message
+ * @neighbor: the neighbor whose cache will be populated
+ * @buflen: size of the input buffer
+ */
+static ssize_t wlp_get_wss_info_to_cache(struct wlp *wlp,
+					 struct wlp_attr_wss_info *attr,
+					 struct wlp_neighbor_e *neighbor,
+					 ssize_t buflen)
+{
+	return wlp_get_all_wss_info(wlp, attr, neighbor, NULL, buflen);
+}
+
+/**
+ * Parse WSS information attributes into WSS struct for enrollment
+ *
+ * @attr: the first WSS information attribute in message
+ * @wss: the WSS that will be enrolled
+ * @buflen: size of the input buffer
+ */
+static ssize_t wlp_get_wss_info_to_enroll(struct wlp *wlp,
+					  struct wlp_attr_wss_info *attr,
+					  struct wlp_wss *wss, ssize_t buflen)
+{
+	return wlp_get_all_wss_info(wlp, attr, NULL, wss, buflen);
+}
+
+/**
+ * Construct a D1 association frame
+ *
+ * We use the radio control functions to determine the values of the device
+ * properties. These are of variable length and the total space needed is
+ * tallied first before we start constructing the message. The radio
+ * control functions return strings that are terminated with \0. This
+ * character should not be included in the message (there is a length field
+ * accompanying it in the attribute).
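+ *
+ * The resulting frame is the association header followed by, in order:
+ * UUID-E, WSS selection method, device name, manufacturer, model name,
+ * model number, serial number, primary device type and WLP association
+ * error (see the wlp_set_*() calls below).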
+ */
+static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss,
+			      struct sk_buff **skb)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	struct wlp_device_info *info;
+	size_t used = 0;
+	struct wlp_frame_assoc *_d1;
+	struct sk_buff *_skb;
+	void *d1_itr;
+
+	d_fnstart(6, dev, "wlp %p\n", wlp);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_setup_device_info(wlp);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to setup device "
+				"information for D1 message.\n");
+			goto error;
+		}
+	}
+	info = wlp->dev_info;
+	d_printf(6, dev, "Local properties:\n"
+		 "Device name (%d bytes): %s\n"
+		 "Model name (%d bytes): %s\n"
+		 "Manufacturer (%d bytes): %s\n"
+		 "Model number (%d bytes): %s\n"
+		 "Serial number (%d bytes): %s\n"
+		 "Primary device type: \n"
+		 " Category: %d \n"
+		 " OUI: %02x:%02x:%02x \n"
+		 " OUI Subdivision: %u \n",
+		 (int)strlen(info->name), info->name,
+		 (int)strlen(info->model_name), info->model_name,
+		 (int)strlen(info->manufacturer), info->manufacturer,
+		 (int)strlen(info->model_nr),  info->model_nr,
+		 (int)strlen(info->serial), info->serial,
+		 info->prim_dev_type.category,
+		 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
+		 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
+	_skb = dev_alloc_skb(sizeof(*_d1)
+		      + sizeof(struct wlp_attr_uuid_e)
+		      + sizeof(struct wlp_attr_wss_sel_mthd)
+		      + sizeof(struct wlp_attr_dev_name)
+		      + strlen(info->name)
+		      + sizeof(struct wlp_attr_manufacturer)
+		      + strlen(info->manufacturer)
+		      + sizeof(struct wlp_attr_model_name)
+		      + strlen(info->model_name)
+		      + sizeof(struct wlp_attr_model_nr)
+		      + strlen(info->model_nr)
+		      + sizeof(struct wlp_attr_serial)
+		      + strlen(info->serial)
+		      + sizeof(struct wlp_attr_prim_dev_type)
+		      + sizeof(struct wlp_attr_wlp_assc_err));
+	if (_skb == NULL) {
+		dev_err(dev, "WLP: Cannot allocate memory for association "
+			"message.\n");
+		result = -ENOMEM;
+		goto error;
+	}
+	_d1 = (void *) _skb->data;
+	d_printf(6, dev, "D1 starts at %p \n", _d1);
+	_d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+	_d1->hdr.type = WLP_FRAME_ASSOCIATION;
+	_d1->type = WLP_ASSOC_D1;
+
+	wlp_set_version(&_d1->version, WLP_VERSION);
+	wlp_set_msg_type(&_d1->msg_type, WLP_ASSOC_D1);
+	d1_itr = _d1->attr;
+	used = wlp_set_uuid_e(d1_itr, &wlp->uuid);
+	used += wlp_set_wss_sel_mthd(d1_itr + used, WLP_WSS_REG_SELECT);
+	used += wlp_set_dev_name(d1_itr + used, info->name,
+				 strlen(info->name));
+	used += wlp_set_manufacturer(d1_itr + used, info->manufacturer,
+				     strlen(info->manufacturer));
+	used += wlp_set_model_name(d1_itr + used, info->model_name,
+				   strlen(info->model_name));
+	used += wlp_set_model_nr(d1_itr + used, info->model_nr,
+				 strlen(info->model_nr));
+	used += wlp_set_serial(d1_itr + used, info->serial,
+			       strlen(info->serial));
+	used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type);
+	used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE);
+	skb_put(_skb, sizeof(*_d1) + used);
+	d_printf(6, dev, "D1 message:\n");
+	d_dump(6, dev, _d1, sizeof(*_d1)
+		     + sizeof(struct wlp_attr_uuid_e)
+		     + sizeof(struct wlp_attr_wss_sel_mthd)
+		     + sizeof(struct wlp_attr_dev_name)
+		     + strlen(info->name)
+		     + sizeof(struct wlp_attr_manufacturer)
+		     + strlen(info->manufacturer)
+		     + sizeof(struct wlp_attr_model_name)
+		     + strlen(info->model_name)
+		     + sizeof(struct wlp_attr_model_nr)
+		     + strlen(info->model_nr)
+		     + sizeof(struct wlp_attr_serial)
+		     + strlen(info->serial)
+		     + sizeof(struct wlp_attr_prim_dev_type)
+		     + sizeof(struct wlp_attr_wlp_assc_err));
+	*skb = _skb;
+error:
+	d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
+	return result;
+}
+
+/**
+ * Construct a D2 association frame
+ *
+ * We use the radio control functions to determine the values of the device
+ * properties. These are of variable length and the total space needed is
+ * tallied first before we start constructing the message. The radio
+ * control functions return strings that are terminated with \0. This
+ * character should not be included in the message (there is a length field
+ * accompanying it in the attribute).
+ */
+static
+int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
+		       struct sk_buff **skb, struct wlp_uuid *uuid_e)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	struct wlp_device_info *info;
+	size_t used = 0;
+	struct wlp_frame_assoc *_d2;
+	struct sk_buff *_skb;
+	void *d2_itr;
+	size_t mem_needed;
+
+	d_fnstart(6, dev, "wlp %p\n", wlp);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_setup_device_info(wlp);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to setup device "
+				"information for D2 message.\n");
+			goto error;
+		}
+	}
+	info = wlp->dev_info;
+	d_printf(6, dev, "Local properties:\n"
+		 "Device name (%d bytes): %s\n"
+		 "Model name (%d bytes): %s\n"
+		 "Manufacturer (%d bytes): %s\n"
+		 "Model number (%d bytes): %s\n"
+		 "Serial number (%d bytes): %s\n"
+		 "Primary device type: \n"
+		 " Category: %d \n"
+		 " OUI: %02x:%02x:%02x \n"
+		 " OUI Subdivision: %u \n",
+		 (int)strlen(info->name), info->name,
+		 (int)strlen(info->model_name), info->model_name,
+		 (int)strlen(info->manufacturer), info->manufacturer,
+		 (int)strlen(info->model_nr),  info->model_nr,
+		 (int)strlen(info->serial), info->serial,
+		 info->prim_dev_type.category,
+		 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
+		 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
+	mem_needed = sizeof(*_d2)
+		      + sizeof(struct wlp_attr_uuid_e)
+		      + sizeof(struct wlp_attr_uuid_r)
+		      + sizeof(struct wlp_attr_dev_name)
+		      + strlen(info->name)
+		      + sizeof(struct wlp_attr_manufacturer)
+		      + strlen(info->manufacturer)
+		      + sizeof(struct wlp_attr_model_name)
+		      + strlen(info->model_name)
+		      + sizeof(struct wlp_attr_model_nr)
+		      + strlen(info->model_nr)
+		      + sizeof(struct wlp_attr_serial)
+		      + strlen(info->serial)
+		      + sizeof(struct wlp_attr_prim_dev_type)
+		      + sizeof(struct wlp_attr_wlp_assc_err);
+	if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
+		mem_needed += sizeof(struct wlp_attr_wss_info)
+			      + sizeof(struct wlp_wss_info)
+			      + strlen(wlp->wss.name);
+	_skb = dev_alloc_skb(mem_needed);
+	if (_skb == NULL) {
+		dev_err(dev, "WLP: Cannot allocate memory for association "
+			"message.\n");
+		result = -ENOMEM;
+		goto error;
+	}
+	_d2 = (void *) _skb->data;
+	d_printf(6, dev, "D2 starts at %p \n", _d2);
+	_d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+	_d2->hdr.type = WLP_FRAME_ASSOCIATION;
+	_d2->type = WLP_ASSOC_D2;
+
+	wlp_set_version(&_d2->version, WLP_VERSION);
+	wlp_set_msg_type(&_d2->msg_type, WLP_ASSOC_D2);
+	d2_itr = _d2->attr;
+	used = wlp_set_uuid_e(d2_itr, uuid_e);
+	used += wlp_set_uuid_r(d2_itr + used, &wlp->uuid);
+	if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
+		used += wlp_set_wss_info(d2_itr + used, &wlp->wss);
+	used += wlp_set_dev_name(d2_itr + used, info->name,
+				 strlen(info->name));
+	used += wlp_set_manufacturer(d2_itr + used, info->manufacturer,
+				     strlen(info->manufacturer));
+	used += wlp_set_model_name(d2_itr + used, info->model_name,
+				   strlen(info->model_name));
+	used += wlp_set_model_nr(d2_itr + used, info->model_nr,
+				 strlen(info->model_nr));
+	used += wlp_set_serial(d2_itr + used, info->serial,
+			       strlen(info->serial));
+	used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type);
+	used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE);
+	skb_put(_skb, sizeof(*_d2) + used);
+	d_printf(6, dev, "D2 message:\n");
+	d_dump(6, dev, _d2, mem_needed);
+	*skb = _skb;
+error:
+	d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
+	return result;
+}
+
+/**
+ * Allocate memory for and populate fields of F0 association frame
+ *
+ * Currently (while focusing on unsecure enrollment) we ignore the
+ * nonces that could be placed in the message. Only the error field is
+ * populated with the value provided by the caller.
+ */
+static
+int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb,
+		       enum wlp_assc_error error)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = -ENOMEM;
+	struct {
+		struct wlp_frame_assoc f0_hdr;
+		struct wlp_attr_enonce enonce;
+		struct wlp_attr_rnonce rnonce;
+		struct wlp_attr_wlp_assc_err assc_err;
+	} *f0;
+	struct sk_buff *_skb;
+	struct wlp_nonce tmp;
+
+	d_fnstart(6, dev, "wlp %p\n", wlp);
+	_skb = dev_alloc_skb(sizeof(*f0));
+	if (_skb == NULL) {
+		dev_err(dev, "WLP: Unable to allocate memory for F0 "
+			"association frame. \n");
+		goto error_alloc;
+	}
+	f0 = (void *) _skb->data;
+	d_printf(6, dev, "F0 starts at %p \n", f0);
+	f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+	f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
+	f0->f0_hdr.type = WLP_ASSOC_F0;
+	wlp_set_version(&f0->f0_hdr.version, WLP_VERSION);
+	wlp_set_msg_type(&f0->f0_hdr.msg_type, WLP_ASSOC_F0);
+	memset(&tmp, 0, sizeof(tmp));
+	wlp_set_enonce(&f0->enonce, &tmp);
+	wlp_set_rnonce(&f0->rnonce, &tmp);
+	wlp_set_wlp_assc_err(&f0->assc_err, error);
+	skb_put(_skb, sizeof(*f0));
+	*skb = _skb;
+	result = 0;
+error_alloc:
+	d_fnend(6, dev, "wlp %p, result %d \n", wlp, result);
+	return result;
+}
+
+/**
+ * Parse F0 frame
+ *
+ * We just retrieve the values and print them as an error to the user.
+ * The calling function already knows an error occurred (F0 indicates an
+ * error), so we just parse the content as debug for higher layers.
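+ *
+ * The attributes parsed here are, in order: enrollee nonce, registrar
+ * nonce and WLP association error, matching the layout built by
+ * wlp_build_assoc_f0() above.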
+ */
+int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_frame_assoc *f0 = (void *) skb->data;
+	void *ptr = skb->data;
+	size_t len = skb->len;
+	size_t used;
+	ssize_t result;
+	struct wlp_nonce enonce, rnonce;
+	enum wlp_assc_error assc_err;
+	char enonce_buf[WLP_WSS_NONCE_STRSIZE];
+	char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
+
+	used = sizeof(*f0);
+	result = wlp_get_enonce(wlp, ptr + used, &enonce, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Enrollee nonce "
+			"attribute from F0 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_rnonce(wlp, ptr + used, &rnonce, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Registrar nonce "
+			"attribute from F0 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WLP Association error "
+			"attribute from F0 message.\n");
+		goto error_parse;
+	}
+	wlp_wss_nonce_print(enonce_buf, sizeof(enonce_buf), &enonce);
+	wlp_wss_nonce_print(rnonce_buf, sizeof(rnonce_buf), &rnonce);
+	dev_err(dev, "WLP: Received F0 error frame from neighbor. Enrollee "
+		"nonce: %s, Registrar nonce: %s, WLP Association error: %s.\n",
+		enonce_buf, rnonce_buf, wlp_assc_error_str(assc_err));
+	result = 0;
+error_parse:
+	return result;
+}
+
+/**
+ * Retrieve variable device information from association message
+ *
+ * The device information parsed is not required in any message. This
+ * routine will thus not fail if an attribute is not present.
+ * The attributes are expected in a certain order, even if all are not
+ * present. The "attribute type" value is used to ensure the attributes
+ * are parsed in the correct order.
+ *
+ * If an error is encountered during parsing the function will return an
+ * error code; when this happens the given device_info structure may be
+ * partially filled.
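+ *
+ * The attributes handled here are, in the order they are expected:
+ * manufacturer, model name, model number, serial number and primary
+ * device type. Parsing stops at the first attribute of any other type.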
+ */
+static
+int wlp_get_variable_info(struct wlp *wlp, void *data,
+			  struct wlp_device_info *dev_info, ssize_t len)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	size_t used = 0;
+	struct wlp_attr_hdr *hdr;
+	ssize_t result = 0;
+	unsigned last = 0;
+
+	while (len - used > 0) {
+		if (len - used < sizeof(*hdr)) {
+			dev_err(dev, "WLP: Partial data in frame, cannot "
+				"parse. \n");
+			goto error_parse;
+		}
+		hdr = data + used;
+		switch (le16_to_cpu(hdr->type)) {
+		case WLP_ATTR_MANUF:
+			if (last >= WLP_ATTR_MANUF) {
+				dev_err(dev, "WLP: Incorrect order of "
+					"attribute values in D1 msg.\n");
+				goto error_parse;
+			}
+			result = wlp_get_manufacturer(wlp, data + used,
+						      dev_info->manufacturer,
+						      len - used);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to obtain "
+					"Manufacturer attribute from D1 "
+					"message.\n");
+				goto error_parse;
+			}
+			last = WLP_ATTR_MANUF;
+			used += result;
+			break;
+		case WLP_ATTR_MODEL_NAME:
+			if (last >= WLP_ATTR_MODEL_NAME) {
+				dev_err(dev, "WLP: Incorrect order of "
+					"attribute values in D1 msg.\n");
+				goto error_parse;
+			}
+			result = wlp_get_model_name(wlp, data + used,
+						    dev_info->model_name,
+						    len - used);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to obtain Model "
+					"name attribute from D1 message.\n");
+				goto error_parse;
+			}
+			last = WLP_ATTR_MODEL_NAME;
+			used += result;
+			break;
+		case WLP_ATTR_MODEL_NR:
+			if (last >= WLP_ATTR_MODEL_NR) {
+				dev_err(dev, "WLP: Incorrect order of "
+					"attribute values in D1 msg.\n");
+				goto error_parse;
+			}
+			result = wlp_get_model_nr(wlp, data + used,
+						  dev_info->model_nr,
+						  len - used);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to obtain Model "
+					"number attribute from D1 message.\n");
+				goto error_parse;
+			}
+			last = WLP_ATTR_MODEL_NR;
+			used += result;
+			break;
+		case WLP_ATTR_SERIAL:
+			if (last >= WLP_ATTR_SERIAL) {
+				dev_err(dev, "WLP: Incorrect order of "
+					"attribute values in D1 msg.\n");
+				goto error_parse;
+			}
+			result = wlp_get_serial(wlp, data + used,
+						dev_info->serial, len - used);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to obtain Serial "
+					"number attribute from D1 message.\n");
+				goto error_parse;
+			}
+			last = WLP_ATTR_SERIAL;
+			used += result;
+			break;
+		case WLP_ATTR_PRI_DEV_TYPE:
+			if (last >= WLP_ATTR_PRI_DEV_TYPE) {
+				dev_err(dev, "WLP: Incorrect order of "
+					"attribute values in D1 msg.\n");
+				goto error_parse;
+			}
+			result = wlp_get_prim_dev_type(wlp, data + used,
+						       &dev_info->prim_dev_type,
+						       len - used);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to obtain Primary "
+					"device type attribute from D1 "
+					"message.\n");
+				goto error_parse;
+			}
+			dev_info->prim_dev_type.category =
+				le16_to_cpu(dev_info->prim_dev_type.category);
+			dev_info->prim_dev_type.subID =
+				le16_to_cpu(dev_info->prim_dev_type.subID);
+			last = WLP_ATTR_PRI_DEV_TYPE;
+			used += result;
+			break;
+		default:
+			/* This is not variable device information. */
+			goto out;
+			break;
+		}
+	}
+out:
+	return used;
+error_parse:
+	return -EINVAL;
+}
+
+/**
+ * Parse incoming D1 frame, populate attribute values
+ *
+ * Caller provides pointers to memory already allocated for attributes
+ * expected in the D1 frame. These variables will be populated.
+ */
+static
+int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
+		       struct wlp_uuid *uuid_e,
+		       enum wlp_wss_sel_mthd *sel_mthd,
+		       struct wlp_device_info *dev_info,
+		       enum wlp_assc_error *assc_err)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_frame_assoc *d1 = (void *) skb->data;
+	void *ptr = skb->data;
+	size_t len = skb->len;
+	size_t used;
+	ssize_t result;
+
+	used = sizeof(*d1);
+	result = wlp_get_uuid_e(wlp, ptr + used, uuid_e, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain UUID-E attribute from D1 "
+			"message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wss_sel_mthd(wlp, ptr + used, sel_mthd, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS selection method "
+			"from D1 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_dev_name(wlp, ptr + used, dev_info->name,
+				     len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Device Name from D1 "
+			"message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Device Information from "
+			"D1 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wlp_assc_err(wlp, ptr + used, assc_err, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WLP Association Error "
+			"Information from D1 message.\n");
+		goto error_parse;
+	}
+	result = 0;
+error_parse:
+	return result;
+}
+
+/**
+ * Handle incoming D1 frame
+ *
+ * The frame has already been verified to contain an Association header with
+ * the correct version number. Parse the incoming frame, construct and send
+ * a D2 frame in response.
+ *
+ * It is not clear what to do with most fields in the incoming D1 frame. We
+ * retrieve and discard the information here for now.
+ */
+void wlp_handle_d1_frame(struct work_struct *ws)
+{
+	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
+						  struct wlp_assoc_frame_ctx,
+						  ws);
+	struct wlp *wlp = frame_ctx->wlp;
+	struct wlp_wss *wss = &wlp->wss;
+	struct sk_buff *skb = frame_ctx->skb;
+	struct uwb_dev_addr *src = &frame_ctx->src;
+	int result;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_uuid uuid_e;
+	enum wlp_wss_sel_mthd sel_mthd = 0;
+	struct wlp_device_info dev_info;
+	enum wlp_assc_error assc_err;
+	char uuid[WLP_WSS_UUID_STRSIZE];
+	struct sk_buff *resp = NULL;
+
+	/* Parse D1 frame */
+	d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n",
+		  wlp, skb);
+	mutex_lock(&wss->mutex);
+	mutex_lock(&wlp->mutex); /* to access wlp->uuid */
+	memset(&dev_info, 0, sizeof(dev_info));
+	result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info,
+				    &assc_err);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n");
+		kfree_skb(skb);
+		goto out;
+	}
+	wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e);
+	d_printf(6, dev, "From D1 frame:\n"
+		 "UUID-E: %s\n"
+		 "Selection method: %d\n"
+		 "Device name (%d bytes): %s\n"
+		 "Model name (%d bytes): %s\n"
+		 "Manufacturer (%d bytes): %s\n"
+		 "Model number (%d bytes): %s\n"
+		 "Serial number (%d bytes): %s\n"
+		 "Primary device type: \n"
+		 " Category: %d \n"
+		 " OUI: %02x:%02x:%02x \n"
+		 " OUI Subdivision: %u \n",
+		 uuid, sel_mthd,
+		 (int)strlen(dev_info.name), dev_info.name,
+		 (int)strlen(dev_info.model_name), dev_info.model_name,
+		 (int)strlen(dev_info.manufacturer), dev_info.manufacturer,
+		 (int)strlen(dev_info.model_nr),  dev_info.model_nr,
+		 (int)strlen(dev_info.serial), dev_info.serial,
+		 dev_info.prim_dev_type.category,
+		 dev_info.prim_dev_type.OUI[0],
+		 dev_info.prim_dev_type.OUI[1],
+		 dev_info.prim_dev_type.OUI[2],
+		 dev_info.prim_dev_type.OUIsubdiv);
+
+	kfree_skb(skb);
+	if (!wlp_uuid_is_set(&wlp->uuid)) {
+		dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
+			"proceed. Responding to D1 message with error F0.\n");
+		result = wlp_build_assoc_f0(wlp, &resp,
+					    WLP_ASSOC_ERROR_NOT_READY);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to construct F0 message.\n");
+			goto out;
+		}
+	} else {
+		/* Construct D2 frame */
+		result = wlp_build_assoc_d2(wlp, wss, &resp, &uuid_e);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to construct D2 message.\n");
+			goto out;
+		}
+	}
+	/* Send response frame (D2 or F0) */
+	BUG_ON(wlp->xmit_frame == NULL);
+	result = wlp->xmit_frame(wlp, resp, src);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to transmit D2 association "
+			"message: %d\n", result);
+		if (result == -ENXIO)
+			dev_err(dev, "WLP: Is network interface up? \n");
+		/* We could try again ... */
+		dev_kfree_skb_any(resp); /* we need to free if tx fails */
+	}
+out:
+	kfree(frame_ctx);
+	mutex_unlock(&wlp->mutex);
+	mutex_unlock(&wss->mutex);
+	d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp);
+}
+
+/**
+ * Parse incoming D2 frame, create and populate temporary cache
+ *
+ * @skb: socket buffer in which D2 frame can be found
+ * @neighbor: the neighbor that sent the D2 frame
+ *
+ * Will allocate memory for temporary storage of information learned during
+ * discovery.
+ */
+int wlp_parse_d2_frame_to_cache(struct wlp *wlp, struct sk_buff *skb,
+				struct wlp_neighbor_e *neighbor)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_frame_assoc *d2 = (void *) skb->data;
+	void *ptr = skb->data;
+	size_t len = skb->len;
+	size_t used;
+	ssize_t result;
+	struct wlp_uuid uuid_e;
+	struct wlp_device_info *nb_info;
+	enum wlp_assc_error assc_err;
+
+	used = sizeof(*d2);
+	result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
+			"message.\n");
+		goto error_parse;
+	}
+	if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
+		dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
+			"local UUID sent in D1.\n");
+		result = -EINVAL;
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_uuid_r(wlp, ptr + used, &neighbor->uuid, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
+			"message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wss_info_to_cache(wlp, ptr + used, neighbor,
+					   len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS information "
+			"from D2 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	neighbor->info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
+	if (neighbor->info == NULL) {
+		dev_err(dev, "WLP: cannot allocate memory to store device "
+			"info.\n");
+		result = -ENOMEM;
+		goto error_parse;
+	}
+	nb_info = neighbor->info;
+	result = wlp_get_dev_name(wlp, ptr + used, nb_info->name,
+				  len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Device Name from D2 "
+			"message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_variable_info(wlp, ptr + used, nb_info, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Device Information from "
+			"D2 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WLP Association Error "
+			"Information from D2 message.\n");
+		goto error_parse;
+	}
+	if (assc_err != WLP_ASSOC_ERROR_NONE) {
+		dev_err(dev, "WLP: neighbor device returned association "
+			"error %d\n", assc_err);
+		result = -EINVAL;
+		goto error_parse;
+	}
+	result = 0;
+error_parse:
+	if (result < 0)
+		wlp_remove_neighbor_tmp_info(neighbor);
+	return result;
+}
+
+/**
+ * Parse incoming D2 frame, populate attribute values of WSS being enrolled in
+ *
+ * @wss: our WSS that will be enrolled
+ * @skb: socket buffer in which D2 frame can be found
+ * @neighbor: the neighbor that sent the D2 frame
+ * @wssid: the wssid of the WSS in which we want to enroll
+ *
+ * Forms part of enrollment sequence. We are trying to enroll in WSS with
+ * @wssid by using @neighbor as registrar. A D1 message was sent to
+ * @neighbor and now we need to parse the D2 response. The neighbor's
+ * response is searched for the requested WSS and if found (and it accepts
+ * enrollment), we store the information.
+ */
+int wlp_parse_d2_frame_to_enroll(struct wlp_wss *wss, struct sk_buff *skb,
+				 struct wlp_neighbor_e *neighbor,
+				 struct wlp_uuid *wssid)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	void *ptr = skb->data;
+	size_t len = skb->len;
+	size_t used;
+	ssize_t result;
+	struct wlp_uuid uuid_e;
+	struct wlp_uuid uuid_r;
+	struct wlp_device_info nb_info;
+	enum wlp_assc_error assc_err;
+	char uuid_bufA[WLP_WSS_UUID_STRSIZE];
+	char uuid_bufB[WLP_WSS_UUID_STRSIZE];
+
+	used = sizeof(struct wlp_frame_assoc);
+	result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
+			"message.\n");
+		goto error_parse;
+	}
+	if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
+		dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
+			"local UUID sent in D1.\n");
+		result = -EINVAL;
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_uuid_r(wlp, ptr + used, &uuid_r, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
+			"message.\n");
+		goto error_parse;
+	}
+	if (memcmp(&uuid_r, &neighbor->uuid, sizeof(uuid_r))) {
+		wlp_wss_uuid_print(uuid_bufA, sizeof(uuid_bufA),
+				   &neighbor->uuid);
+		wlp_wss_uuid_print(uuid_bufB, sizeof(uuid_bufB), &uuid_r);
+		dev_err(dev, "WLP: UUID of neighbor does not match UUID "
+			"learned during discovery. Originally discovered: %s, "
+			"now from D2 message: %s\n", uuid_bufA, uuid_bufB);
+		result = -EINVAL;
+		goto error_parse;
+	}
+	used += result;
+	wss->wssid = *wssid;
+	result = wlp_get_wss_info_to_enroll(wlp, ptr + used, wss, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS information "
+			"from D2 message.\n");
+		goto error_parse;
+	}
+	if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
+		dev_err(dev, "WLP: D2 message did not contain information "
+			"for successful enrollment. \n");
+		result = -EINVAL;
+		goto error_parse;
+	}
+	used += result;
+	/* Place device information on stack to continue parsing of message */
+	result = wlp_get_dev_name(wlp, ptr + used, nb_info.name,
+				  len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Device Name from D2 "
+			"message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_variable_info(wlp, ptr + used, &nb_info, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain Device Information from "
+			"D2 message.\n");
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WLP Association Error "
+			"Information from D2 message.\n");
+		goto error_parse;
+	}
+	if (assc_err != WLP_ASSOC_ERROR_NONE) {
+		dev_err(dev, "WLP: neighbor device returned association "
+			"error %d\n", assc_err);
+		if (wss->state == WLP_WSS_STATE_PART_ENROLLED) {
+			dev_err(dev, "WLP: Enrolled in WSS (should not "
+				"happen according to spec). Undoing. \n");
+			wlp_wss_reset(wss);
+		}
+		result = -EINVAL;
+		goto error_parse;
+	}
+	result = 0;
+error_parse:
+	return result;
+}
+
+/**
+ * Parse C3/C4 frame into provided variables
+ *
+ * @wssid: will point to copy of wssid retrieved from C3/C4 frame
+ * @tag:   will point to copy of tag retrieved from C3/C4 frame
+ * @virt_addr: will point to copy of virtual address retrieved from C3/C4
+ * frame.
+ *
+ * The caller must allocate memory for these values.
+ *
+ * The skb contains a valid C3/C4 frame; return the individual fields of
+ * this frame in the provided variables.
+ */
+int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb,
+		       struct wlp_uuid *wssid, u8 *tag,
+		       struct uwb_mac_addr *virt_addr)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result;
+	void *ptr = skb->data;
+	size_t len = skb->len;
+	size_t used;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	struct wlp_frame_assoc *assoc = ptr;
+
+	d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
+	used = sizeof(*assoc);
+	result = wlp_get_wssid(wlp, ptr + used, wssid, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSSID attribute from "
+			"%s message.\n", wlp_assoc_frame_str(assoc->type));
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wss_tag(wlp, ptr + used, tag, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS tag attribute from "
+			"%s message.\n", wlp_assoc_frame_str(assoc->type));
+		goto error_parse;
+	}
+	used += result;
+	result = wlp_get_wss_virt(wlp, ptr + used, virt_addr, len - used);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSS virtual address "
+			"attribute from %s message.\n",
+			wlp_assoc_frame_str(assoc->type));
+		goto error_parse;
+	}
+	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt "
+		 "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag,
+		 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
+		 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
+
+error_parse:
+	d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
+	return result;
+}
+
+/**
+ * Allocate memory for and populate fields of C1 or C2 association frame
+ *
+ * The C1 and C2 association frames are identical except for the type field.
+ */
+static
+int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss,
+			 struct sk_buff **skb, enum wlp_assoc_type type)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result  = -ENOMEM;
+	struct {
+		struct wlp_frame_assoc c_hdr;
+		struct wlp_attr_wssid wssid;
+	} *c;
+	struct sk_buff *_skb;
+
+	d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
+	_skb = dev_alloc_skb(sizeof(*c));
+	if (_skb == NULL) {
+		dev_err(dev, "WLP: Unable to allocate memory for C1/C2 "
+			"association frame. \n");
+		goto error_alloc;
+	}
+	c = (void *) _skb->data;
+	d_printf(6, dev, "C1/C2 starts at %p \n", c);
+	c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+	c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
+	c->c_hdr.type = type;
+	wlp_set_version(&c->c_hdr.version, WLP_VERSION);
+	wlp_set_msg_type(&c->c_hdr.msg_type, type);
+	wlp_set_wssid(&c->wssid, &wss->wssid);
+	skb_put(_skb, sizeof(*c));
+	d_printf(6, dev, "C1/C2 message:\n");
+	d_dump(6, dev, c, sizeof(*c));
+	*skb = _skb;
+	result = 0;
+error_alloc:
+	d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
+	return result;
+}
+
+
+static
+int wlp_build_assoc_c1(struct wlp *wlp, struct wlp_wss *wss,
+		       struct sk_buff **skb)
+{
+	return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C1);
+}
+
+static
+int wlp_build_assoc_c2(struct wlp *wlp, struct wlp_wss *wss,
+		       struct sk_buff **skb)
+{
+	return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C2);
+}
+
+
+/**
+ * Allocate memory for and populate fields of C3 or C4 association frame
+ *
+ * The C3 and C4 association frames are identical except for the type field.
+ */
+static
+int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss,
+			 struct sk_buff **skb, enum wlp_assoc_type type)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result  = -ENOMEM;
+	struct {
+		struct wlp_frame_assoc c_hdr;
+		struct wlp_attr_wssid wssid;
+		struct wlp_attr_wss_tag wss_tag;
+		struct wlp_attr_wss_virt wss_virt;
+	} *c;
+	struct sk_buff *_skb;
+
+	d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
+	_skb = dev_alloc_skb(sizeof(*c));
+	if (_skb == NULL) {
+		dev_err(dev, "WLP: Unable to allocate memory for C3/C4 "
+			"association frame. \n");
+		goto error_alloc;
+	}
+	c = (void *) _skb->data;
+	d_printf(6, dev, "C3/C4 starts at %p \n", c);
+	c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+	c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
+	c->c_hdr.type = type;
+	wlp_set_version(&c->c_hdr.version, WLP_VERSION);
+	wlp_set_msg_type(&c->c_hdr.msg_type, type);
+	wlp_set_wssid(&c->wssid, &wss->wssid);
+	wlp_set_wss_tag(&c->wss_tag, wss->tag);
+	wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr);
+	skb_put(_skb, sizeof(*c));
+	d_printf(6, dev, "C3/C4 message:\n");
+	d_dump(6, dev, c, sizeof(*c));
+	*skb = _skb;
+	result = 0;
+error_alloc:
+	d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
+	return result;
+}
+
+static
+int wlp_build_assoc_c3(struct wlp *wlp, struct wlp_wss *wss,
+		       struct sk_buff **skb)
+{
+	return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C3);
+}
+
+static
+int wlp_build_assoc_c4(struct wlp *wlp, struct wlp_wss *wss,
+		       struct sk_buff **skb)
+{
+	return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C4);
+}
+
+
+#define wlp_send_assoc(type, id)					\
+static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss,	\
+				 struct uwb_dev_addr *dev_addr)		\
+{									\
+	struct device *dev = &wlp->rc->uwb_dev.dev;			\
+	int result;							\
+	struct sk_buff *skb = NULL;					\
+	d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n",	\
+		  wlp, wss, dev_addr->data[1], dev_addr->data[0]);	\
+	d_printf(6, dev, "WLP: Constructing %s frame. \n",		\
+		 wlp_assoc_frame_str(id));				\
+	/* Build the frame */						\
+	result = wlp_build_assoc_##type(wlp, wss, &skb);		\
+	if (result < 0) {						\
+		dev_err(dev, "WLP: Unable to construct %s association "	\
+			"frame: %d\n", wlp_assoc_frame_str(id), result);\
+		goto error_build_assoc;					\
+	}								\
+	/* Send the frame */						\
+	d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n",	\
+		 wlp_assoc_frame_str(id),				\
+		 dev_addr->data[1], dev_addr->data[0]);			\
+	BUG_ON(wlp->xmit_frame == NULL);				\
+	result = wlp->xmit_frame(wlp, skb, dev_addr);			\
+	if (result < 0) {						\
+		dev_err(dev, "WLP: Unable to transmit %s association "	\
+			"message: %d\n", wlp_assoc_frame_str(id),	\
+			result);					\
+		if (result == -ENXIO)					\
+			dev_err(dev, "WLP: Is network interface "	\
+				"up? \n");				\
+		goto error_xmit;					\
+	}								\
+	return 0;							\
+error_xmit:								\
+	/* We could try again ... */					\
+	dev_kfree_skb_any(skb); /* we need to free if tx fails */	\
+error_build_assoc:							\
+	d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n",	\
+		wlp, wss, dev_addr->data[1], dev_addr->data[0]);	\
+	return result;							\
+}
+
+wlp_send_assoc(d1, WLP_ASSOC_D1)
+wlp_send_assoc(c1, WLP_ASSOC_C1)
+wlp_send_assoc(c3, WLP_ASSOC_C3)
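+/*
+ * The wlp_send_assoc() macro above expands into wlp_send_assoc_d1(),
+ * wlp_send_assoc_c1() and wlp_send_assoc_c3(), which are dispatched by
+ * wlp_send_assoc_frame() below.
+ */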
+
+int wlp_send_assoc_frame(struct wlp *wlp, struct wlp_wss *wss,
+			 struct uwb_dev_addr *dev_addr,
+			 enum wlp_assoc_type type)
+{
+	int result = 0;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	switch (type) {
+	case WLP_ASSOC_D1:
+		result = wlp_send_assoc_d1(wlp, wss, dev_addr);
+		break;
+	case WLP_ASSOC_C1:
+		result = wlp_send_assoc_c1(wlp, wss, dev_addr);
+		break;
+	case WLP_ASSOC_C3:
+		result = wlp_send_assoc_c3(wlp, wss, dev_addr);
+		break;
+	default:
+		dev_err(dev, "WLP: Received request to send unknown "
+			"association message.\n");
+		result = -EINVAL;
+		break;
+	}
+	return result;
+}
+
+/**
+ * Handle incoming C1 frame
+ *
+ * The frame has already been verified to contain an Association header with
+ * the correct version number. Parse the incoming frame, construct and send
+ * a C2 frame in response.
+ */
+void wlp_handle_c1_frame(struct work_struct *ws)
+{
+	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
+						  struct wlp_assoc_frame_ctx,
+						  ws);
+	struct wlp *wlp = frame_ctx->wlp;
+	struct wlp_wss *wss = &wlp->wss;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_frame_assoc *c1 = (void *) frame_ctx->skb->data;
+	unsigned int len = frame_ctx->skb->len;
+	struct uwb_dev_addr *src = &frame_ctx->src;
+	int result;
+	struct wlp_uuid wssid;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	struct sk_buff *resp = NULL;
+
+	/* Parse C1 frame */
+	d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n",
+		  wlp, c1);
+	mutex_lock(&wss->mutex);
+	result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid,
+			       len - sizeof(*c1));
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n");
+		goto out;
+	}
+	wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
+	d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf);
+	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
+	    && wss->state == WLP_WSS_STATE_ACTIVE) {
+		d_printf(6, dev, "WSSID from C1 frame is known locally "
+			 "and is active\n");
+		/* Construct C2 frame */
+		result = wlp_build_assoc_c2(wlp, wss, &resp);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to construct C2 message.\n");
+			goto out;
+		}
+	} else {
+		d_printf(6, dev, "WSSID from C1 frame is not known locally "
+			 "or is not active\n");
+		/* Construct F0 frame */
+		result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to construct F0 message.\n");
+			goto out;
+		}
+	}
+	/* Send C2 frame */
+	d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n",
+		 src->data[1], src->data[0]);
+	BUG_ON(wlp->xmit_frame == NULL);
+	result = wlp->xmit_frame(wlp, resp, src);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to transmit response association "
+			"message: %d\n", result);
+		if (result == -ENXIO)
+			dev_err(dev, "WLP: Is network interface up? \n");
+		/* We could try again ... */
+		dev_kfree_skb_any(resp); /* we need to free if tx fails */
+	}
+out:
+	kfree_skb(frame_ctx->skb);
+	kfree(frame_ctx);
+	mutex_unlock(&wss->mutex);
+	d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp);
+}
+
+/**
+ * Handle incoming C3 frame
+ *
+ * The frame has already been verified to contain an Association header with
+ * the correct version number. Parse the incoming frame, construct and send
+ * a C4 frame in response. If the C3 frame identifies a WSS that is locally
+ * active then we connect to this neighbor (add it to our EDA cache).
+ */
+void wlp_handle_c3_frame(struct work_struct *ws)
+{
+	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
+						  struct wlp_assoc_frame_ctx,
+						  ws);
+	struct wlp *wlp = frame_ctx->wlp;
+	struct wlp_wss *wss = &wlp->wss;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct sk_buff *skb = frame_ctx->skb;
+	struct uwb_dev_addr *src = &frame_ctx->src;
+	int result;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	struct sk_buff *resp = NULL;
+	struct wlp_uuid wssid;
+	u8 tag;
+	struct uwb_mac_addr virt_addr;
+
+	/* Parse C3 frame */
+	d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
+		  wlp, skb);
+	mutex_lock(&wss->mutex);
+	result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain values from C3 frame.\n");
+		goto out;
+	}
+	wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
+	d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf);
+	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
+	    && wss->state >= WLP_WSS_STATE_ACTIVE) {
+		d_printf(6, dev, "WSSID from C3 frame is known locally "
+			 "and is active\n");
+		result = wlp_eda_update_node(&wlp->eda, src, wss,
+					     (void *) virt_addr.data, tag,
+					     WLP_WSS_CONNECTED);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to update EDA cache "
+				"with new connected neighbor information.\n");
+			result = wlp_build_assoc_f0(wlp, &resp,
+						    WLP_ASSOC_ERROR_INT);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to construct F0 "
+					"message.\n");
+				goto out;
+			}
+		} else {
+			wss->state = WLP_WSS_STATE_CONNECTED;
+			/* Construct C4 frame */
+			result = wlp_build_assoc_c4(wlp, wss, &resp);
+			if (result < 0) {
+				dev_err(dev, "WLP: Unable to construct C4 "
+					"message.\n");
+				goto out;
+			}
+		}
+	} else {
+		d_printf(6, dev, "WSSID from C3 frame is not known locally "
+			 "or is not active\n");
+		/* Construct F0 frame */
+		result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to construct F0 message.\n");
+			goto out;
+		}
+	}
+	/* Send C4 frame */
+	d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n",
+		 src->data[1], src->data[0]);
+	BUG_ON(wlp->xmit_frame == NULL);
+	result = wlp->xmit_frame(wlp, resp, src);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to transmit response association "
+			"message: %d\n", result);
+		if (result == -ENXIO)
+			dev_err(dev, "WLP: Is network interface up? \n");
+		/* We could try again ... */
+		dev_kfree_skb_any(resp); /* we need to free if tx fails */
+	}
+out:
+	kfree_skb(frame_ctx->skb);
+	kfree(frame_ctx);
+	mutex_unlock(&wss->mutex);
+	d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
+		wlp, skb);
+}
+
+
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
new file mode 100644
index 0000000..1bb9b1f
--- /dev/null
+++ b/drivers/uwb/wlp/sysfs.c
@@ -0,0 +1,709 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * sysfs functions
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: Docs
+ *
+ */
+
+#include <linux/wlp.h>
+#include "wlp-internal.h"
+
+static
+size_t wlp_wss_wssid_e_print(char *buf, size_t bufsize,
+			     struct wlp_wssid_e *wssid_e)
+{
+	size_t used = 0;
+	used += scnprintf(buf, bufsize, " WSS: ");
+	used += wlp_wss_uuid_print(buf + used, bufsize - used,
+				   &wssid_e->wssid);
+
+	if (wssid_e->info != NULL) {
+		used += scnprintf(buf + used, bufsize - used, " ");
+		used += uwb_mac_addr_print(buf + used, bufsize - used,
+					   &wssid_e->info->bcast);
+		used += scnprintf(buf + used, bufsize - used, " %u %u %s\n",
+				  wssid_e->info->accept_enroll,
+				  wssid_e->info->sec_status,
+				  wssid_e->info->name);
+	}
+	return used;
+}
+
+/**
+ * Print out information learned from neighbor discovery
+ *
+ * Some fields being printed may not be included in the device discovery
+ * information (it is not mandatory). We are thus careful how the
+ * information is printed to ensure it is clear to the user what field is
+ * being referenced.
+ * The information being printed is for one-time use: the temporary storage
+ * is cleared after it is printed.
+ *
+ * Ideally sysfs output should be on one line. The information printed here
+ * contains a few strings, so it would be hard to parse if it were all
+ * printed on the same line without agreeing on a standard field
+ * separator.
+ */
+static
+ssize_t wlp_wss_neighborhood_print_remove(struct wlp *wlp, char *buf,
+				   size_t bufsize)
+{
+	size_t used = 0;
+	struct wlp_neighbor_e *neighb;
+	struct wlp_wssid_e *wssid_e;
+
+	mutex_lock(&wlp->nbmutex);
+	used = scnprintf(buf, bufsize, "#Neighbor information\n"
+			 "#uuid dev_addr\n"
+			 "# Device Name:\n# Model Name:\n# Manufacturer:\n"
+			 "# Model Nr:\n# Serial:\n"
+			 "# Pri Dev type: CategoryID OUI OUISubdiv "
+			 "SubcategoryID\n"
+			 "# WSS: WSSID WSS_name accept_enroll sec_status "
+			 "bcast\n"
+			 "# WSS: WSSID WSS_name accept_enroll sec_status "
+			 "bcast\n\n");
+	list_for_each_entry(neighb, &wlp->neighbors, node) {
+		if (bufsize - used <= 0)
+			goto out;
+		used += wlp_wss_uuid_print(buf + used, bufsize - used,
+					   &neighb->uuid);
+		buf[used++] = ' ';
+		used += uwb_dev_addr_print(buf + used, bufsize - used,
+					   &neighb->uwb_dev->dev_addr);
+		if (neighb->info != NULL)
+			used += scnprintf(buf + used, bufsize - used,
+					  "\n Device Name: %s\n"
+					  " Model Name: %s\n"
+					  " Manufacturer:%s \n"
+					  " Model Nr: %s\n"
+					  " Serial: %s\n"
+					  " Pri Dev type: "
+					  "%u %02x:%02x:%02x %u %u\n",
+					  neighb->info->name,
+					  neighb->info->model_name,
+					  neighb->info->manufacturer,
+					  neighb->info->model_nr,
+					  neighb->info->serial,
+					  neighb->info->prim_dev_type.category,
+					  neighb->info->prim_dev_type.OUI[0],
+					  neighb->info->prim_dev_type.OUI[1],
+					  neighb->info->prim_dev_type.OUI[2],
+					  neighb->info->prim_dev_type.OUIsubdiv,
+					  neighb->info->prim_dev_type.subID);
+		list_for_each_entry(wssid_e, &neighb->wssid, node) {
+			used += wlp_wss_wssid_e_print(buf + used,
+						      bufsize - used,
+						      wssid_e);
+		}
+		buf[used++] = '\n';
+		wlp_remove_neighbor_tmp_info(neighb);
+	}
+
+
+out:
+	mutex_unlock(&wlp->nbmutex);
+	return used;
+}
+
+
+/**
+ * Show properties of all WSS in neighborhood.
+ *
+ * Will trigger a complete discovery of WSS activated by this device and
+ * its neighbors.
+ */
+ssize_t wlp_neighborhood_show(struct wlp *wlp, char *buf)
+{
+	wlp_discover(wlp);
+	return wlp_wss_neighborhood_print_remove(wlp, buf, PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(wlp_neighborhood_show);
+
+static
+ssize_t __wlp_wss_properties_show(struct wlp_wss *wss, char *buf,
+				  size_t bufsize)
+{
+	ssize_t result;
+
+	result = wlp_wss_uuid_print(buf, bufsize, &wss->wssid);
+	result += scnprintf(buf + result, bufsize - result, " ");
+	result += uwb_mac_addr_print(buf + result, bufsize - result,
+				     &wss->bcast);
+	result += scnprintf(buf + result, bufsize - result,
+			    " 0x%02x %u ", wss->hash, wss->secure_status);
+	result += wlp_wss_key_print(buf + result, bufsize - result,
+				    wss->master_key);
+	result += scnprintf(buf + result, bufsize - result, " 0x%02x ",
+			    wss->tag);
+	result += uwb_mac_addr_print(buf + result, bufsize - result,
+				     &wss->virtual_addr);
+	result += scnprintf(buf + result, bufsize - result, " %s", wss->name);
+	result += scnprintf(buf + result, bufsize - result,
+			    "\n\n#WSSID\n#WSS broadcast address\n"
+			    "#WSS hash\n#WSS secure status\n"
+			    "#WSS master key\n#WSS local tag\n"
+			    "#WSS local virtual EUI-48\n#WSS name\n");
+	return result;
+}
+
+/**
+ * Show which WSS is activated.
+ */
+ssize_t wlp_wss_activate_show(struct wlp_wss *wss, char *buf)
+{
+	int result = 0;
+
+	if (mutex_lock_interruptible(&wss->mutex))
+		goto out;
+	if (wss->state >= WLP_WSS_STATE_ACTIVE)
+		result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
+	else
+		result = scnprintf(buf, PAGE_SIZE, "No local WSS active.\n");
+	result += scnprintf(buf + result, PAGE_SIZE - result,
+			"\n\n"
+			"# echo WSSID SECURE_STATUS ACCEPT_ENROLLMENT "
+			"NAME #create new WSS\n"
+			"# echo WSSID [DEV ADDR] #enroll in and activate "
+			"existing WSS, can request registrar\n"
+			"#\n"
+			"# WSSID is a 16 byte hex array. Eg. 12 A3 3B ... \n"
+			"# SECURE_STATUS 0 - unsecure, 1 - secure (default)\n"
+			"# ACCEPT_ENROLLMENT 0 - no, 1 - yes (default)\n"
+			"# NAME is the text string identifying the WSS\n"
+			"# DEV ADDR is the device address of neighbor "
+			"that should be registrar. Eg. 32:AB\n");
+
+	mutex_unlock(&wss->mutex);
+out:
+	return result;
+
+}
+EXPORT_SYMBOL_GPL(wlp_wss_activate_show);
+
+/**
+ * Create/activate a new WSS or enroll/activate in neighboring WSS
+ *
+ * The user can provide the WSSID of a WSS in which it wants to enroll.
+ * Only the WSSID is necessary if the WSS has been discovered before. If
+ * the WSS has not been discovered before, or the user wants to use a
+ * particular neighbor as its registrar, then the user can also provide the
+ * device address of the neighbor that will be used as registrar.
+ *
+ * A new WSS is created when the user provides a WSSID, secure status, and
+ * WSS name.
+ */
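+/*
+ * Usage sketch (illustrative only; the exact sysfs path and attribute name
+ * depend on how the WLP client driver registers this attribute, and the
+ * WSS name "MyWSS" is hypothetical):
+ *
+ *   create and activate a new WSS (WSSID, secure status, accept enroll, name):
+ *     echo "12 A3 3B 44 55 66 77 88 99 AA BB CC DD EE FF 00 1 1 MyWSS" > wss_activate
+ *
+ *   enroll in and activate a discovered WSS, using neighbor 32:AB as registrar:
+ *     echo "12 A3 3B 44 55 66 77 88 99 AA BB CC DD EE FF 00 32:AB" > wss_activate
+ */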
+ssize_t wlp_wss_activate_store(struct wlp_wss *wss,
+			       const char *buf, size_t size)
+{
+	ssize_t result = -EINVAL;
+	struct wlp_uuid wssid;
+	struct uwb_dev_addr dev;
+	struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
+	char name[65];
+	unsigned sec_status, accept;
+	memset(name, 0, sizeof(name));
+	result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx:%02hhx",
+			&wssid.data[0] , &wssid.data[1],
+			&wssid.data[2] , &wssid.data[3],
+			&wssid.data[4] , &wssid.data[5],
+			&wssid.data[6] , &wssid.data[7],
+			&wssid.data[8] , &wssid.data[9],
+			&wssid.data[10], &wssid.data[11],
+			&wssid.data[12], &wssid.data[13],
+			&wssid.data[14], &wssid.data[15],
+			&dev.data[1], &dev.data[0]);
+	if (result == 16 || result == 17) {
+		result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
+				"%02hhx %02hhx %02hhx %02hhx "
+				"%02hhx %02hhx %02hhx %02hhx "
+				"%02hhx %02hhx %02hhx %02hhx "
+				"%u %u %64c",
+				&wssid.data[0] , &wssid.data[1],
+				&wssid.data[2] , &wssid.data[3],
+				&wssid.data[4] , &wssid.data[5],
+				&wssid.data[6] , &wssid.data[7],
+				&wssid.data[8] , &wssid.data[9],
+				&wssid.data[10], &wssid.data[11],
+				&wssid.data[12], &wssid.data[13],
+				&wssid.data[14], &wssid.data[15],
+				&sec_status, &accept, name);
+		if (result == 16)
+			result = wlp_wss_enroll_activate(wss, &wssid, &bcast);
+		else if (result == 19) {
+			sec_status = sec_status == 0 ? 0 : 1;
+			accept = accept == 0 ? 0 : 1;
+			/* We read name using %c, so the newline needs to be
+			 * removed */
+			if (strlen(name) != sizeof(name) - 1)
+				name[strlen(name) - 1] = '\0';
+			result = wlp_wss_create_activate(wss, &wssid, name,
+							 sec_status, accept);
+		} else
+			result = -EINVAL;
+	} else if (result == 18)
+		result = wlp_wss_enroll_activate(wss, &wssid, &dev);
+	else
+		result = -EINVAL;
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_wss_activate_store);
+
+/**
+ * Show the UUID of this host
+ */
+ssize_t wlp_uuid_show(struct wlp *wlp, char *buf)
+{
+	ssize_t result = 0;
+
+	mutex_lock(&wlp->mutex);
+	result = wlp_wss_uuid_print(buf, PAGE_SIZE, &wlp->uuid);
+	buf[result++] = '\n';
+	mutex_unlock(&wlp->mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_uuid_show);
+
+/**
+ * Store a new UUID for this host
+ *
+ * According to the spec this should be encoded as an octet string in the
+ * order the octets are shown in string representation in RFC 4122 (WLP
+ * 0.99 [Table 6])
+ *
+ * We do not validate the value provided by the user.
+ */
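+/*
+ * Usage sketch (illustrative only; the attribute file name depends on how
+ * the WLP client driver registers it): the UUID is written as 16
+ * space-separated hex octets, e.g.
+ *
+ *   echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" > wlp_uuid
+ */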
+ssize_t wlp_uuid_store(struct wlp *wlp, const char *buf, size_t size)
+{
+	ssize_t result;
+	struct wlp_uuid uuid;
+
+	mutex_lock(&wlp->mutex);
+	result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx "
+			"%02hhx %02hhx %02hhx %02hhx ",
+			&uuid.data[0] , &uuid.data[1],
+			&uuid.data[2] , &uuid.data[3],
+			&uuid.data[4] , &uuid.data[5],
+			&uuid.data[6] , &uuid.data[7],
+			&uuid.data[8] , &uuid.data[9],
+			&uuid.data[10], &uuid.data[11],
+			&uuid.data[12], &uuid.data[13],
+			&uuid.data[14], &uuid.data[15]);
+	if (result != 16) {
+		result = -EINVAL;
+		goto error;
+	}
+	wlp->uuid = uuid;
+error:
+	mutex_unlock(&wlp->mutex);
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_uuid_store);
+
+/**
+ * Show contents of members of device information structure
+ */
+#define wlp_dev_info_show(type)						\
+ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf)		\
+{									\
+	ssize_t result = 0;						\
+	mutex_lock(&wlp->mutex);					\
+	if (wlp->dev_info == NULL) {					\
+		result = __wlp_setup_device_info(wlp);			\
+		if (result < 0)						\
+			goto out;					\
+	}								\
+	result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\
+out:									\
+	mutex_unlock(&wlp->mutex);					\
+	return result;							\
+}									\
+EXPORT_SYMBOL_GPL(wlp_dev_##type##_show);
+
+wlp_dev_info_show(name)
+wlp_dev_info_show(model_name)
+wlp_dev_info_show(model_nr)
+wlp_dev_info_show(manufacturer)
+wlp_dev_info_show(serial)
+
+/**
+ * Store contents of members of device information structure
+ */
+#define wlp_dev_info_store(type, len)					\
+ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\
+{									\
+	ssize_t result;							\
+	char format[10];						\
+	mutex_lock(&wlp->mutex);					\
+	if (wlp->dev_info == NULL) {					\
+		result = __wlp_alloc_device_info(wlp);			\
+		if (result < 0)						\
+			goto out;					\
+	}								\
+	memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type));	\
+	sprintf(format, "%%%uc", len);					\
+	result = sscanf(buf, format, wlp->dev_info->type);		\
+out:									\
+	mutex_unlock(&wlp->mutex);					\
+	return result < 0 ? result : size;				\
+}									\
+EXPORT_SYMBOL_GPL(wlp_dev_##type##_store);
+
+wlp_dev_info_store(name, 32)
+wlp_dev_info_store(manufacturer, 64)
+wlp_dev_info_store(model_name, 32)
+wlp_dev_info_store(model_nr, 32)
+wlp_dev_info_store(serial, 32)
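+/*
+ * Note: the wlp_dev_info_show()/wlp_dev_info_store() macros above expand
+ * into wlp_dev_<member>_show()/_store() pairs for the name, model_name,
+ * model_nr, manufacturer and serial members of struct wlp_device_info;
+ * the numeric argument to the store macro is the maximum number of
+ * characters accepted from the user.
+ */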
+
+static
+const char *__wlp_dev_category[] = {
+	[WLP_DEV_CAT_COMPUTER] = "Computer",
+	[WLP_DEV_CAT_INPUT] = "Input device",
+	[WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER] = "Printer, scanner, FAX, or "
+					      "Copier",
+	[WLP_DEV_CAT_CAMERA] = "Camera",
+	[WLP_DEV_CAT_STORAGE] = "Storage Network",
+	[WLP_DEV_CAT_INFRASTRUCTURE] = "Infrastructure",
+	[WLP_DEV_CAT_DISPLAY] = "Display",
+	[WLP_DEV_CAT_MULTIM] = "Multimedia device",
+	[WLP_DEV_CAT_GAMING] = "Gaming device",
+	[WLP_DEV_CAT_TELEPHONE] = "Telephone",
+	[WLP_DEV_CAT_OTHER] = "Other",
+};
+
+static
+const char *wlp_dev_category_str(unsigned cat)
+{
+	if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
+	    || cat == WLP_DEV_CAT_OTHER)
+		return __wlp_dev_category[cat];
+	return "unknown category";
+}
+
+ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf)
+{
+	ssize_t result = 0;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_setup_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = scnprintf(buf, PAGE_SIZE, "%s\n",
+		  wlp_dev_category_str(wlp->dev_info->prim_dev_type.category));
+out:
+	mutex_unlock(&wlp->mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_category_show);
+
+ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf,
+				    size_t size)
+{
+	ssize_t result;
+	u16 cat;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_alloc_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = sscanf(buf, "%hu", &cat);
+	if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
+	    || cat == WLP_DEV_CAT_OTHER)
+		wlp->dev_info->prim_dev_type.category = cat;
+	else
+		result = -EINVAL;
+out:
+	mutex_unlock(&wlp->mutex);
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_category_store);
+
+ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf)
+{
+	ssize_t result = 0;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_setup_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n",
+			   wlp->dev_info->prim_dev_type.OUI[0],
+			   wlp->dev_info->prim_dev_type.OUI[1],
+			   wlp->dev_info->prim_dev_type.OUI[2]);
+out:
+	mutex_unlock(&wlp->mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_show);
+
+ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size)
+{
+	ssize_t result;
+	u8 OUI[3];
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_alloc_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = sscanf(buf, "%hhx:%hhx:%hhx",
+			&OUI[0], &OUI[1], &OUI[2]);
+	if (result != 3) {
+		result = -EINVAL;
+		goto out;
+	} else
+		memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI));
+out:
+	mutex_unlock(&wlp->mutex);
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_store);
+
+
+ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf)
+{
+	ssize_t result = 0;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_setup_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = scnprintf(buf, PAGE_SIZE, "%u\n",
+			   wlp->dev_info->prim_dev_type.OUIsubdiv);
+out:
+	mutex_unlock(&wlp->mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_show);
+
+ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf,
+				   size_t size)
+{
+	ssize_t result;
+	unsigned sub;
+	u8 max_sub = ~0;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_alloc_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = sscanf(buf, "%u", &sub);
+	if (sub <= max_sub)
+		wlp->dev_info->prim_dev_type.OUIsubdiv = sub;
+	else
+		result = -EINVAL;
+out:
+	mutex_unlock(&wlp->mutex);
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_store);
+
+ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf)
+{
+	ssize_t result = 0;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_setup_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = scnprintf(buf, PAGE_SIZE, "%u\n",
+			   wlp->dev_info->prim_dev_type.subID);
+out:
+	mutex_unlock(&wlp->mutex);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_show);
+
+ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf,
+				  size_t size)
+{
+	ssize_t result;
+	unsigned sub;
+	__le16 max_sub = ~0;
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info == NULL) {
+		result = __wlp_alloc_device_info(wlp);
+		if (result < 0)
+			goto out;
+	}
+	result = sscanf(buf, "%u", &sub);
+	if (sub <= max_sub)
+		wlp->dev_info->prim_dev_type.subID = sub;
+	else
+		result = -EINVAL;
+out:
+	mutex_unlock(&wlp->mutex);
+	return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_store);
+
+/**
+ * Subsystem implementation for interaction with individual WSS via sysfs
+ *
+ * Follows the subsystem guidelines in Documentation/filesystems/sysfs.txt
+ */
+
+#define kobj_to_wlp_wss(obj) container_of(obj, struct wlp_wss, kobj)
+#define attr_to_wlp_wss_attr(_attr) \
+	container_of(_attr, struct wlp_wss_attribute, attr)
+
+/**
+ * Sysfs subsystem: forward read calls
+ *
+ * Sysfs operation for forwarding read call to the show method of the
+ * attribute owner
+ */
+static
+ssize_t wlp_wss_attr_show(struct kobject *kobj, struct attribute *attr,
+			  char *buf)
+{
+	struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
+	struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
+	ssize_t ret = -EIO;
+
+	if (wss_attr->show)
+		ret = wss_attr->show(wss, buf);
+	return ret;
+}
+/**
+ * Sysfs subsystem: forward write calls
+ *
+ * Sysfs operation for forwarding write call to the store method of the
+ * attribute owner
+ */
+static
+ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
+	struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
+	ssize_t ret = -EIO;
+
+	if (wss_attr->store)
+		ret = wss_attr->store(wss, buf, count);
+	return ret;
+}
+
+static
+struct sysfs_ops wss_sysfs_ops = {
+	.show	= wlp_wss_attr_show,
+	.store	= wlp_wss_attr_store,
+};
+
+struct kobj_type wss_ktype = {
+	.release	= wlp_wss_release,
+	.sysfs_ops	= &wss_sysfs_ops,
+};
+
+
+/**
+ * Sysfs files for individual WSS
+ */
+
+/**
+ * Print static properties of this WSS
+ *
+ * The name of a WSS may not be null terminated. Its maximum size is 64
+ * bytes, so we copy it to a larger array just to make sure we print sane
+ * data.
+ */
+static ssize_t wlp_wss_properties_show(struct wlp_wss *wss, char *buf)
+{
+	int result = 0;
+
+	if (mutex_lock_interruptible(&wss->mutex))
+		goto out;
+	result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
+	mutex_unlock(&wss->mutex);
+out:
+	return result;
+}
+WSS_ATTR(properties, S_IRUGO, wlp_wss_properties_show, NULL);
+
+/**
+ * Print all connected members of this WSS
+ * The EDA cache contains all members of WSS neighborhood.
+ */
+static ssize_t wlp_wss_members_show(struct wlp_wss *wss, char *buf)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	return wlp_eda_show(wlp, buf);
+}
+WSS_ATTR(members, S_IRUGO, wlp_wss_members_show, NULL);
+
+static
+const char *__wlp_strstate[] = {
+	"none",
+	"partially enrolled",
+	"enrolled",
+	"active",
+	"connected",
+};
+
+static const char *wlp_wss_strstate(unsigned state)
+{
+	if (state >= ARRAY_SIZE(__wlp_strstate))
+		return "unknown state";
+	return __wlp_strstate[state];
+}
+
+/*
+ * Print current state of this WSS
+ */
+static ssize_t wlp_wss_state_show(struct wlp_wss *wss, char *buf)
+{
+	int result = 0;
+
+	if (mutex_lock_interruptible(&wss->mutex))
+		goto out;
+	result = scnprintf(buf, PAGE_SIZE, "%s\n",
+			   wlp_wss_strstate(wss->state));
+	mutex_unlock(&wss->mutex);
+out:
+	return result;
+}
+WSS_ATTR(state, S_IRUGO, wlp_wss_state_show, NULL);
+
+
+static
+struct attribute *wss_attrs[] = {
+	&wss_attr_properties.attr,
+	&wss_attr_members.attr,
+	&wss_attr_state.attr,
+	NULL,
+};
+
+struct attribute_group wss_attr_group = {
+	.name = NULL,	/* we want them in the same directory */
+	.attrs = wss_attrs,
+};
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
new file mode 100644
index 0000000..c701bd1
--- /dev/null
+++ b/drivers/uwb/wlp/txrx.c
@@ -0,0 +1,374 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * Message exchange infrastructure
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: Docs
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/wlp.h>
+#define D_LOCAL 5
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+
+/**
+ * Direct incoming association msg to correct parsing routine
+ *
+ * We only expect D1, E1, C1, C3 messages as new. All other incoming
+ * association messages should form part of an established session that is
+ * handled elsewhere.
+ * Handling these messages often requires calling sleeping functions, which
+ * cannot be done in interrupt context. We use the kernel's workqueue to
+ * handle these messages.
+ */
+static
+void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
+			   struct uwb_dev_addr *src)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_frame_assoc *assoc = (void *) skb->data;
+	struct wlp_assoc_frame_ctx *frame_ctx;
+	d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
+	frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC);
+	if (frame_ctx == NULL) {
+		dev_err(dev, "WLP: Unable to allocate memory for association "
+			"frame handling.\n");
+		kfree_skb(skb);
+		goto out;
+	}
+	frame_ctx->wlp = wlp;
+	frame_ctx->skb = skb;
+	frame_ctx->src = *src;
+	switch (assoc->type) {
+	case WLP_ASSOC_D1:
+		d_printf(5, dev, "Received a D1 frame.\n");
+		INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame);
+		schedule_work(&frame_ctx->ws);
+		break;
+	case WLP_ASSOC_E1:
+		d_printf(5, dev, "Received a E1 frame. FIXME?\n");
+		kfree_skb(skb); /* Temporary until we handle it */
+		kfree(frame_ctx); /* Temporary until we handle it */
+		break;
+	case WLP_ASSOC_C1:
+		d_printf(5, dev, "Received a C1 frame.\n");
+		INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame);
+		schedule_work(&frame_ctx->ws);
+		break;
+	case WLP_ASSOC_C3:
+		d_printf(5, dev, "Received a C3 frame.\n");
+		INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame);
+		schedule_work(&frame_ctx->ws);
+		break;
+	default:
+		dev_err(dev, "Received unexpected association frame. "
+			"Type = %d \n", assoc->type);
+		kfree_skb(skb);
+		kfree(frame_ctx);
+		break;
+	}
+out:
+	d_fnend(5, dev, "wlp %p\n", wlp);
+}
+
+/**
+ * Process incoming association frame
+ *
+ * Although it could be possible to deal with some incoming association
+ * messages without creating a new session we are keeping things simple. We
+ * do not accept new association messages if there is a session in progress
+ * and the messages do not belong to that session.
+ *
+ * If an association message arrives that causes the creation of a session
+ * (WLP_ASSOC_E1) while we are in the process of creating a session then we
+ * rely on the neighbor mutex to protect the data. That is, the new session
+ * will not be started until the previous is completed.
+ */
+static
+void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
+			     struct uwb_dev_addr *src)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_frame_assoc *assoc = (void *) skb->data;
+	struct wlp_session *session = wlp->session;
+	u8 version;
+	d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
+
+	if (wlp_get_version(wlp, &assoc->version, &version,
+			    sizeof(assoc->version)) < 0)
+		goto error;
+	if (version != WLP_VERSION) {
+		dev_err(dev, "Unsupported WLP version in association "
+			"message.\n");
+		goto error;
+	}
+	if (session != NULL) {
+		/* Function that created this session is still holding the
+		 * &wlp->mutex to protect this session. */
+		if (assoc->type == session->exp_message ||
+		    assoc->type == WLP_ASSOC_F0) {
+			if (!memcmp(&session->neighbor_addr, src,
+				   sizeof(*src))) {
+				session->data = skb;
+				(session->cb)(wlp);
+			} else {
+				dev_err(dev, "Received expected message from "
+					"unexpected source.  Expected message "
+					"%d or F0 from %02x:%02x, but received "
+					"it from %02x:%02x. Dropping.\n",
+					session->exp_message,
+					session->neighbor_addr.data[1],
+					session->neighbor_addr.data[0],
+					src->data[1], src->data[0]);
+				goto error;
+			}
+		} else {
+			dev_err(dev, "Association already in progress. "
+				"Dropping.\n");
+			goto error;
+		}
+	} else {
+		wlp_direct_assoc_frame(wlp, skb, src);
+	}
+	d_fnend(5, dev, "wlp %p\n", wlp);
+	return;
+error:
+	kfree_skb(skb);
+	d_fnend(5, dev, "wlp %p\n", wlp);
+}
+
+/**
+ * Verify incoming frame is from connected neighbor, prep to pass to WLP client
+ *
+ * Verification proceeds according to WLP 0.99 [7.3.1]. The source address
+ * is used to determine which neighbor is sending the frame and the WSS tag
+ * is used to know to which WSS the frame belongs (we only support one WSS
+ * so this test is straightforward).
+ * With the WSS found we need to ensure that we are connected before
+ * allowing the exchange of data frames.
+ */
+static
+int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb,
+			     struct uwb_dev_addr *src)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = -EINVAL;
+	struct wlp_eda_node eda_entry;
+	struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data;
+
+	d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
+	/* verify */
+	result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Incoming frame is from unknown "
+				"neighbor %02x:%02x.\n", src->data[1],
+				src->data[0]);
+		goto out;
+	}
+	if (hdr->tag != eda_entry.tag) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Tag of incoming frame from "
+				"%02x:%02x does not match expected tag. "
+				"Received 0x%02x, expected 0x%02x. \n",
+				src->data[1], src->data[0], hdr->tag,
+				eda_entry.tag);
+		result = -EINVAL;
+		goto out;
+	}
+	if (eda_entry.state != WLP_WSS_CONNECTED) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Incoming frame from "
+				"%02x:%02x does is not from connected WSS.\n",
+				src->data[1], src->data[0]);
+		result = -EINVAL;
+		goto out;
+	}
+	/* prep */
+	skb_pull(skb, sizeof(*hdr));
+out:
+	d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
+	return result;
+}
+
+/**
+ * Receive a WLP frame from device
+ *
+ * @returns: 1 if the calling function should free the skb
+ *           0 if it successfully handled the skb and freed it
+ *           0 if an error occurred; the skb is freed in this case
+ */
+int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb,
+		      struct uwb_dev_addr *src)
+{
+	unsigned len = skb->len;
+	void *ptr = skb->data;
+	struct wlp_frame_hdr *hdr;
+	int result = 0;
+
+	d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len);
+	if (len < sizeof(*hdr)) {
+		dev_err(dev, "Not enough data to parse WLP header.\n");
+		result = -EINVAL;
+		goto out;
+	}
+	hdr = ptr;
+	d_dump(6, dev, hdr, sizeof(*hdr));
+	if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) {
+		dev_err(dev, "Not a WLP frame type.\n");
+		result = -EINVAL;
+		goto out;
+	}
+	switch (hdr->type) {
+	case WLP_FRAME_STANDARD:
+		if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) {
+			dev_err(dev, "Not enough data to parse Standard "
+				"WLP header.\n");
+			result = -EINVAL;
+			goto out;
+		}
+		result = wlp_verify_prep_rx_frame(wlp, skb, src);
+		if (result < 0) {
+			if (printk_ratelimit())
+				dev_err(dev, "WLP: Verification of frame "
+					"from neighbor %02x:%02x failed.\n",
+					src->data[1], src->data[0]);
+			goto out;
+		}
+		result = 1;
+		break;
+	case WLP_FRAME_ABBREVIATED:
+		dev_err(dev, "Abbreviated frame received. FIXME?\n");
+		kfree_skb(skb);
+		break;
+	case WLP_FRAME_CONTROL:
+		dev_err(dev, "Control frame received. FIXME?\n");
+		kfree_skb(skb);
+		break;
+	case WLP_FRAME_ASSOCIATION:
+		if (len < sizeof(struct wlp_frame_assoc)) {
+			dev_err(dev, "Not enough data to parse Association "
+				"WLP header.\n");
+			result = -EINVAL;
+			goto out;
+		}
+		d_printf(5, dev, "Association frame received.\n");
+		wlp_receive_assoc_frame(wlp, skb, src);
+		break;
+	default:
+		dev_err(dev, "Invalid frame received.\n");
+		result = -EINVAL;
+		break;
+	}
+out:
+	if (result < 0) {
+		kfree_skb(skb);
+		result = 0;
+	}
+	d_fnend(6, dev, "skb (%p)\n", skb);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_receive_frame);
+
+
+/**
+ * Verify frame from network stack, prepare for further transmission
+ *
+ * @skb:   the socket buffer that needs to be prepared for transmission (it
+ *         is in need of a WLP header). If this is a broadcast frame we take
+ *         over the entire transmission.
+ *         If it is a unicast the WSS connection should already be established
+ *         and transmission will be done by the calling function.
+ * @dst:   On return this will contain the device address to which the
+ *         frame is destined.
+ * @returns: 0 on success, no tx: WLP header successfully applied to skb buffer,
+ *                                calling function can proceed with tx
+ *           1 on success, with tx: WLP will take over transmission of this
+ *                                  frame
+ *           <0 on error
+ *
+ * The network stack (WLP client) is attempting to transmit a frame. We can
+ * only transmit data if a local WSS is at least active (connection will be
+ * done here if this is a broadcast frame and neighbor also has the WSS
+ * active).
+ *
+ * The frame can be either broadcast or unicast. Broadcast in a WSS is
+ * supported via multicast, but we don't support multicast yet (until
+ * devices start to support MAB IEs). If a broadcast frame needs to be
+ * transmitted it is treated as a unicast frame to each neighbor. In this
+ * case the WLP takes over transmission of the skb and returns 1
+ * to the caller to indicate so. Also, in this case, if a neighbor has the
+ * same WSS activated but is not connected then the WSS connection will be
+ * done at this time. The neighbor's virtual address will be learned at
+ * this time.
+ *
+ * The destination address in a unicast frame is the virtual address of the
+ * neighbor. This address only becomes known when a WSS connection is
+ * established. We thus rely on a broadcast frame to trigger the setup of
+ * WSS connections to all neighbors before we are able to send unicast
+ * frames to them. This seems reasonable as IP would usually use ARP first
+ * before any unicast frames are sent.
+ *
+ * If we are already connected to the neighbor (neighbor's virtual address
+ * is known) we just prepare the WLP header and the caller will continue to
+ * send the frame.
+ *
+ * A failure in this function usually indicates something that cannot be
+ * fixed automatically. So, if this function fails (@return < 0) the calling
+ * function should not retry sending the frame as it will very likely keep
+ * failing.
+ *
+ */
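+/*
+ * Caller sketch (illustrative only, written as pseudocode; the real caller
+ * is the WLP client driver's transmit path acting on the documented return
+ * values):
+ *
+ *   result = wlp_prepare_tx_frame(dev, wlp, skb, &dst);
+ *   if (result < 0)
+ *           drop the skb, do not retry;
+ *   else if (result == 0)
+ *           transmit the prepared skb to dst;
+ *   else
+ *           do nothing, WLP has taken over transmission;
+ */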
+int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp,
+			 struct sk_buff *skb, struct uwb_dev_addr *dst)
+{
+	int result = -EINVAL;
+	struct ethhdr *eth_hdr = (void *) skb->data;
+
+	d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb);
+	if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
+		d_printf(6, dev, "WLP: handling broadcast frame. \n");
+		result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
+		if (result < 0) {
+			if (printk_ratelimit())
+				dev_err(dev, "Unable to handle broadcast "
+					"frame from WLP client.\n");
+			goto out;
+		}
+		dev_kfree_skb_irq(skb);
+		result = 1;
+		/* Frame will be transmitted by WLP. */
+	} else {
+		d_printf(6, dev, "WLP: handling unicast frame. \n");
+		result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst,
+					     wlp_wss_prep_hdr, skb);
+		if (unlikely(result < 0)) {
+			if (printk_ratelimit())
+				dev_err(dev, "Unable to prepare "
+					"skb for transmission. \n");
+			goto out;
+		}
+	}
+out:
+	d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h
new file mode 100644
index 0000000..1c94fab
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-internal.h
@@ -0,0 +1,228 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * Internal API
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __WLP_INTERNAL_H__
+#define __WLP_INTERNAL_H__
+
+/**
+ * State of WSS connection
+ *
+ * A device needs to connect to a neighbor in an activated WSS before data
+ * can be transmitted. The spec also distinguishes between a new connection
+ * attempt and a connection attempt after previous connection attempts. The
+ * state WLP_WSS_CONNECT_FAILED is used for this scenario. See WLP 0.99
+ * [7.2.6]
+ */
+enum wlp_wss_connect {
+	WLP_WSS_UNCONNECTED = 0,
+	WLP_WSS_CONNECTED,
+	WLP_WSS_CONNECT_FAILED,
+};
+
+extern struct kobj_type wss_ktype;
+extern struct attribute_group wss_attr_group;
+
+extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t);
+extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
+
+
+/* This should be changed to a dynamic array where entries are sorted
+ * by eth_addr and lookup is done with a binary search.
+ *
+ * Although, thinking twice about it: this technology's maximum reach
+ * is 10 meters... unless you want to pack too many devices in around
+ * your radio controller/WLP device, the list will probably not be
+ * too big.
+ *
+ * In any case, there is probably some data structure in the kernel
+ * that we could reuse for that already.
+ *
+ * The structure below is really only good enough while we support one
+ * WSS per host.
+ */
+struct wlp_eda_node {
+	struct list_head list_node;
+	unsigned char eth_addr[ETH_ALEN];
+	struct uwb_dev_addr dev_addr;
+	struct wlp_wss *wss;
+	unsigned char virt_addr[ETH_ALEN];
+	u8 tag;
+	enum wlp_wss_connect state;
+};
+
+typedef int (*wlp_eda_for_each_f)(struct wlp *, struct wlp_eda_node *, void *);
+
+extern void wlp_eda_init(struct wlp_eda *);
+extern void wlp_eda_release(struct wlp_eda *);
+extern int wlp_eda_create_node(struct wlp_eda *,
+			       const unsigned char eth_addr[ETH_ALEN],
+			       const struct uwb_dev_addr *);
+extern void wlp_eda_rm_node(struct wlp_eda *, const struct uwb_dev_addr *);
+extern int wlp_eda_update_node(struct wlp_eda *,
+			       const struct uwb_dev_addr *,
+			       struct wlp_wss *,
+			       const unsigned char virt_addr[ETH_ALEN],
+			       const u8, const enum wlp_wss_connect);
+extern int wlp_eda_update_node_state(struct wlp_eda *,
+				     const struct uwb_dev_addr *,
+				     const enum wlp_wss_connect);
+
+extern int wlp_copy_eda_node(struct wlp_eda *, struct uwb_dev_addr *,
+			     struct wlp_eda_node *);
+extern int wlp_eda_for_each(struct wlp_eda *, wlp_eda_for_each_f , void *);
+extern int wlp_eda_for_virtual(struct wlp_eda *,
+			       const unsigned char eth_addr[ETH_ALEN],
+			       struct uwb_dev_addr *,
+			       wlp_eda_for_each_f , void *);
+
+
+extern void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *);
+
+extern size_t wlp_wss_key_print(char *, size_t, u8 *);
+
+/* Function called when no more references to WSS exists */
+extern void wlp_wss_release(struct kobject *);
+
+extern void wlp_wss_reset(struct wlp_wss *);
+extern int wlp_wss_create_activate(struct wlp_wss *, struct wlp_uuid *,
+				   char *, unsigned, unsigned);
+extern int wlp_wss_enroll_activate(struct wlp_wss *, struct wlp_uuid *,
+				   struct uwb_dev_addr *);
+extern ssize_t wlp_discover(struct wlp *);
+
+extern int wlp_enroll_neighbor(struct wlp *, struct wlp_neighbor_e *,
+			       struct wlp_wss *, struct wlp_uuid *);
+extern int wlp_wss_is_active(struct wlp *, struct wlp_wss *,
+			     struct uwb_dev_addr *);
+
+struct wlp_assoc_conn_ctx {
+	struct work_struct ws;
+	struct wlp *wlp;
+	struct sk_buff *skb;
+	struct wlp_eda_node eda_entry;
+};
+
+
+extern int wlp_wss_connect_prep(struct wlp *, struct wlp_eda_node *, void *);
+extern int wlp_wss_send_copy(struct wlp *, struct wlp_eda_node *, void *);
+
+
+/* Message handling */
+struct wlp_assoc_frame_ctx {
+	struct work_struct ws;
+	struct wlp *wlp;
+	struct sk_buff *skb;
+	struct uwb_dev_addr src;
+};
+
+extern int wlp_wss_prep_hdr(struct wlp *, struct wlp_eda_node *, void *);
+extern void wlp_handle_d1_frame(struct work_struct *);
+extern int wlp_parse_d2_frame_to_cache(struct wlp *, struct sk_buff *,
+				       struct wlp_neighbor_e *);
+extern int wlp_parse_d2_frame_to_enroll(struct wlp_wss *, struct sk_buff *,
+					struct wlp_neighbor_e *,
+					struct wlp_uuid *);
+extern void wlp_handle_c1_frame(struct work_struct *);
+extern void wlp_handle_c3_frame(struct work_struct *);
+extern int wlp_parse_c3c4_frame(struct wlp *, struct sk_buff *,
+				struct wlp_uuid *, u8 *,
+				struct uwb_mac_addr *);
+extern int wlp_parse_f0(struct wlp *, struct sk_buff *);
+extern int wlp_send_assoc_frame(struct wlp *, struct wlp_wss *,
+				struct uwb_dev_addr *, enum wlp_assoc_type);
+extern ssize_t wlp_get_version(struct wlp *, struct wlp_attr_version *,
+			       u8 *, ssize_t);
+extern ssize_t wlp_get_wssid(struct wlp *, struct wlp_attr_wssid *,
+			     struct wlp_uuid *, ssize_t);
+extern int __wlp_alloc_device_info(struct wlp *);
+extern int __wlp_setup_device_info(struct wlp *);
+
+extern struct wlp_wss_attribute wss_attribute_properties;
+extern struct wlp_wss_attribute wss_attribute_members;
+extern struct wlp_wss_attribute wss_attribute_state;
+
+static inline
+size_t wlp_wss_uuid_print(char *buf, size_t bufsize, struct wlp_uuid *uuid)
+{
+	size_t result;
+
+	result = scnprintf(buf, bufsize,
+			  "%02x:%02x:%02x:%02x:%02x:%02x:"
+			  "%02x:%02x:%02x:%02x:%02x:%02x:"
+			  "%02x:%02x:%02x:%02x",
+			  uuid->data[0], uuid->data[1],
+			  uuid->data[2], uuid->data[3],
+			  uuid->data[4], uuid->data[5],
+			  uuid->data[6], uuid->data[7],
+			  uuid->data[8], uuid->data[9],
+			  uuid->data[10], uuid->data[11],
+			  uuid->data[12], uuid->data[13],
+			  uuid->data[14], uuid->data[15]);
+	return result;
+}
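+/*
+ * Note (illustrative): the printed UUID form above is 16 two-digit octets
+ * separated by 15 colons, i.e. 47 characters plus the terminating NUL, so
+ * callers are expected to pass buffers of at least WLP_WSS_UUID_STRSIZE
+ * bytes (assumed to be sized accordingly; defined elsewhere).
+ */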
+
+/**
+ * FIXME: How should a nonce be displayed?
+ */
+static inline
+size_t wlp_wss_nonce_print(char *buf, size_t bufsize, struct wlp_nonce *nonce)
+{
+	size_t result;
+
+	result = scnprintf(buf, bufsize,
+			  "%02x %02x %02x %02x %02x %02x "
+			  "%02x %02x %02x %02x %02x %02x "
+			  "%02x %02x %02x %02x",
+			  nonce->data[0], nonce->data[1],
+			  nonce->data[2], nonce->data[3],
+			  nonce->data[4], nonce->data[5],
+			  nonce->data[6], nonce->data[7],
+			  nonce->data[8], nonce->data[9],
+			  nonce->data[10], nonce->data[11],
+			  nonce->data[12], nonce->data[13],
+			  nonce->data[14], nonce->data[15]);
+	return result;
+}
+
+
+static inline
+void wlp_session_cb(struct wlp *wlp)
+{
+	struct completion *completion = wlp->session->cb_priv;
+	complete(completion);
+}
+
+static inline
+int wlp_uuid_is_set(struct wlp_uuid *uuid)
+{
+	struct wlp_uuid zero_uuid = { .data = { 0x00, 0x00, 0x00, 0x00,
+						0x00, 0x00, 0x00, 0x00,
+						0x00, 0x00, 0x00, 0x00,
+						0x00, 0x00, 0x00, 0x00} };
+
+	if (!memcmp(uuid, &zero_uuid, sizeof(*uuid)))
+		return 0;
+	return 1;
+}
+
+#endif /* __WLP_INTERNAL_H__ */
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c
new file mode 100644
index 0000000..0799402e
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-lc.c
@@ -0,0 +1,585 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/wlp.h>
+#define D_LOCAL 6
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+
+static
+void wlp_neighbor_init(struct wlp_neighbor_e *neighbor)
+{
+	INIT_LIST_HEAD(&neighbor->wssid);
+}
+
+/**
+ * Create area for device information storage
+ *
+ * wlp->mutex must be held
+ */
+int __wlp_alloc_device_info(struct wlp *wlp)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	BUG_ON(wlp->dev_info != NULL);
+	wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
+	if (wlp->dev_info == NULL) {
+		dev_err(dev, "WLP: Unable to allocate memory for "
+			"device information.\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+
+/**
+ * Fill in device information using function provided by driver
+ *
+ * wlp->mutex must be held
+ */
+static
+void __wlp_fill_device_info(struct wlp *wlp)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+
+	BUG_ON(wlp->fill_device_info == NULL);
+	d_printf(6, dev, "Retrieving device information "
+			 "from device driver.\n");
+	wlp->fill_device_info(wlp, wlp->dev_info);
+}
+
+/**
+ * Setup device information
+ *
+ * Allocate area for device information and populate it.
+ *
+ * wlp->mutex must be held
+ */
+int __wlp_setup_device_info(struct wlp *wlp)
+{
+	int result;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+
+	result = __wlp_alloc_device_info(wlp);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to allocate area for "
+			"device information.\n");
+		return result;
+	}
+	__wlp_fill_device_info(wlp);
+	return 0;
+}
+
+/**
+ * Remove information about neighbor stored temporarily
+ *
+ * Information learned during discovery should only be stored when the
+ * device enrolls in the neighbor's WSS. We do need to store this
+ * information temporarily in order to present it to the user.
+ *
+ * We are only interested in keeping neighbor WSS information if that
+ * neighbor is accepting enrollment.
+ *
+ * should be called with wlp->nbmutex held
+ */
+void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor)
+{
+	struct wlp_wssid_e *wssid_e, *next;
+	u8 keep;
+	if (!list_empty(&neighbor->wssid)) {
+		list_for_each_entry_safe(wssid_e, next, &neighbor->wssid,
+					 node) {
+			if (wssid_e->info != NULL) {
+				keep = wssid_e->info->accept_enroll;
+				kfree(wssid_e->info);
+				wssid_e->info = NULL;
+				if (!keep) {
+					list_del(&wssid_e->node);
+					kfree(wssid_e);
+				}
+			}
+		}
+	}
+	if (neighbor->info != NULL) {
+		kfree(neighbor->info);
+		neighbor->info = NULL;
+	}
+}
+
+/**
+ * Populate WLP neighborhood cache with neighbor information
+ *
+ * A new neighbor is found. If it is discoverable then we add it to the
+ * neighborhood cache.
+ *
+ */
+static
+int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev)
+{
+	int result = 0;
+	int discoverable;
+	struct wlp_neighbor_e *neighbor;
+
+	d_fnstart(6, &dev->dev, "uwb %p \n", dev);
+	d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n",
+		 dev->dev_addr.data[1], dev->dev_addr.data[0]);
+	/**
+	 * FIXME:
+	 * Use contents of WLP IE found in beacon cache to determine if
+	 * neighbor is discoverable.
+	 * The device does not support WLP IE yet so this still needs to be
+	 * done. Until then we assume all devices are discoverable.
+	 */
+	discoverable = 1; /* will be changed when FIXME disappears */
+	if (discoverable) {
+		/* Add neighbor to cache for discovery */
+		neighbor = kzalloc(sizeof(*neighbor), GFP_KERNEL);
+		if (neighbor == NULL) {
+			dev_err(&dev->dev, "Unable to create memory for "
+				"new neighbor. \n");
+			result = -ENOMEM;
+			goto error_no_mem;
+		}
+		wlp_neighbor_init(neighbor);
+		uwb_dev_get(dev);
+		neighbor->uwb_dev = dev;
+		list_add(&neighbor->node, &wlp->neighbors);
+	}
+error_no_mem:
+	d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result);
+	return result;
+}
+
+/**
+ * Remove one neighbor from cache
+ */
+static
+void __wlp_neighbor_release(struct wlp_neighbor_e *neighbor)
+{
+	struct wlp_wssid_e *wssid_e, *next_wssid_e;
+
+	list_for_each_entry_safe(wssid_e, next_wssid_e,
+				 &neighbor->wssid, node) {
+		list_del(&wssid_e->node);
+		kfree(wssid_e);
+	}
+	uwb_dev_put(neighbor->uwb_dev);
+	list_del(&neighbor->node);
+	kfree(neighbor);
+}
+
+/**
+ * Clear entire neighborhood cache.
+ */
+static
+void __wlp_neighbors_release(struct wlp *wlp)
+{
+	struct wlp_neighbor_e *neighbor, *next;
+	if (list_empty(&wlp->neighbors))
+		return;
+	list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
+		__wlp_neighbor_release(neighbor);
+	}
+}
+
+static
+void wlp_neighbors_release(struct wlp *wlp)
+{
+	mutex_lock(&wlp->nbmutex);
+	__wlp_neighbors_release(wlp);
+	mutex_unlock(&wlp->nbmutex);
+}
+
+
+
+/**
+ * Send D1 message to neighbor, receive D2 message
+ *
+ * @neighbor: neighbor to which D1 message will be sent
+ * @wss:      if not NULL, it is an enrollment request for this WSS
+ * @wssid:    if wss not NULL, this is the wssid of the WSS in which we
+ *            want to enroll
+ *
+ * A D1/D2 exchange is done for one of two reasons: discovery or
+ * enrollment. If done for discovery the D1 message is sent to the neighbor
+ * and the contents of the D2 response are stored in a temporary cache.
+ * If done for enrollment the @wss and @wssid are also provided. In this
+ * case the D1 message is sent to the neighbor and the D2 response is
+ * parsed to enroll in the WSS with the given @wssid.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
+		      struct wlp_wss *wss, struct wlp_uuid *wssid)
+{
+	int result;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct wlp_session session;
+	struct sk_buff  *skb;
+	struct wlp_frame_assoc *resp;
+	struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
+
+	mutex_lock(&wlp->mutex);
+	if (!wlp_uuid_is_set(&wlp->uuid)) {
+		dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
+			"proceed.\n");
+		result = -ENXIO;
+		goto out;
+	}
+	/* Send D1 association frame */
+	result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_D1);
+	if (result < 0) {
+		dev_err(dev, "Unable to send D1 frame to neighbor "
+			"%02x:%02x (%d)\n", dev_addr->data[1],
+			dev_addr->data[0], result);
+		d_printf(6, dev, "Add placeholders into buffer next to "
+			 "neighbor information we have (dev address).\n");
+		goto out;
+	}
+	/* Create session, wait for response */
+	session.exp_message = WLP_ASSOC_D2;
+	session.cb = wlp_session_cb;
+	session.cb_priv = &completion;
+	session.neighbor_addr = *dev_addr;
+	BUG_ON(wlp->session != NULL);
+	wlp->session = &session;
+	/* Wait for D2/F0 frame */
+	result = wait_for_completion_interruptible_timeout(&completion,
+						   WLP_PER_MSG_TIMEOUT * HZ);
+	if (result == 0) {
+		result = -ETIMEDOUT;
+		dev_err(dev, "Timeout while sending D1 to neighbor "
+			     "%02x:%02x.\n", dev_addr->data[1],
+			     dev_addr->data[0]);
+		goto error_session;
+	}
+	if (result < 0) {
+		dev_err(dev, "Unable to discover/enroll neighbor %02x:%02x.\n",
+			dev_addr->data[1], dev_addr->data[0]);
+		goto error_session;
+	}
+	/* Parse message in session->data: it will be either D2 or F0 */
+	skb = session.data;
+	resp = (void *) skb->data;
+	d_printf(6, dev, "Received response to D1 frame. \n");
+	d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len);
+
+	if (resp->type == WLP_ASSOC_F0) {
+		result = wlp_parse_f0(wlp, skb);
+		if (result < 0)
+			dev_err(dev, "WLP: Unable to parse F0 from neighbor "
+				"%02x:%02x.\n", dev_addr->data[1],
+				dev_addr->data[0]);
+		result = -EINVAL;
+		goto error_resp_parse;
+	}
+	if (wss == NULL) {
+		/* Discovery */
+		result = wlp_parse_d2_frame_to_cache(wlp, skb, neighbor);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to parse D2 message from "
+				"neighbor %02x:%02x for discovery.\n",
+				dev_addr->data[1], dev_addr->data[0]);
+			goto error_resp_parse;
+		}
+	} else {
+		/* Enrollment */
+		result = wlp_parse_d2_frame_to_enroll(wss, skb, neighbor,
+						      wssid);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to parse D2 message from "
+				"neighbor %02x:%02x for enrollment.\n",
+				dev_addr->data[1], dev_addr->data[0]);
+			goto error_resp_parse;
+		}
+	}
+error_resp_parse:
+	kfree_skb(skb);
+error_session:
+	wlp->session = NULL;
+out:
+	mutex_unlock(&wlp->mutex);
+	return result;
+}
+
+/**
+ * Enroll into WSS of provided WSSID by using neighbor as registrar
+ *
+ * &wss->mutex is held
+ */
+int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
+			struct wlp_wss *wss, struct wlp_uuid *wssid)
+{
+	int result = 0;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
+	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
+		  wlp, neighbor, wss, wssid, buf);
+	d_printf(6, dev, "Complete me.\n");
+	result =  wlp_d1d2_exchange(wlp, neighbor, wss, wssid);
+	if (result < 0) {
+		dev_err(dev, "WLP: D1/D2 message exchange for enrollment "
+			"failed. result = %d \n", result);
+		goto out;
+	}
+	if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
+		dev_err(dev, "WLP: Unable to enroll into WSS %s using "
+			"neighbor %02x:%02x. \n", buf,
+			dev_addr->data[1], dev_addr->data[0]);
+		result = -EINVAL;
+		goto out;
+	}
+	if (wss->secure_status == WLP_WSS_SECURE) {
+		dev_err(dev, "FIXME: need to complete secure enrollment.\n");
+		result = -EINVAL;
+		goto error;
+	} else {
+		wss->state = WLP_WSS_STATE_ENROLLED;
+		d_printf(2, dev, "WLP: Successful enrollment into unsecure WSS "
+			 "%s using neighbor %02x:%02x. \n", buf,
+			 dev_addr->data[1], dev_addr->data[0]);
+	}
+
+	d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
+		  wlp, neighbor, wss, wssid, buf);
+out:
+	return result;
+error:
+	wlp_wss_reset(wss);
+	return result;
+}
+
+/**
+ * Discover WSS information of neighbor's active WSS
+ */
+static
+int wlp_discover_neighbor(struct wlp *wlp,
+			  struct wlp_neighbor_e *neighbor)
+{
+	return wlp_d1d2_exchange(wlp, neighbor, NULL, NULL);
+}
+
+
+/**
+ * Each neighbor in the neighborhood cache is discoverable. Discover it.
+ *
+ * Discovery is done by sending a D1 association frame and parsing the
+ * D2 association frame response. Only the wssid from the D2 is kept in
+ * the neighbor cache; the rest is just displayed to the user and forgotten.
+ *
+ * The discovery is not done in parallel. This is simple and enables us to
+ * maintain only one association context.
+ *
+ * The discovery of one neighbor does not affect the others, but if the
+ * discovery of a neighbor fails it is removed from the neighborhood cache.
+ */
+static
+int wlp_discover_all_neighbors(struct wlp *wlp)
+{
+	int result = 0;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_neighbor_e *neighbor, *next;
+
+	list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
+		result = wlp_discover_neighbor(wlp, neighbor);
+		if (result < 0) {
+			dev_err(dev, "WLP: Unable to discover neighbor "
+				"%02x:%02x, removing from neighborhood. \n",
+				neighbor->uwb_dev->dev_addr.data[1],
+				neighbor->uwb_dev->dev_addr.data[0]);
+			__wlp_neighbor_release(neighbor);
+		}
+	}
+	return result;
+}
+
+static int wlp_add_neighbor_helper(struct device *dev, void *priv)
+{
+	struct wlp *wlp = priv;
+	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+	return wlp_add_neighbor(wlp, uwb_dev);
+}
+
+/**
+ * Discover WLP neighborhood
+ *
+ * Will send a D1 association frame to all devices in the beacon group that
+ * have the discoverable bit set in their WLP IE. D2 frames will be received
+ * and the information displayed to the user. Partial information (from the
+ * D2 association frame) will be cached to assist with future association
+ * requests.
+ *
+ * The discovery of the WLP neighborhood is triggered by the user. This
+ * should occur infrequently and we thus free current cache and re-allocate
+ * memory if needed.
+ *
+ * If one neighbor fails during initial discovery (determining if it is a
+ * neighbor or not), we fail all - note that no interaction with the neighbor
+ * has occurred at this point, so if a failure occurs we know something went
+ * wrong locally. We thus undo everything.
+ */
+ssize_t wlp_discover(struct wlp *wlp)
+{
+	int result = 0;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+
+	d_fnstart(6, dev, "wlp %p \n", wlp);
+	mutex_lock(&wlp->nbmutex);
+	/* Clear current neighborhood cache. */
+	__wlp_neighbors_release(wlp);
+	/* Determine which devices in neighborhood. Repopulate cache. */
+	result = uwb_dev_for_each(wlp->rc, wlp_add_neighbor_helper, wlp);
+	if (result < 0) {
+		/* May have partial neighbor information, release all. */
+		__wlp_neighbors_release(wlp);
+		goto error_dev_for_each;
+	}
+	/* Discover the properties of devices in neighborhood. */
+	result = wlp_discover_all_neighbors(wlp);
+	/* In case of failure we still print our partial results. */
+	if (result < 0) {
+		dev_err(dev, "Unable to fully discover neighborhood. \n");
+		result = 0;
+	}
+error_dev_for_each:
+	mutex_unlock(&wlp->nbmutex);
+	d_fnend(6, dev, "wlp %p \n", wlp);
+	return result;
+}
+
+/**
+ * Handle events from UWB stack
+ *
+ * We handle events conservatively. If a neighbor goes off the air we
+ * remove it from the neighborhood. If an association process is in
+ * progress this function will block waiting for the nbmutex to become
+ * free. The association process will thus be allowed to complete before
+ * the neighbor is removed.
+ */
+static
+void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev,
+		       enum uwb_notifs event)
+{
+	struct wlp *wlp = _wlp;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_neighbor_e *neighbor, *next;
+	int result;
+	switch (event) {
+	case UWB_NOTIF_ONAIR:
+		d_printf(6, dev, "UWB device %02x:%02x is onair\n",
+				uwb_dev->dev_addr.data[1],
+				uwb_dev->dev_addr.data[0]);
+		result = wlp_eda_create_node(&wlp->eda,
+					     uwb_dev->mac_addr.data,
+					     &uwb_dev->dev_addr);
+		if (result < 0)
+			dev_err(dev, "WLP: Unable to add new neighbor "
+				"%02x:%02x to EDA cache.\n",
+				uwb_dev->dev_addr.data[1],
+				uwb_dev->dev_addr.data[0]);
+		break;
+	case UWB_NOTIF_OFFAIR:
+		d_printf(6, dev, "UWB device %02x:%02x is offair\n",
+				uwb_dev->dev_addr.data[1],
+				uwb_dev->dev_addr.data[0]);
+		wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr);
+		mutex_lock(&wlp->nbmutex);
+		list_for_each_entry_safe(neighbor, next, &wlp->neighbors,
+					 node) {
+			if (neighbor->uwb_dev == uwb_dev) {
+				d_printf(6, dev, "Removing device from "
+					 "neighborhood.\n");
+				__wlp_neighbor_release(neighbor);
+			}
+		}
+		mutex_unlock(&wlp->nbmutex);
+		break;
+	default:
+		dev_err(dev, "don't know how to handle event %d from uwb\n",
+				event);
+	}
+}
+
+int wlp_setup(struct wlp *wlp, struct uwb_rc *rc)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	int result;
+
+	d_fnstart(6, dev, "wlp %p\n", wlp);
+	BUG_ON(wlp->fill_device_info == NULL);
+	BUG_ON(wlp->xmit_frame == NULL);
+	BUG_ON(wlp->stop_queue == NULL);
+	BUG_ON(wlp->start_queue == NULL);
+	wlp->rc = rc;
+	wlp_eda_init(&wlp->eda);/* Set up address cache */
+	wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb;
+	wlp->uwb_notifs_handler.data = wlp;
+	uwb_notifs_register(rc, &wlp->uwb_notifs_handler);
+
+	uwb_pal_init(&wlp->pal);
+	result = uwb_pal_register(rc, &wlp->pal);
+	if (result < 0)
+		uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
+
+	d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_setup);
+
+void wlp_remove(struct wlp *wlp)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	d_fnstart(6, dev, "wlp %p\n", wlp);
+	wlp_neighbors_release(wlp);
+	uwb_pal_unregister(wlp->rc, &wlp->pal);
+	uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
+	wlp_eda_release(&wlp->eda);
+	mutex_lock(&wlp->mutex);
+	if (wlp->dev_info != NULL)
+		kfree(wlp->dev_info);
+	mutex_unlock(&wlp->mutex);
+	wlp->rc = NULL;
+	/* We have to use NULL here because this function can be called
+	 * when the device disappeared. */
+	d_fnend(6, NULL, "wlp %p\n", wlp);
+}
+EXPORT_SYMBOL_GPL(wlp_remove);
+
+/**
+ * wlp_reset_all - reset the WLP hardware
+ * @wlp: the WLP device to reset.
+ *
+ * This schedules a full hardware reset of the WLP device.  The radio
+ * controller and any other PALs will also be reset.
+ */
+void wlp_reset_all(struct wlp *wlp)
+{
+	uwb_rc_reset_all(wlp->rc);
+}
+EXPORT_SYMBOL_GPL(wlp_reset_all);
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
new file mode 100644
index 0000000..96b18c9
--- /dev/null
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -0,0 +1,1055 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Implementation of the WLP association protocol.
+ *
+ * FIXME: Docs
+ *
+ * A UWB network interface will configure a WSS through wlp_wss_setup() after
+ * the interface has been assigned a MAC address, typically after
+ * "ifconfig" has been called. When the interface goes down it should call
+ * wlp_wss_remove().
+ *
+ * When the WSS is ready for use the user interacts via sysfs to create,
+ * discover, and activate WSS.
+ *
+ * wlp_wss_enroll_activate()
+ *
+ * wlp_wss_create_activate()
+ * 	wlp_wss_set_wssid_hash()
+ * 		wlp_wss_comp_wssid_hash()
+ * 	wlp_wss_sel_bcast_addr()
+ * 	wlp_wss_sysfs_add()
+ *
+ * Called when no more references to WSS exist:
+ * 	wlp_wss_release()
+ * 		wlp_wss_reset()
+ */
+
+#include <linux/etherdevice.h> /* for is_valid_ether_addr */
+#include <linux/skbuff.h>
+#include <linux/wlp.h>
+#define D_LOCAL 5
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+
+size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key)
+{
+	size_t result;
+
+	result = scnprintf(buf, bufsize,
+			  "%02x %02x %02x %02x %02x %02x "
+			  "%02x %02x %02x %02x %02x %02x "
+			  "%02x %02x %02x %02x",
+			  key[0], key[1], key[2], key[3],
+			  key[4], key[5], key[6], key[7],
+			  key[8], key[9], key[10], key[11],
+			  key[12], key[13], key[14], key[15]);
+	return result;
+}
+
+/**
+ * Compute WSSID hash
+ * WLP Draft 0.99 [7.2.1]
+ *
+ * The WSSID hash for a WSSID is the result of an octet-wise exclusive-OR
+ * of all octets in the WSSID.
+ */
+static
+u8 wlp_wss_comp_wssid_hash(struct wlp_uuid *wssid)
+{
+	return wssid->data[0]  ^ wssid->data[1]  ^ wssid->data[2]
+	       ^ wssid->data[3]  ^ wssid->data[4]  ^ wssid->data[5]
+	       ^ wssid->data[6]  ^ wssid->data[7]  ^ wssid->data[8]
+	       ^ wssid->data[9]  ^ wssid->data[10] ^ wssid->data[11]
+	       ^ wssid->data[12] ^ wssid->data[13] ^ wssid->data[14]
+	       ^ wssid->data[15];
+}
+
+/**
+ * Select a multicast EUI-48 for the WSS broadcast address.
+ * WLP Draft 0.99 [7.2.1]
+ *
+ * Selected based on the WiMedia Alliance OUI, 00-13-88, within the WLP
+ * range, [01-13-88-00-01-00, 01-13-88-00-01-FF] inclusive.
+ *
+ * This address is currently hardcoded.
+ * FIXME?
+ */
+static
+struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss)
+{
+	struct uwb_mac_addr bcast = {
+		.data = { 0x01, 0x13, 0x88, 0x00, 0x01, 0x00 }
+	};
+	return bcast;
+}
+
+/**
+ * Clear the contents of the WSS structure - all except kobj, mutex, virtual
+ *
+ * We do not want to reinitialize - the internal kobj should not change as
+ * it still points to the parent received during setup. The mutex should
+ * also remain. We thus just reset values individually.
+ * The virtual address assigned to the WSS will remain the same for the
+ * lifetime of the WSS. We only reset the fields that can change during its
+ * lifetime.
+ */
+void wlp_wss_reset(struct wlp_wss *wss)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	d_fnstart(5, dev, "wss (%p) \n", wss);
+	memset(&wss->wssid, 0, sizeof(wss->wssid));
+	wss->hash = 0;
+	memset(&wss->name[0], 0, sizeof(wss->name));
+	memset(&wss->bcast, 0, sizeof(wss->bcast));
+	wss->secure_status = WLP_WSS_UNSECURE;
+	memset(&wss->master_key[0], 0, sizeof(wss->master_key));
+	wss->tag = 0;
+	wss->state = WLP_WSS_STATE_NONE;
+	d_fnend(5, dev, "wss (%p) \n", wss);
+}
+
+/**
+ * Create sysfs infrastructure for WSS
+ *
+ * The WSS is configured to have the interface as parent (see wlp_wss_setup());
+ * a new sysfs directory whose name includes the wssid is created in the
+ * interface's sysfs directory. The group of files used to interact with the
+ * WSS is also created.
+ */
+static
+int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result;
+
+	d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str);
+	result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str);
+	if (result < 0)
+		return result;
+	wss->kobj.ktype = &wss_ktype;
+	result = kobject_init_and_add(&wss->kobj,
+			&wss_ktype, wss->kobj.parent, "wlp");
+	if (result < 0) {
+		dev_err(dev, "WLP: Cannot register WSS kobject.\n");
+		goto error_kobject_register;
+	}
+	result = sysfs_create_group(&wss->kobj, &wss_attr_group);
+	if (result < 0) {
+		dev_err(dev, "WLP: Cannot register WSS attributes: %d\n",
+			result);
+		goto error_sysfs_create_group;
+	}
+	d_fnend(5, dev, "Completed. result = %d \n", result);
+	return 0;
+error_sysfs_create_group:
+
+	kobject_put(&wss->kobj); /* will free name if needed */
+	return result;
+error_kobject_register:
+	kfree(wss->kobj.name);
+	wss->kobj.name = NULL;
+	wss->kobj.ktype = NULL;
+	return result;
+}
+
+
+/**
+ * Release WSS
+ *
+ * No more references exist to this WSS. We should undo everything that was
+ * done in wlp_wss_create_activate() except removing the group. The group
+ * is not removed because an object can be unregistered before the group is
+ * created. We also undo any additional operations performed on the WSS
+ * after its creation (such as the addition of members).
+ *
+ * If memory was allocated for the kobject's name then it will
+ * be freed by the kobject system during this time.
+ *
+ * The EDA cache is removed and reinitialized when the WSS is removed. We
+ * thus lose knowledge of the members of this WSS at that time and need not
+ * do it here.
+ */
+void wlp_wss_release(struct kobject *kobj)
+{
+	struct wlp_wss *wss = container_of(kobj, struct wlp_wss, kobj);
+
+	wlp_wss_reset(wss);
+}
+
+/**
+ * Enroll into a WSS using provided neighbor as registrar
+ *
+ * First search the neighborhood information to learn which neighbor is
+ * referred to, next proceed with enrollment.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid,
+			  struct uwb_dev_addr *dest)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_neighbor_e *neighbor;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	int result = -ENXIO;
+	struct uwb_dev_addr *dev_addr;
+
+	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n",
+		  wss, buf, dest->data[1], dest->data[0]);
+	mutex_lock(&wlp->nbmutex);
+	list_for_each_entry(neighbor, &wlp->neighbors, node) {
+		dev_addr = &neighbor->uwb_dev->dev_addr;
+		if (!memcmp(dest, dev_addr, sizeof(*dest))) {
+			d_printf(5, dev, "Neighbor %02x:%02x is valid, "
+				 "enrolling. \n",
+				 dev_addr->data[1], dev_addr->data[0]);
+			result = wlp_enroll_neighbor(wlp, neighbor, wss,
+						     wssid);
+			break;
+		}
+	}
+	if (result == -ENXIO)
+		dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n",
+			dest->data[1], dest->data[0]);
+	mutex_unlock(&wlp->nbmutex);
+	d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n",
+		  wss, buf, dest->data[1], dest->data[0], result);
+	return result;
+}
+
+/**
+ * Enroll into a WSS previously discovered
+ *
+ * The user provides the WSSID of the WSS; search for a neighbor that has
+ * this WSS activated and attempt to enroll.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct wlp_neighbor_e *neighbor;
+	struct wlp_wssid_e *wssid_e;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	int result = -ENXIO;
+
+	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf);
+	mutex_lock(&wlp->nbmutex);
+	list_for_each_entry(neighbor, &wlp->neighbors, node) {
+		list_for_each_entry(wssid_e, &neighbor->wssid, node) {
+			if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) {
+				d_printf(5, dev, "Found WSSID %s in neighbor "
+					 "%02x:%02x cache. \n", buf,
+					 neighbor->uwb_dev->dev_addr.data[1],
+					 neighbor->uwb_dev->dev_addr.data[0]);
+				result = wlp_enroll_neighbor(wlp, neighbor,
+							     wss, wssid);
+				if (result == 0) /* enrollment success */
+					goto out;
+				break;
+			}
+		}
+	}
+out:
+	if (result == -ENXIO)
+		dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf);
+	mutex_unlock(&wlp->nbmutex);
+	d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result);
+	return result;
+}
+
+/**
+ * Enroll into WSS with provided WSSID, registrar may be provided
+ *
+ * @wss: our WSS that will be enrolled
+ * @wssid: wssid of neighboring WSS that we want to enroll in
+ * @devaddr: registrar can be specified, will be broadcast (ff:ff) if any
+ *           neighbor can be used as registrar.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid,
+		   struct uwb_dev_addr *devaddr)
+{
+	int result;
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
+
+	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	if (wss->state != WLP_WSS_STATE_NONE) {
+		dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf);
+		result = -EEXIST;
+		goto error;
+	}
+	if (!memcmp(&bcast, devaddr, sizeof(bcast))) {
+		d_printf(5, dev, "Request to enroll in discovered WSS "
+			 "with WSSID %s \n", buf);
+		result = wlp_wss_enroll_discovered(wss, wssid);
+	} else {
+		d_printf(5, dev, "Request to enroll in WSSID %s with "
+			 "registrar %02x:%02x\n", buf, devaddr->data[1],
+			 devaddr->data[0]);
+		result = wlp_wss_enroll_target(wss, wssid, devaddr);
+	}
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n",
+			buf, result);
+		goto error;
+	}
+	d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf);
+	result = wlp_wss_sysfs_add(wss, buf);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n");
+		wlp_wss_reset(wss);
+	}
+error:
+	return result;
+
+}
+
+/**
+ * Activate given WSS
+ *
+ * Prior to activation a WSS must be enrolled. To activate a WSS a device
+ * includes the WSS hash in the WLP IE in its beacon in each superframe.
+ * WLP 0.99 [7.2.5].
+ *
+ * The WSS tag is also computed at this time. We only support one activated
+ * WSS so we can use the hash as a tag - there will never be a conflict.
+ *
+ * We currently only support one activated WSS so only one WSS hash is
+ * included in the WLP IE.
+ */
+static
+int wlp_wss_activate(struct wlp_wss *wss)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct uwb_rc *uwb_rc = wlp->rc;
+	int result;
+	struct {
+		struct wlp_ie wlp_ie;
+		u8 hash; /* only include one hash */
+	} ie_data;
+
+	d_fnstart(5, dev, "Activating WSS %p. \n", wss);
+	BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED);
+	wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid);
+	wss->tag = wss->hash;
+	memset(&ie_data, 0, sizeof(ie_data));
+	ie_data.wlp_ie.hdr.element_id = UWB_IE_WLP;
+	ie_data.wlp_ie.hdr.length = sizeof(ie_data) - sizeof(struct uwb_ie_hdr);
+	wlp_ie_set_hash_length(&ie_data.wlp_ie, sizeof(ie_data.hash));
+	ie_data.hash = wss->hash;
+	result = uwb_rc_ie_add(uwb_rc, &ie_data.wlp_ie.hdr,
+			       sizeof(ie_data));
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to add WLP IE to beacon. "
+			"result = %d.\n", result);
+		goto error_wlp_ie;
+	}
+	wss->state = WLP_WSS_STATE_ACTIVE;
+	result = 0;
+error_wlp_ie:
+	d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result);
+	return result;
+}
+
+/**
+ * Enroll in and activate WSS identified by provided WSSID
+ *
+ * The neighborhood cache should contain a list of all neighbors and the
+ * WSS they have activated. Based on that cache we search which neighbor we
+ * can perform the association process with. The user also has the option
+ * to specify which neighbor is preferred as registrar.
+ * Successful enrollment is followed by activation.
+ * Successful activation will create the sysfs directory containing
+ * specific information regarding this WSS.
+ */
+int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
+			    struct uwb_dev_addr *devaddr)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	char buf[WLP_WSS_UUID_STRSIZE];
+
+	d_fnstart(5, dev, "Enrollment and activation requested. \n");
+	mutex_lock(&wss->mutex);
+	result = wlp_wss_enroll(wss, wssid, devaddr);
+	if (result < 0) {
+		wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+		dev_err(dev, "WLP: Enrollment into WSS %s failed.\n", buf);
+		goto error_enroll;
+	}
+	result = wlp_wss_activate(wss);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to activate WSS. Undoing enrollment "
+			"result = %d \n", result);
+		/* Undo enrollment */
+		wlp_wss_reset(wss);
+		goto error_activate;
+	}
+error_activate:
+error_enroll:
+	mutex_unlock(&wss->mutex);
+	d_fnend(5, dev, "Completed. result = %d \n", result);
+	return result;
+}
+
+/**
+ * Create, enroll, and activate a new WSS
+ *
+ * @wssid: new wssid provided by user
+ * @name:  WSS name requested by user.
+ * @sec_status: security status requested by user
+ *
+ * A user requested the creation of a new WSS. All operations are done
+ * locally. The new WSS will be stored locally, the hash will be included
+ * in the WLP IE, and the sysfs infrastructure for this WSS will be
+ * created.
+ */
+int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
+			    char *name, unsigned sec_status, unsigned accept)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	d_fnstart(5, dev, "Request to create new WSS.\n");
+	result = wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, "
+		 "sec_status=%u, accepting enrollment=%u \n",
+		 buf, name, sec_status, accept);
+	if (!mutex_trylock(&wss->mutex)) {
+		dev_err(dev, "WLP: WLP association session in progress.\n");
+		return -EBUSY;
+	}
+	if (wss->state != WLP_WSS_STATE_NONE) {
+		dev_err(dev, "WLP: WSS already exists. Not creating new.\n");
+		result = -EEXIST;
+		goto out;
+	}
+	if (wss->kobj.parent == NULL) {
+		dev_err(dev, "WLP: WSS parent not ready. Is network interface "
+		       "up?\n");
+		result = -ENXIO;
+		goto out;
+	}
+	if (sec_status == WLP_WSS_SECURE) {
+		dev_err(dev, "WLP: FIXME Creation of secure WSS not "
+			"supported yet.\n");
+		result = -EINVAL;
+		goto out;
+	}
+	wss->wssid = *wssid;
+	memcpy(wss->name, name, sizeof(wss->name));
+	wss->bcast = wlp_wss_sel_bcast_addr(wss);
+	wss->secure_status = sec_status;
+	wss->accept_enroll = accept;
+	/* wss->virtual_addr is initialized in the call to wlp_wss_setup() */
+	/* sysfs infrastructure */
+	result = wlp_wss_sysfs_add(wss, buf);
+	if (result < 0) {
+		dev_err(dev, "Cannot set up sysfs for WSS kobject.\n");
+		wlp_wss_reset(wss);
+		goto out;
+	} else
+		result = 0;
+	wss->state = WLP_WSS_STATE_ENROLLED;
+	result = wlp_wss_activate(wss);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to activate WSS. Undoing "
+			"enrollment\n");
+		wlp_wss_reset(wss);
+		goto out;
+	}
+	result = 0;
+out:
+	mutex_unlock(&wss->mutex);
+	d_fnend(5, dev, "Completed. result = %d \n", result);
+	return result;
+}
+
+/**
+ * Determine if neighbor has WSS activated
+ *
+ * @returns: 1 if neighbor has WSS activated, zero otherwise
+ *
+ * This can be done in two ways:
+ * - send a C1 frame, parse C2/F0 response
+ * - examine the WLP IE sent by the neighbor
+ *
+ * The WLP IE is not fully supported in hardware so we use the C1/C2 frame
+ * exchange to determine if a WSS is activated. Using the WLP IE should be
+ * faster and should be used when it becomes possible.
+ */
+int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss,
+		      struct uwb_dev_addr *dev_addr)
+{
+	int result = 0;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct wlp_session session;
+	struct sk_buff  *skb;
+	struct wlp_frame_assoc *resp;
+	struct wlp_uuid wssid;
+
+	wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+	d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+		  wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+	mutex_lock(&wlp->mutex);
+	/* Send C1 association frame */
+	result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1);
+	if (result < 0) {
+		dev_err(dev, "Unable to send C1 frame to neighbor "
+			"%02x:%02x (%d)\n", dev_addr->data[1],
+			dev_addr->data[0], result);
+		result = 0;
+		goto out;
+	}
+	/* Create session, wait for response */
+	session.exp_message = WLP_ASSOC_C2;
+	session.cb = wlp_session_cb;
+	session.cb_priv = &completion;
+	session.neighbor_addr = *dev_addr;
+	BUG_ON(wlp->session != NULL);
+	wlp->session = &session;
+	/* Wait for C2/F0 frame */
+	result = wait_for_completion_interruptible_timeout(&completion,
+						   WLP_PER_MSG_TIMEOUT * HZ);
+	if (result == 0) {
+		dev_err(dev, "Timeout while sending C1 to neighbor "
+			     "%02x:%02x.\n", dev_addr->data[1],
+			     dev_addr->data[0]);
+		goto out;
+	}
+	if (result < 0) {
+		dev_err(dev, "Unable to send C1 to neighbor %02x:%02x.\n",
+			dev_addr->data[1], dev_addr->data[0]);
+		result = 0;
+		goto out;
+	}
+	/* Parse message in session->data: it will be either C2 or F0 */
+	skb = session.data;
+	resp = (void *) skb->data;
+	d_printf(5, dev, "Received response to C1 frame. \n");
+	d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
+	if (resp->type == WLP_ASSOC_F0) {
+		result = wlp_parse_f0(wlp, skb);
+		if (result < 0)
+			dev_err(dev, "WLP:  unable to parse incoming F0 "
+				"frame from neighbor %02x:%02x.\n",
+				dev_addr->data[1], dev_addr->data[0]);
+		result = 0;
+		goto error_resp_parse;
+	}
+	/* WLP version and message type fields have already been parsed */
+	result = wlp_get_wssid(wlp, (void *)resp + sizeof(*resp), &wssid,
+			       skb->len - sizeof(*resp));
+	if (result < 0) {
+		dev_err(dev, "WLP: unable to obtain WSSID from C2 frame.\n");
+		result = 0;
+		goto error_resp_parse;
+	}
+	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
+		d_printf(5, dev, "WSSID in C2 frame matches local "
+			 "active WSS.\n");
+		result = 1;
+	} else {
+		dev_err(dev, "WLP: Received a C2 frame without matching "
+			"WSSID.\n");
+		result = 0;
+	}
+error_resp_parse:
+	kfree_skb(skb);
+out:
+	wlp->session = NULL;
+	mutex_unlock(&wlp->mutex);
+	d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+		  wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+	return result;
+}
+
+/**
+ * Activate connection with neighbor by updating EDA cache
+ *
+ * @wss:       local WSS to which neighbor wants to connect
+ * @dev_addr:  neighbor's address
+ * @wssid:     neighbor's WSSID - must be same as our WSS's WSSID
+ * @tag:       neighbor's WSS tag used to identify frames transmitted by it
+ * @virt_addr: neighbor's virtual EUI-48
+ */
+static
+int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss,
+				struct uwb_dev_addr *dev_addr,
+				struct wlp_uuid *wssid, u8 *tag,
+				struct uwb_mac_addr *virt_addr)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+	d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
+		  "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag,
+		  virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
+		  virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
+
+	if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) {
+		d_printf(5, dev, "WSSID from neighbor frame matches local "
+			 "active WSS.\n");
+		/* Update EDA cache */
+		result = wlp_eda_update_node(&wlp->eda, dev_addr, wss,
+					     (void *) virt_addr->data, *tag,
+					     WLP_WSS_CONNECTED);
+		if (result < 0)
+			dev_err(dev, "WLP: Unable to update EDA cache "
+				"with new connected neighbor information.\n");
+	} else {
+		dev_err(dev, "WLP: Neighbor does not have matching "
+			"WSSID.\n");
+		result = -EINVAL;
+	}
+
+	d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
+		  "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n",
+		  wlp, wss, buf, *tag,
+		  virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
+		  virt_addr->data[3], virt_addr->data[4], virt_addr->data[5],
+		  result);
+
+	return result;
+}
+
+/**
+ * Connect to WSS neighbor
+ *
+ * Use C3/C4 exchange to determine if neighbor has WSS activated and
+ * retrieve the WSS tag and virtual EUI-48 of the neighbor.
+ */
+static
+int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss,
+			     struct uwb_dev_addr *dev_addr)
+{
+	int result;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	char buf[WLP_WSS_UUID_STRSIZE];
+	struct wlp_uuid wssid;
+	u8 tag;
+	struct uwb_mac_addr virt_addr;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct wlp_session session;
+	struct wlp_frame_assoc *resp;
+	struct sk_buff *skb;
+
+	wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+	d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+		  wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+	mutex_lock(&wlp->mutex);
+	/* Send C3 association frame */
+	result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3);
+	if (result < 0) {
+		dev_err(dev, "Unable to send C3 frame to neighbor "
+			"%02x:%02x (%d)\n", dev_addr->data[1],
+			dev_addr->data[0], result);
+		goto out;
+	}
+	/* Create session, wait for response */
+	session.exp_message = WLP_ASSOC_C4;
+	session.cb = wlp_session_cb;
+	session.cb_priv = &completion;
+	session.neighbor_addr = *dev_addr;
+	BUG_ON(wlp->session != NULL);
+	wlp->session = &session;
+	/* Wait for C4/F0 frame */
+	result = wait_for_completion_interruptible_timeout(&completion,
+						   WLP_PER_MSG_TIMEOUT * HZ);
+	if (result == 0) {
+		dev_err(dev, "Timeout while sending C3 to neighbor "
+			     "%02x:%02x.\n", dev_addr->data[1],
+			     dev_addr->data[0]);
+		result = -ETIMEDOUT;
+		goto out;
+	}
+	if (result < 0) {
+		dev_err(dev, "Unable to send C3 to neighbor %02x:%02x.\n",
+			dev_addr->data[1], dev_addr->data[0]);
+		goto out;
+	}
+	/* Parse message in session->data: it will be either C4 or F0 */
+	skb = session.data;
+	resp = (void *) skb->data;
+	d_printf(5, dev, "Received response to C3 frame. \n");
+	d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
+	if (resp->type == WLP_ASSOC_F0) {
+		result = wlp_parse_f0(wlp, skb);
+		if (result < 0)
+			dev_err(dev, "WLP: unable to parse incoming F0 "
+				"frame from neighbor %02x:%02x.\n",
+				dev_addr->data[1], dev_addr->data[0]);
+		result = -EINVAL;
+		goto error_resp_parse;
+	}
+	result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to parse C4 frame from neighbor.\n");
+		goto error_resp_parse;
+	}
+	result = wlp_wss_activate_connection(wlp, wss, dev_addr, &wssid, &tag,
+					     &virt_addr);
+	if (result < 0) {
+		dev_err(dev, "WLP: Unable to activate connection to "
+			"neighbor %02x:%02x.\n", dev_addr->data[1],
+			dev_addr->data[0]);
+		goto error_resp_parse;
+	}
+error_resp_parse:
+	kfree_skb(skb);
+out:
+	/* Record that we unsuccessfully tried to connect to this neighbor */
+	if (result < 0)
+		wlp_eda_update_node_state(&wlp->eda, dev_addr,
+					  WLP_WSS_CONNECT_FAILED);
+	wlp->session = NULL;
+	mutex_unlock(&wlp->mutex);
+	d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+		  wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+	return result;
+}
+
+/**
+ * Connect to neighbor with common WSS, send pending frame
+ *
+ * This function is scheduled when a frame is destined to a neighbor with
+ * which we do not have a connection. A copy of the EDA cache entry is
+ * provided - not the actual cache entry (because it is protected by a
+ * spinlock).
+ *
+ * First determine if neighbor has the same WSS activated, connect if it
+ * does. The C3/C4 exchange is dual purpose to determine if neighbor has
+ * WSS activated and proceed with the connection.
+ *
+ * The frame that triggered the connection setup is sent after connection
+ * setup.
+ *
+ * The network queue is stopped - we need to restart it when done.
+ *
+ */
+static
+void wlp_wss_connect_send(struct work_struct *ws)
+{
+	struct wlp_assoc_conn_ctx *conn_ctx = container_of(ws,
+						  struct wlp_assoc_conn_ctx,
+						  ws);
+	struct wlp *wlp = conn_ctx->wlp;
+	struct sk_buff *skb = conn_ctx->skb;
+	struct wlp_eda_node *eda_entry = &conn_ctx->eda_entry;
+	struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+	struct wlp_wss *wss = &wlp->wss;
+	int result;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	char buf[WLP_WSS_UUID_STRSIZE];
+
+	mutex_lock(&wss->mutex);
+	wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+	d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+		  wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+	if (wss->state < WLP_WSS_STATE_ACTIVE) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Attempting to connect with "
+				"WSS that is not active or connected.\n");
+		dev_kfree_skb(skb);
+		goto out;
+	}
+	/* Establish connection - send C3 rcv C4 */
+	result = wlp_wss_connect_neighbor(wlp, wss, dev_addr);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Unable to establish connection "
+				"with neighbor %02x:%02x.\n",
+				dev_addr->data[1], dev_addr->data[0]);
+		dev_kfree_skb(skb);
+		goto out;
+	}
+	/* EDA entry changed, update the local copy being used */
+	result = wlp_copy_eda_node(&wlp->eda, dev_addr, eda_entry);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Cannot find EDA entry for "
+				"neighbor %02x:%02x \n",
+				dev_addr->data[1], dev_addr->data[0]);
+	}
+	result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Unable to prepare frame header for "
+				"transmission (neighbor %02x:%02x). \n",
+				dev_addr->data[1], dev_addr->data[0]);
+		dev_kfree_skb(skb);
+		goto out;
+	}
+	BUG_ON(wlp->xmit_frame == NULL);
+	result = wlp->xmit_frame(wlp, skb, dev_addr);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Unable to transmit frame: %d\n",
+				result);
+		if (result == -ENXIO)
+			dev_err(dev, "WLP: Is network interface up? \n");
+		/* We could try again ... */
+		dev_kfree_skb(skb);/*we need to free if tx fails */
+	}
+out:
+	kfree(conn_ctx);
+	BUG_ON(wlp->start_queue == NULL);
+	wlp->start_queue(wlp);
+	mutex_unlock(&wss->mutex);
+	d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf);
+}
+
+/**
+ * Add WLP header to outgoing skb
+ *
+ * @eda_entry: pointer to neighbor's entry in the EDA cache
+ * @_skb:      skb containing data destined to the neighbor
+ */
+int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
+		     void *_skb)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	unsigned char *eth_addr = eda_entry->eth_addr;
+	struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+	struct sk_buff *skb = _skb;
+	struct wlp_frame_std_abbrv_hdr *std_hdr;
+
+	d_fnstart(6, dev, "wlp %p \n", wlp);
+	if (eda_entry->state == WLP_WSS_CONNECTED) {
+		/* Add WLP header */
+		BUG_ON(skb_headroom(skb) < sizeof(*std_hdr));
+		std_hdr = (void *) __skb_push(skb, sizeof(*std_hdr));
+		std_hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+		std_hdr->hdr.type = WLP_FRAME_STANDARD;
+		std_hdr->tag = eda_entry->wss->tag;
+	} else {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Destination neighbor (Ethernet: "
+				"%02x:%02x:%02x:%02x:%02x:%02x, Dev: "
+				"%02x:%02x) is not connected. \n", eth_addr[0],
+				eth_addr[1], eth_addr[2], eth_addr[3],
+				eth_addr[4], eth_addr[5], dev_addr->data[1],
+				dev_addr->data[0]);
+		result = -EINVAL;
+	}
+	d_fnend(6, dev, "wlp %p \n", wlp);
+	return result;
+}
+
+
+/**
+ * Prepare skb for neighbor: connect if not already and prep WLP header
+ *
+ * This function is called in interrupt context, but it needs to sleep. We
+ * temporarily stop the net queue to establish the WLP connection.
+ * Setup of the WLP connection and restart of the queue are scheduled
+ * on the default work queue.
+ *
+ * run with eda->lock held (spinlock)
+ */
+int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry,
+			 void *_skb)
+{
+	int result = 0;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+	unsigned char *eth_addr = eda_entry->eth_addr;
+	struct sk_buff *skb = _skb;
+	struct wlp_assoc_conn_ctx *conn_ctx;
+
+	d_fnstart(5, dev, "wlp %p\n", wlp);
+	d_printf(5, dev, "To neighbor %02x:%02x with eth "
+		  "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1],
+		  dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2],
+		  eth_addr[3], eth_addr[4], eth_addr[5]);
+	if (eda_entry->state == WLP_WSS_UNCONNECTED) {
+		/* We don't want any more packets while we set up connection */
+		BUG_ON(wlp->stop_queue == NULL);
+		wlp->stop_queue(wlp);
+		conn_ctx = kmalloc(sizeof(*conn_ctx), GFP_ATOMIC);
+		if (conn_ctx == NULL) {
+			if (printk_ratelimit())
+				dev_err(dev, "WLP: Unable to allocate memory "
+					"for connection handling.\n");
+			result = -ENOMEM;
+			goto out;
+		}
+		conn_ctx->wlp = wlp;
+		conn_ctx->skb = skb;
+		conn_ctx->eda_entry = *eda_entry;
+		INIT_WORK(&conn_ctx->ws, wlp_wss_connect_send);
+		schedule_work(&conn_ctx->ws);
+		result = 1;
+	} else if (eda_entry->state == WLP_WSS_CONNECT_FAILED) {
+		/* Previous connection attempts failed, don't retry - see
+		 * conditions for connection in WLP 0.99 [7.6.2] */
+		if (printk_ratelimit())
+			dev_err(dev, "Could not connect to neighbor "
+			 "previously. Not retrying. \n");
+		result = -ENONET;
+		goto out;
+	} else { /* eda_entry->state == WLP_WSS_CONNECTED */
+		d_printf(5, dev, "Neighbor is connected, preparing frame.\n");
+		result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
+	}
+out:
+	d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result);
+	return result;
+}
+
+/**
+ * Emulate broadcast: copy skb, send copy to neighbor (connect if not already)
+ *
+ * We need to copy skbs in the case where we emulate broadcast through
+ * unicast. We copy instead of clone because we are modifying the data of
+ * the frame after copying ... clones share data so we cannot emulate
+ * broadcast using clones.
+ *
+ * run with eda->lock held (spinlock)
+ */
+int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry,
+		      void *_skb)
+{
+	int result = -ENOMEM;
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	struct sk_buff *skb = _skb;
+	struct sk_buff *copy;
+	struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+
+	d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n",
+		  dev_addr->data[1], dev_addr->data[0], skb);
+	copy = skb_copy(skb, GFP_ATOMIC);
+	if (copy == NULL) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Unable to copy skb for "
+				"transmission.\n");
+		goto out;
+	}
+	result = wlp_wss_connect_prep(wlp, eda_entry, copy);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Unable to connect/send skb "
+				"to neighbor.\n");
+		dev_kfree_skb_irq(copy);
+		goto out;
+	} else if (result == 1)
+		/* Frame will be transmitted separately */
+		goto out;
+	BUG_ON(wlp->xmit_frame == NULL);
+	result = wlp->xmit_frame(wlp, copy, dev_addr);
+	if (result < 0) {
+		if (printk_ratelimit())
+			dev_err(dev, "WLP: Unable to transmit frame: %d\n",
+				result);
+		if ((result == -ENXIO) && printk_ratelimit())
+			dev_err(dev, "WLP: Is network interface up? \n");
+		/* We could try again ... */
+		dev_kfree_skb_irq(copy);/*we need to free if tx fails */
+	}
+out:
+	d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1],
+		  dev_addr->data[0]);
+	return result;
+}
+
+
+/**
+ * Setup WSS
+ *
+ * Should be called by the network driver after the interface has been given a
+ * MAC address.
+ */
+int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	int result = 0;
+	d_fnstart(5, dev, "wss (%p) \n", wss);
+	mutex_lock(&wss->mutex);
+	wss->kobj.parent = &net_dev->dev.kobj;
+	if (!is_valid_ether_addr(net_dev->dev_addr)) {
+		dev_err(dev, "WLP: Invalid MAC address. Cannot use for "
+		       "virtual.\n");
+		result = -EINVAL;
+		goto out;
+	}
+	memcpy(wss->virtual_addr.data, net_dev->dev_addr,
+	       sizeof(wss->virtual_addr.data));
+out:
+	mutex_unlock(&wss->mutex);
+	d_fnend(5, dev, "wss (%p) \n", wss);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wlp_wss_setup);
+
+/**
+ * Remove WSS
+ *
+ * Called by the client that configured the WSS through wlp_wss_setup(). This
+ * function is called when the client no longer needs the WSS, e.g. when the
+ * client shuts down.
+ *
+ * We remove the WLP IE from the beacon before initiating local cleanup.
+ */
+void wlp_wss_remove(struct wlp_wss *wss)
+{
+	struct wlp *wlp = container_of(wss, struct wlp, wss);
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	d_fnstart(5, dev, "wss (%p) \n", wss);
+	mutex_lock(&wss->mutex);
+	if (wss->state == WLP_WSS_STATE_ACTIVE)
+		uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP);
+	if (wss->state != WLP_WSS_STATE_NONE) {
+		sysfs_remove_group(&wss->kobj, &wss_attr_group);
+		kobject_put(&wss->kobj);
+	}
+	wss->kobj.parent = NULL;
+	memset(&wss->virtual_addr, 0, sizeof(wss->virtual_addr));
+	/* Cleanup EDA cache */
+	wlp_eda_release(&wlp->eda);
+	wlp_eda_init(&wlp->eda);
+	mutex_unlock(&wss->mutex);
+	d_fnend(5, dev, "wss (%p) \n", wss);
+}
+EXPORT_SYMBOL_GPL(wlp_wss_remove);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 6fa0b9d..d4cfed0 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -19,7 +19,7 @@
 #include <linux/backlight.h>
 
 #include <cpu/dac.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 #include <asm/hd64461.h>
 
 #define HP680_MAX_INTENSITY 255
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 217c511..cd5f20d 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1002,101 +1002,132 @@
  	return ret;
 }
 
-static int 
-fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+static long
+fb_ioctl(struct file *file, unsigned int cmd,
 	 unsigned long arg)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int fbidx = iminor(inode);
-	struct fb_info *info = registered_fb[fbidx];
-	struct fb_ops *fb = info->fbops;
+	struct fb_info *info;
+	struct fb_ops *fb;
 	struct fb_var_screeninfo var;
 	struct fb_fix_screeninfo fix;
 	struct fb_con2fbmap con2fb;
 	struct fb_cmap_user cmap;
 	struct fb_event event;
 	void __user *argp = (void __user *)arg;
-	int i;
-	
-	if (!fb)
+	long ret = 0;
+
+	info = registered_fb[fbidx];
+	mutex_lock(&info->lock);
+	fb = info->fbops;
+
+	if (!fb) {
+		mutex_unlock(&info->lock);
 		return -ENODEV;
+	}
 	switch (cmd) {
 	case FBIOGET_VSCREENINFO:
-		return copy_to_user(argp, &info->var,
+		ret = copy_to_user(argp, &info->var,
 				    sizeof(var)) ? -EFAULT : 0;
+		break;
 	case FBIOPUT_VSCREENINFO:
-		if (copy_from_user(&var, argp, sizeof(var)))
-			return -EFAULT;
+		if (copy_from_user(&var, argp, sizeof(var))) {
+			ret =  -EFAULT;
+			break;
+		}
 		acquire_console_sem();
 		info->flags |= FBINFO_MISC_USEREVENT;
-		i = fb_set_var(info, &var);
+		ret = fb_set_var(info, &var);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
 		release_console_sem();
-		if (i) return i;
-		if (copy_to_user(argp, &var, sizeof(var)))
-			return -EFAULT;
-		return 0;
+		if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
+			ret = -EFAULT;
+		break;
 	case FBIOGET_FSCREENINFO:
-		return copy_to_user(argp, &info->fix,
+		ret = copy_to_user(argp, &info->fix,
 				    sizeof(fix)) ? -EFAULT : 0;
+		break;
 	case FBIOPUTCMAP:
 		if (copy_from_user(&cmap, argp, sizeof(cmap)))
-			return -EFAULT;
-		return (fb_set_user_cmap(&cmap, info));
+			ret = -EFAULT;
+		else
+			ret = fb_set_user_cmap(&cmap, info);
+		break;
 	case FBIOGETCMAP:
 		if (copy_from_user(&cmap, argp, sizeof(cmap)))
-			return -EFAULT;
-		return fb_cmap_to_user(&info->cmap, &cmap);
+			ret = -EFAULT;
+		else
+			ret = fb_cmap_to_user(&info->cmap, &cmap);
+		break;
 	case FBIOPAN_DISPLAY:
-		if (copy_from_user(&var, argp, sizeof(var)))
-			return -EFAULT;
+		if (copy_from_user(&var, argp, sizeof(var))) {
+			ret = -EFAULT;
+			break;
+		}
 		acquire_console_sem();
-		i = fb_pan_display(info, &var);
+		ret = fb_pan_display(info, &var);
 		release_console_sem();
-		if (i)
-			return i;
-		if (copy_to_user(argp, &var, sizeof(var)))
-			return -EFAULT;
-		return 0;
+		if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
+			ret = -EFAULT;
+		break;
 	case FBIO_CURSOR:
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	case FBIOGET_CON2FBMAP:
 		if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
-			return -EFAULT;
-		if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
-		    return -EINVAL;
-		con2fb.framebuffer = -1;
-		event.info = info;
-		event.data = &con2fb;
-		fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
-		return copy_to_user(argp, &con2fb,
+			ret = -EFAULT;
+		else if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+			ret = -EINVAL;
+		else {
+			con2fb.framebuffer = -1;
+			event.info = info;
+			event.data = &con2fb;
+			fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP,
+								&event);
+			ret = copy_to_user(argp, &con2fb,
 				    sizeof(con2fb)) ? -EFAULT : 0;
+		}
+		break;
 	case FBIOPUT_CON2FBMAP:
-		if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
-			return - EFAULT;
-		if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
-		    return -EINVAL;
-		if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
-		    return -EINVAL;
+		if (copy_from_user(&con2fb, argp, sizeof(con2fb))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) {
+			ret = -EINVAL;
+			break;
+		}
+		if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) {
+			ret = -EINVAL;
+			break;
+		}
 		if (!registered_fb[con2fb.framebuffer])
-		    request_module("fb%d", con2fb.framebuffer);
-		if (!registered_fb[con2fb.framebuffer])
-		    return -EINVAL;
+			request_module("fb%d", con2fb.framebuffer);
+		if (!registered_fb[con2fb.framebuffer]) {
+			ret = -EINVAL;
+			break;
+		}
 		event.info = info;
 		event.data = &con2fb;
-		return fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
+		ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
 					      &event);
+		break;
 	case FBIOBLANK:
 		acquire_console_sem();
 		info->flags |= FBINFO_MISC_USEREVENT;
-		i = fb_blank(info, arg);
+		ret = fb_blank(info, arg);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
 		release_console_sem();
-		return i;
+		break;
 	default:
 		if (fb->fb_ioctl == NULL)
-			return -EINVAL;
-		return fb->fb_ioctl(info, cmd, arg);
+			ret = -ENOTTY;
+		else
+			ret = fb->fb_ioctl(info, cmd, arg);
 	}
+	mutex_unlock(&info->lock);
+	return ret;
 }
 
 #ifdef CONFIG_COMPAT
@@ -1150,7 +1181,7 @@
 	    put_user(compat_ptr(data), &cmap->transp))
 		return -EFAULT;
 
-	err = fb_ioctl(inode, file, cmd, (unsigned long) cmap);
+	err = fb_ioctl(file, cmd, (unsigned long) cmap);
 
 	if (!err) {
 		if (copy_in_user(&cmap32->start,
@@ -1204,7 +1235,7 @@
 
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	err = fb_ioctl(inode, file, cmd, (unsigned long) &fix);
+	err = fb_ioctl(file, cmd, (unsigned long) &fix);
 	set_fs(old_fs);
 
 	if (!err)
@@ -1222,7 +1253,7 @@
 	struct fb_ops *fb = info->fbops;
 	long ret = -ENOIOCTLCMD;
 
-	lock_kernel();
+	mutex_lock(&info->lock);
 	switch(cmd) {
 	case FBIOGET_VSCREENINFO:
 	case FBIOPUT_VSCREENINFO:
@@ -1231,7 +1262,7 @@
 	case FBIOPUT_CON2FBMAP:
 		arg = (unsigned long) compat_ptr(arg);
 	case FBIOBLANK:
-		ret = fb_ioctl(inode, file, cmd, arg);
+		ret = fb_ioctl(file, cmd, arg);
 		break;
 
 	case FBIOGET_FSCREENINFO:
@@ -1248,7 +1279,7 @@
 			ret = fb->fb_compat_ioctl(info, cmd, arg);
 		break;
 	}
-	unlock_kernel();
+	mutex_unlock(&info->lock);
 	return ret;
 }
 #endif
@@ -1270,13 +1301,13 @@
 		return -ENODEV;
 	if (fb->fb_mmap) {
 		int res;
-		lock_kernel();
+		mutex_lock(&info->lock);
 		res = fb->fb_mmap(info, vma);
-		unlock_kernel();
+		mutex_unlock(&info->lock);
 		return res;
 	}
 
-	lock_kernel();
+	mutex_lock(&info->lock);
 
 	/* frame buffer memory */
 	start = info->fix.smem_start;
@@ -1285,13 +1316,13 @@
 		/* memory mapped io */
 		off -= len;
 		if (info->var.accel_flags) {
-			unlock_kernel();
+			mutex_unlock(&info->lock);
 			return -EINVAL;
 		}
 		start = info->fix.mmio_start;
 		len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
 	}
-	unlock_kernel();
+	mutex_unlock(&info->lock);
 	start &= PAGE_MASK;
 	if ((vma->vm_end - vma->vm_start + off) > len)
 		return -EINVAL;
@@ -1315,13 +1346,13 @@
 
 	if (fbidx >= FB_MAX)
 		return -ENODEV;
-	lock_kernel();
-	if (!(info = registered_fb[fbidx]))
+	info = registered_fb[fbidx];
+	if (!info)
 		request_module("fb%d", fbidx);
-	if (!(info = registered_fb[fbidx])) {
-		res = -ENODEV;
-		goto out;
-	}
+	info = registered_fb[fbidx];
+	if (!info)
+		return -ENODEV;
+	mutex_lock(&info->lock);
 	if (!try_module_get(info->fbops->owner)) {
 		res = -ENODEV;
 		goto out;
@@ -1337,7 +1368,7 @@
 		fb_deferred_io_open(info, inode, file);
 #endif
 out:
-	unlock_kernel();
+	mutex_unlock(&info->lock);
 	return res;
 }
 
@@ -1346,11 +1377,11 @@
 {
 	struct fb_info * const info = file->private_data;
 
-	lock_kernel();
+	mutex_lock(&info->lock);
 	if (info->fbops->fb_release)
 		info->fbops->fb_release(info,1);
 	module_put(info->fbops->owner);
-	unlock_kernel();
+	mutex_unlock(&info->lock);
 	return 0;
 }
 
@@ -1358,7 +1389,7 @@
 	.owner =	THIS_MODULE,
 	.read =		fb_read,
 	.write =	fb_write,
-	.ioctl =	fb_ioctl,
+	.unlocked_ioctl = fb_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = fb_compat_ioctl,
 #endif
@@ -1429,6 +1460,7 @@
 		if (!registered_fb[i])
 			break;
 	fb_info->node = i;
+	mutex_init(&fb_info->lock);
 
 	fb_info->dev = device_create(fb_class, fb_info->device,
 				     MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
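
A minimal sketch of the locking pattern the fbmem.c hunks above introduce, using a hypothetical "foo" driver with assumed names (not part of this patch): the BKL-protected .ioctl hook becomes .unlocked_ioctl, every case falls through to a single mutex_unlock(), and unknown commands return -ENOTTY instead of -EINVAL.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

struct foo_dev {
	struct mutex lock;		/* mutex_init() at probe time; serializes ioctl/mmap/open/release */
};

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct foo_dev *dev = file->private_data;	/* set at open time */
	long ret;

	mutex_lock(&dev->lock);
	switch (cmd) {
	/* real commands would go here, each path ending in "break" */
	default:
		ret = -ENOTTY;		/* unknown ioctl: not -EINVAL */
		break;
	}
	mutex_unlock(&dev->lock);
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};
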
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
deleted file mode 100644
index e69de29..0000000
--- a/drivers/video/imacfb.c
+++ /dev/null
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 4c32c06..efff672 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -16,7 +16,7 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
-#include <asm/sh_mobile_lcdc.h>
+#include <video/sh_mobile_lcdc.h>
 
 #define PALETTE_NR 16
 
@@ -34,7 +34,9 @@
 
 struct sh_mobile_lcdc_priv {
 	void __iomem *base;
+#ifdef CONFIG_HAVE_CLK
 	struct clk *clk;
+#endif
 	unsigned long lddckr;
 	struct sh_mobile_lcdc_chan ch[2];
 };
@@ -260,6 +262,11 @@
 		tmp = ch->ldmt1r_value;
 		tmp |= (lcd_cfg->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28;
 		tmp |= (lcd_cfg->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27;
+		tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? 1 << 26 : 0;
+		tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? 1 << 25 : 0;
+		tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? 1 << 24 : 0;
+		tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? 1 << 17 : 0;
+		tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? 1 << 16 : 0;
 		lcdc_write_chan(ch, LDMT1R, tmp);
 
 		/* setup SYS bus */
@@ -422,6 +429,7 @@
 
 	priv->lddckr = icksel << 16;
 
+#ifdef CONFIG_HAVE_CLK
 	if (str) {
 		priv->clk = clk_get(dev, str);
 		if (IS_ERR(priv->clk)) {
@@ -431,6 +439,7 @@
 
 		clk_enable(priv->clk);
 	}
+#endif
 
 	return 0;
 }
@@ -585,7 +594,6 @@
 		goto err1;
 	}
 
-	priv->lddckr = pdata->lddckr;
 	priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1);
 
 	for (i = 0; i < j; i++) {
@@ -688,10 +696,12 @@
 		fb_dealloc_cmap(&info->cmap);
 	}
 
+#ifdef CONFIG_HAVE_CLK
 	if (priv->clk) {
 		clk_disable(priv->clk);
 		clk_put(priv->clk);
 	}
+#endif
 
 	if (priv->base)
 		iounmap(priv->base);
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index ed6b057..1f09d4e 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -80,7 +80,6 @@
 	.attr = {
 		.name = "w1_slave",
 		.mode = S_IRUGO,
-		.owner = THIS_MODULE,
 	},
 	.size = DS2760_DATA_SIZE,
 	.read = w1_ds2760_read_bin,
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 05a2810..8782ec1 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -154,7 +154,7 @@
 		return -EINVAL;
 
 	for (i = 0x0F; i > -1; i--)
-		if (wd_times[i] > t)
+		if (wd_times[i] >= t)
 			break;
 	wd_margin = i;
 	return 0;
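
The one-character ib700wdt.c change above matters because the loop scans a descending timeout table from its smallest entry upward; with ">" an exactly supported timeout was skipped and rounded up one step. A stand-alone illustration with a made-up table (the driver's real wd_times[] values are not reproduced here, and out-of-range requests are assumed to be rejected earlier, as in the driver):

#include <stdio.h>

static const int steps[] = { 30, 24, 18, 12, 6, 2 };	/* descending, like wd_times[] */

static int pick_margin(int t)
{
	int i;

	for (i = 5; i > -1; i--)
		if (steps[i] >= t)	/* was ">" before the fix */
			break;
	return i;
}

int main(void)
{
	int i = pick_margin(6);

	/* with ">=": t = 6 selects steps[4] == 6; with ">" it would pick 12 */
	printf("margin index for t=6: %d (timeout %d)\n", i, steps[i]);
	return 0;
}
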
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index c73b5e2..ada8ad8 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -102,7 +102,7 @@
 
 	} else {
 		printk(KERN_ERR PFX "No W83697UG/UF could be found\n");
-		return -EIO;
+		return;
 	}
 
 	outb_p(0x07, WDT_EFER); /* point to logical device number reg */
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index c3290bc..9ce1ab6 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -125,7 +125,7 @@
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
 #endif
 
 	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
@@ -137,10 +137,12 @@
 static void init_evtchn_cpu_bindings(void)
 {
 #ifdef CONFIG_SMP
+	struct irq_desc *desc;
 	int i;
+
 	/* By default all event channels notify CPU#0. */
-	for (i = 0; i < NR_IRQS; i++)
-		irq_desc[i].affinity = cpumask_of_cpu(0);
+	for_each_irq_desc(i, desc)
+		desc->affinity = cpumask_of_cpu(0);
 #endif
 
 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
@@ -229,12 +231,12 @@
 	int irq;
 
 	/* Only allocate from dynirq range */
-	for (irq = 0; irq < NR_IRQS; irq++)
+	for_each_irq_nr(irq)
 		if (irq_bindcount[irq] == 0)
 			break;
 
-	if (irq == NR_IRQS)
-		panic("No available IRQ to bind to: increase NR_IRQS!\n");
+	if (irq == nr_irqs)
+		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
 	return irq;
 }
@@ -790,7 +792,7 @@
 		mask_evtchn(evtchn);
 
 	/* No IRQ <-> event-channel mappings. */
-	for (irq = 0; irq < NR_IRQS; irq++)
+	for_each_irq_nr(irq)
 		irq_info[irq].evtchn = 0; /* zap event-channel binding */
 
 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -822,7 +824,7 @@
 		mask_evtchn(i);
 
 	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-	for (i = 0; i < NR_IRQS; i++)
+	for_each_irq_nr(i)
 		irq_bindcount[i] = 0;
 
 	irq_ctx_init(smp_processor_id());
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index c061c3f..24eb010 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -30,8 +30,8 @@
 #include <linux/parser.h>
 #include <linux/idr.h>
 #include <net/9p/9p.h>
-#include <net/9p/transport.h>
 #include <net/9p/client.h>
+#include <net/9p/transport.h>
 #include "v9fs.h"
 #include "v9fs_vfs.h"
 
@@ -234,7 +234,7 @@
 	if (!v9ses->clnt->dotu)
 		v9ses->flags &= ~V9FS_EXTENDED;
 
-	v9ses->maxdata = v9ses->clnt->msize;
+	v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
 
 	/* for legacy mode, fall back to V9FS_ACCESS_ANY */
 	if (!v9fs_extended(v9ses) &&
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 57997fa..c295ba7 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -46,9 +46,11 @@
 
 struct inode *v9fs_get_inode(struct super_block *sb, int mode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
-void v9fs_stat2inode(struct p9_stat *, struct inode *, struct super_block *);
+void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
-void v9fs_inode2stat(struct inode *inode, struct p9_stat *stat);
+void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
 void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
+
+ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 97d3aed..6fcb1e7 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -38,7 +38,6 @@
 
 #include "v9fs.h"
 #include "v9fs_vfs.h"
-#include "fid.h"
 
 /**
  * v9fs_vfs_readpage - read an entire page in from 9P
@@ -53,14 +52,12 @@
 	int retval;
 	loff_t offset;
 	char *buffer;
-	struct p9_fid *fid;
 
 	P9_DPRINTK(P9_DEBUG_VFS, "\n");
-	fid = filp->private_data;
 	buffer = kmap(page);
 	offset = page_offset(page);
 
-	retval = p9_client_readn(fid, buffer, offset, PAGE_CACHE_SIZE);
+	retval = v9fs_file_readn(filp, buffer, NULL, offset, PAGE_CACHE_SIZE);
 	if (retval < 0)
 		goto done;
 
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index e298fe1..873cd31 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -45,7 +45,7 @@
  *
  */
 
-static inline int dt_type(struct p9_stat *mistat)
+static inline int dt_type(struct p9_wstat *mistat)
 {
 	unsigned long perm = mistat->mode;
 	int rettype = DT_REG;
@@ -69,32 +69,58 @@
 static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
 	int over;
+	struct p9_wstat st;
+	int err;
 	struct p9_fid *fid;
-	struct v9fs_session_info *v9ses;
-	struct inode *inode;
-	struct p9_stat *st;
+	int buflen;
+	char *statbuf;
+	int n, i = 0;
 
 	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
-	inode = filp->f_path.dentry->d_inode;
-	v9ses = v9fs_inode2v9ses(inode);
 	fid = filp->private_data;
-	while ((st = p9_client_dirread(fid, filp->f_pos)) != NULL) {
-		if (IS_ERR(st))
-			return PTR_ERR(st);
 
-		over = filldir(dirent, st->name.str, st->name.len, filp->f_pos,
-			v9fs_qid2ino(&st->qid), dt_type(st));
+	buflen = fid->clnt->msize - P9_IOHDRSZ;
+	statbuf = kmalloc(buflen, GFP_KERNEL);
+	if (!statbuf)
+		return -ENOMEM;
 
-		if (over)
+	while (1) {
+		err = v9fs_file_readn(filp, statbuf, NULL, buflen,
+								fid->rdir_fpos);
+		if (err <= 0)
 			break;
 
-		filp->f_pos += st->size;
-		kfree(st);
-		st = NULL;
+		n = err;
+		while (i < n) {
+			err = p9stat_read(statbuf + i, buflen-i, &st,
+							fid->clnt->dotu);
+			if (err) {
+				P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
+				err = -EIO;
+				p9stat_free(&st);
+				goto free_and_exit;
+			}
+
+			i += st.size+2;
+			fid->rdir_fpos += st.size+2;
+
+			over = filldir(dirent, st.name, strlen(st.name),
+			    filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st));
+
+			filp->f_pos += st.size+2;
+
+			p9stat_free(&st);
+
+			if (over) {
+				err = 0;
+				goto free_and_exit;
+			}
+		}
 	}
 
-	kfree(st);
-	return 0;
+free_and_exit:
+	kfree(statbuf);
+	return err;
 }
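
The rewritten v9fs_dir_readdir() above now reads raw directory data in msize-bounded chunks and walks the packed stat records itself; each record is prefixed by a two-byte size field that does not count itself, which is where the "st.size + 2" stride comes from. A user-space sketch of that framing with a hypothetical record layout (just a size prefix and opaque payload, not the real p9_wstat):

#include <stdint.h>
#include <stdio.h>

/* walk length-prefixed records: size[2] counts the bytes *after* itself */
static void walk_records(const unsigned char *buf, size_t buflen)
{
	size_t i = 0;

	while (i + 2 <= buflen) {
		uint16_t size = buf[i] | (buf[i + 1] << 8);	/* little endian */

		if (i + 2 + size > buflen)
			break;				/* truncated record */
		/* payload spans buf[i + 2] .. buf[i + 2 + size - 1] */
		printf("record of %u payload bytes at offset %zu\n",
		       (unsigned)size, i);
		i += size + 2;				/* same stride as the kernel loop */
	}
}

int main(void)
{
	/* two records: 3-byte "abc" and 2-byte "xy", each size-prefixed */
	const unsigned char buf[] = { 3, 0, 'a', 'b', 'c', 2, 0, 'x', 'y' };

	walk_records(buf, sizeof(buf));
	return 0;
}
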
 
 
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 52944d2..68bf2af 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -120,23 +120,72 @@
 }
 
 /**
- * v9fs_file_read - read from a file
+ * v9fs_file_readn - read from a file
  * @filp: file pointer to read
  * @data: data buffer to read data into
+ * @udata: user data buffer to read data into
  * @count: size of buffer
  * @offset: offset at which to read data
  *
  */
+
+ssize_t
+v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
+	       u64 offset)
+{
+	int n, total;
+	struct p9_fid *fid = filp->private_data;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
+					(long long unsigned) offset, count);
+
+	n = 0;
+	total = 0;
+	do {
+		n = p9_client_read(fid, data, udata, offset, count);
+		if (n <= 0)
+			break;
+
+		if (data)
+			data += n;
+		if (udata)
+			udata += n;
+
+		offset += n;
+		count -= n;
+		total += n;
+	} while (count > 0 && n == (fid->clnt->msize - P9_IOHDRSZ));
+
+	if (n < 0)
+		total = n;
+
+	return total;
+}
+
+/**
+ * v9fs_file_read - read from a file
+ * @filp: file pointer to read
+ * @udata: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+
 static ssize_t
-v9fs_file_read(struct file *filp, char __user * data, size_t count,
+v9fs_file_read(struct file *filp, char __user *udata, size_t count,
 	       loff_t * offset)
 {
 	int ret;
 	struct p9_fid *fid;
 
-	P9_DPRINTK(P9_DEBUG_VFS, "\n");
+	P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
 	fid = filp->private_data;
-	ret = p9_client_uread(fid, data, *offset, count);
+
+	if (count > (fid->clnt->msize - P9_IOHDRSZ))
+		ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
+	else
+		ret = p9_client_read(fid, NULL, udata, *offset, count);
+
 	if (ret > 0)
 		*offset += ret;
 
@@ -156,19 +205,38 @@
 v9fs_file_write(struct file *filp, const char __user * data,
 		size_t count, loff_t * offset)
 {
-	int ret;
+	int n, rsize, total = 0;
 	struct p9_fid *fid;
+	struct p9_client *clnt;
 	struct inode *inode = filp->f_path.dentry->d_inode;
+	int origin = *offset;
 
 	P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
 		(int)count, (int)*offset);
 
 	fid = filp->private_data;
-	ret = p9_client_uwrite(fid, data, *offset, count);
-	if (ret > 0) {
-		invalidate_inode_pages2_range(inode->i_mapping, *offset,
-								*offset+ret);
-		*offset += ret;
+	clnt = fid->clnt;
+
+	rsize = fid->iounit;
+	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+		rsize = clnt->msize - P9_IOHDRSZ;
+
+	do {
+		if (count < rsize)
+			rsize = count;
+
+		n = p9_client_write(fid, NULL, data+total, *offset+total,
+									rsize);
+		if (n <= 0)
+			break;
+		count -= n;
+		total += n;
+	} while (count > 0);
+
+	if (total > 0) {
+		invalidate_inode_pages2_range(inode->i_mapping, origin,
+								origin+total);
+		*offset += total;
 	}
 
 	if (*offset > inode->i_size) {
@@ -176,7 +244,10 @@
 		inode->i_blocks = (inode->i_size + 512 - 1) >> 9;
 	}
 
-	return ret;
+	if (n < 0)
+		return n;
+
+	return total;
 }
 
 static const struct file_operations v9fs_cached_file_operations = {
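
The new v9fs_file_write() loop above pushes data out in rsize-bounded pieces until the request is drained, and a trailing write error is returned even if earlier chunks succeeded. A stand-alone sketch of that accumulation pattern over a plain file descriptor (assumed helper name; the real code calls p9_client_write() with the fid):

#include <errno.h>
#include <unistd.h>

/* write "count" bytes in chunks of at most "rsize", like v9fs_file_write() */
static ssize_t write_chunked(int fd, const char *data, size_t count,
			     size_t rsize)
{
	ssize_t n = 0, total = 0;

	do {
		size_t len = count < rsize ? count : rsize;

		n = write(fd, data + total, len);
		if (n <= 0)
			break;
		count -= n;
		total += n;
	} while (count > 0);

	if (n < 0)
		return -errno;		/* trailing error wins, as in the patch */
	return total;
}
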
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index e83aa5e..8314d3f4 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -334,7 +334,7 @@
 {
 	int err, umode;
 	struct inode *ret;
-	struct p9_stat *st;
+	struct p9_wstat *st;
 
 	ret = NULL;
 	st = p9_client_stat(fid);
@@ -417,6 +417,8 @@
 	struct p9_fid *dfid, *ofid, *fid;
 	struct inode *inode;
 
+	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+
 	err = 0;
 	ofid = NULL;
 	fid = NULL;
@@ -424,6 +426,7 @@
 	dfid = v9fs_fid_clone(dentry->d_parent);
 	if (IS_ERR(dfid)) {
 		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid clone failed %d\n", err);
 		dfid = NULL;
 		goto error;
 	}
@@ -432,18 +435,22 @@
 	ofid = p9_client_walk(dfid, 0, NULL, 1);
 	if (IS_ERR(ofid)) {
 		err = PTR_ERR(ofid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
 		ofid = NULL;
 		goto error;
 	}
 
 	err = p9_client_fcreate(ofid, name, perm, mode, extension);
-	if (err < 0)
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
 		goto error;
+	}
 
 	/* now walk from the parent so we can get unopened fid */
 	fid = p9_client_walk(dfid, 1, &name, 0);
 	if (IS_ERR(fid)) {
 		err = PTR_ERR(fid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
 		fid = NULL;
 		goto error;
 	} else
@@ -453,6 +460,7 @@
 	inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
+		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
 		goto error;
 	}
 
@@ -734,7 +742,7 @@
 	int err;
 	struct v9fs_session_info *v9ses;
 	struct p9_fid *fid;
-	struct p9_stat *st;
+	struct p9_wstat *st;
 
 	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
 	err = -EPERM;
@@ -815,10 +823,9 @@
  */
 
 void
-v9fs_stat2inode(struct p9_stat *stat, struct inode *inode,
+v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
 	struct super_block *sb)
 {
-	int n;
 	char ext[32];
 	struct v9fs_session_info *v9ses = sb->s_fs_info;
 
@@ -842,11 +849,7 @@
 		int major = -1;
 		int minor = -1;
 
-		n = stat->extension.len;
-		if (n > sizeof(ext)-1)
-			n = sizeof(ext)-1;
-		memmove(ext, stat->extension.str, n);
-		ext[n] = 0;
+		strncpy(ext, stat->extension, sizeof(ext));
 		sscanf(ext, "%c %u %u", &type, &major, &minor);
 		switch (type) {
 		case 'c':
@@ -857,10 +860,11 @@
 			break;
 		default:
 			P9_DPRINTK(P9_DEBUG_ERROR,
-				"Unknown special type %c (%.*s)\n", type,
-				stat->extension.len, stat->extension.str);
+				"Unknown special type %c %s\n", type,
+				stat->extension);
 		};
 		inode->i_rdev = MKDEV(major, minor);
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
 	} else
 		inode->i_rdev = 0;
 
@@ -904,7 +908,7 @@
 
 	struct v9fs_session_info *v9ses;
 	struct p9_fid *fid;
-	struct p9_stat *st;
+	struct p9_wstat *st;
 
 	P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
 	retval = -EPERM;
@@ -926,15 +930,10 @@
 	}
 
 	/* copy extension buffer into buffer */
-	if (st->extension.len < buflen)
-		buflen = st->extension.len + 1;
-
-	memmove(buffer, st->extension.str, buflen - 1);
-	buffer[buflen-1] = 0;
+	strncpy(buffer, st->extension, buflen);
 
 	P9_DPRINTK(P9_DEBUG_VFS,
-		"%s -> %.*s (%s)\n", dentry->d_name.name, st->extension.len,
-		st->extension.str, buffer);
+		"%s -> %s (%s)\n", dentry->d_name.name, st->extension, buffer);
 
 	retval = buflen;
 
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf59c39..d6cb1a0 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -111,7 +111,7 @@
 	struct inode *inode = NULL;
 	struct dentry *root = NULL;
 	struct v9fs_session_info *v9ses = NULL;
-	struct p9_stat *st = NULL;
+	struct p9_wstat *st = NULL;
 	int mode = S_IRWXUGO | S_ISVTX;
 	uid_t uid = current->fsuid;
 	gid_t gid = current->fsgid;
@@ -161,10 +161,14 @@
 
 	sb->s_root = root;
 	root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
+
 	v9fs_stat2inode(st, root->d_inode, sb);
+
 	v9fs_fid_add(root, fid);
+	p9stat_free(st);
 	kfree(st);
 
+	P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n");

 	return simple_set_mnt(mnt, sb);
 
 release_sb:
diff --git a/fs/Kconfig b/fs/Kconfig
index 9e9d70c..e46297f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -6,61 +6,9 @@
 
 if BLOCK
 
-config EXT2_FS
-	tristate "Second extended fs support"
-	help
-	  Ext2 is a standard Linux file system for hard disks.
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called ext2.
-
-	  If unsure, say Y.
-
-config EXT2_FS_XATTR
-	bool "Ext2 extended attributes"
-	depends on EXT2_FS
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-config EXT2_FS_POSIX_ACL
-	bool "Ext2 POSIX Access Control Lists"
-	depends on EXT2_FS_XATTR
-	select FS_POSIX_ACL
-	help
-	  Posix Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the Posix ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N
-
-config EXT2_FS_SECURITY
-	bool "Ext2 Security Labels"
-	depends on EXT2_FS_XATTR
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the ext2 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
-
-config EXT2_FS_XIP
-	bool "Ext2 execute in place support"
-	depends on EXT2_FS && MMU
-	help
-	  Execute in place can be used on memory-backed block devices. If you
-	  enable this option, you can select to mount block devices which are
-	  capable of this feature without using the page cache.
-
-	  If you do not use a block device that is capable of using this,
-	  or if unsure, say N.
+source "fs/ext2/Kconfig"
+source "fs/ext3/Kconfig"
+source "fs/ext4/Kconfig"
 
 config FS_XIP
 # execute in place
@@ -68,218 +16,8 @@
 	depends on EXT2_FS_XIP
 	default y
 
-config EXT3_FS
-	tristate "Ext3 journalling file system support"
-	select JBD
-	help
-	  This is the journalling version of the Second extended file system
-	  (often called ext3), the de facto standard Linux file system
-	  (method to organize files on a storage device) for hard disks.
-
-	  The journalling code included in this driver means you do not have
-	  to run e2fsck (file system checker) on your file systems after a
-	  crash.  The journal keeps track of any changes that were being made
-	  at the time the system crashed, and can ensure that your file system
-	  is consistent without the need for a lengthy check.
-
-	  Other than adding the journal to the file system, the on-disk format
-	  of ext3 is identical to ext2.  It is possible to freely switch
-	  between using the ext3 driver and the ext2 driver, as long as the
-	  file system has been cleanly unmounted, or e2fsck is run on the file
-	  system.
-
-	  To add a journal on an existing ext2 file system or change the
-	  behavior of ext3 file systems, you can use the tune2fs utility ("man
-	  tune2fs").  To modify attributes of files and directories on ext3
-	  file systems, use chattr ("man chattr").  You need to be using
-	  e2fsprogs version 1.20 or later in order to create ext3 journals
-	  (available at <http://sourceforge.net/projects/e2fsprogs/>).
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called ext3.
-
-config EXT3_FS_XATTR
-	bool "Ext3 extended attributes"
-	depends on EXT3_FS
-	default y
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-	  You need this for POSIX ACL support on ext3.
-
-config EXT3_FS_POSIX_ACL
-	bool "Ext3 POSIX Access Control Lists"
-	depends on EXT3_FS_XATTR
-	select FS_POSIX_ACL
-	help
-	  Posix Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the Posix ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N
-
-config EXT3_FS_SECURITY
-	bool "Ext3 Security Labels"
-	depends on EXT3_FS_XATTR
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the ext3 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
-
-config EXT4_FS
-	tristate "The Extended 4 (ext4) filesystem"
-	select JBD2
-	select CRC16
-	help
-	  This is the next generation of the ext3 filesystem.
-
-	  Unlike the change from ext2 filesystem to ext3 filesystem,
-	  the on-disk format of ext4 is not forwards compatible with
-	  ext3; it is based on extent maps and it supports 48-bit
-	  physical block numbers.  The ext4 filesystem also supports delayed
-	  allocation, persistent preallocation, high resolution time stamps,
-	  and a number of other features to improve performance and speed
-	  up fsck time.  For more information, please see the web pages at
-	  http://ext4.wiki.kernel.org.
-
-	  The ext4 filesystem will support mounting an ext3
-	  filesystem; while there will be some performance gains from
-	  the delayed allocation and inode table readahead, the best
-	  performance gains will require enabling ext4 features in the
-	  filesystem, or formating a new filesystem as an ext4
-	  filesystem initially.
-
-	  To compile this file system support as a module, choose M here. The
-	  module will be called ext4dev.
-
-	  If unsure, say N.
-
-config EXT4DEV_COMPAT
-	bool "Enable ext4dev compatibility"
-	depends on EXT4_FS
-	help
-	  Starting with 2.6.28, the name of the ext4 filesystem was
-	  renamed from ext4dev to ext4.  Unfortunately there are some
-	  legacy userspace programs (such as klibc's fstype) have
-	  "ext4dev" hardcoded.
-
-	  To enable backwards compatibility so that systems that are
-	  still expecting to mount ext4 filesystems using ext4dev,
-	  chose Y here.   This feature will go away by 2.6.31, so
-	  please arrange to get your userspace programs fixed!
-
-config EXT4_FS_XATTR
-	bool "Ext4 extended attributes"
-	depends on EXT4_FS
-	default y
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-	  You need this for POSIX ACL support on ext4.
-
-config EXT4_FS_POSIX_ACL
-	bool "Ext4 POSIX Access Control Lists"
-	depends on EXT4_FS_XATTR
-	select FS_POSIX_ACL
-	help
-	  POSIX Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the POSIX ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N
-
-config EXT4_FS_SECURITY
-	bool "Ext4 Security Labels"
-	depends on EXT4_FS_XATTR
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the ext4 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
-
-config JBD
-	tristate
-	help
-	  This is a generic journalling layer for block devices.  It is
-	  currently used by the ext3 file system, but it could also be
-	  used to add journal support to other file systems or block
-	  devices such as RAID or LVM.
-
-	  If you are using the ext3 file system, you need to say Y here.
-	  If you are not using ext3 then you will probably want to say N.
-
-	  To compile this device as a module, choose M here: the module will be
-	  called jbd.  If you are compiling ext3 into the kernel, you
-	  cannot compile this code as a module.
-
-config JBD_DEBUG
-	bool "JBD (ext3) debugging support"
-	depends on JBD && DEBUG_FS
-	help
-	  If you are using the ext3 journaled file system (or potentially any
-	  other file system/device using JBD), this option allows you to
-	  enable debugging output while the system is running, in order to
-	  help track down any problems you are having.  By default the
-	  debugging output will be turned off.
-
-	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /sys/kernel/debug/jbd/jbd-debug", where N is a
-	  number between 1 and 5, the higher the number, the more debugging
-	  output is generated.  To turn debugging off again, do
-	  "echo 0 > /sys/kernel/debug/jbd/jbd-debug".
-
-config JBD2
-	tristate
-	select CRC32
-	help
-	  This is a generic journaling layer for block devices that support
-	  both 32-bit and 64-bit block numbers.  It is currently used by
-	  the ext4 and OCFS2 filesystems, but it could also be used to add
-	  journal support to other file systems or block devices such
-	  as RAID or LVM.
-
-	  If you are using ext4 or OCFS2, you need to say Y here.
-	  If you are not using ext4 or OCFS2 then you will
-	  probably want to say N.
-
-	  To compile this device as a module, choose M here. The module will be
-	  called jbd2.  If you are compiling ext4 or OCFS2 into the kernel,
-	  you cannot compile this code as a module.
-
-config JBD2_DEBUG
-	bool "JBD2 (ext4) debugging support"
-	depends on JBD2 && DEBUG_FS
-	help
-	  If you are using the ext4 journaled file system (or
-	  potentially any other filesystem/device using JBD2), this option
-	  allows you to enable debugging output while the system is running,
-	  in order to help track down any problems you are having.
-	  By default, the debugging output will be turned off.
-
-	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
-	  number between 1 and 5. The higher the number, the more debugging
-	  output is generated.  To turn debugging off again, do
-	  "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
+source "fs/jbd/Kconfig"
+source "fs/jbd2/Kconfig"
 
 config FS_MBCACHE
 # Meta block cache for Extended Attributes (ext2/ext3/ext4)
@@ -665,7 +403,7 @@
 	  N here.
 
 config FUSE_FS
-	tristate "Filesystem in Userspace support"
+	tristate "FUSE (Filesystem in Userspace) support"
 	help
 	  With FUSE it is possible to implement a fully functional filesystem
 	  in a userspace program.
@@ -1168,195 +906,7 @@
 	  To compile the EFS file system support as a module, choose M here: the
 	  module will be called efs.
 
-config JFFS2_FS
-	tristate "Journalling Flash File System v2 (JFFS2) support"
-	select CRC32
-	depends on MTD
-	help
-	  JFFS2 is the second generation of the Journalling Flash File System
-	  for use on diskless embedded devices. It provides improved wear
-	  levelling, compression and support for hard links. You cannot use
-	  this on normal block devices, only on 'MTD' devices.
-
-	  Further information on the design and implementation of JFFS2 is
-	  available at <http://sources.redhat.com/jffs2/>.
-
-config JFFS2_FS_DEBUG
-	int "JFFS2 debugging verbosity (0 = quiet, 2 = noisy)"
-	depends on JFFS2_FS
-	default "0"
-	help
-	  This controls the amount of debugging messages produced by the JFFS2
-	  code. Set it to zero for use in production systems. For evaluation,
-	  testing and debugging, it's advisable to set it to one. This will
-	  enable a few assertions and will print debugging messages at the
-	  KERN_DEBUG loglevel, where they won't normally be visible. Level 2
-	  is unlikely to be useful - it enables extra debugging in certain
-	  areas which at one point needed debugging, but when the bugs were
-	  located and fixed, the detailed messages were relegated to level 2.
-
-	  If reporting bugs, please try to have available a full dump of the
-	  messages at debug level 1 while the misbehaviour was occurring.
-
-config JFFS2_FS_WRITEBUFFER
-	bool "JFFS2 write-buffering support"
-	depends on JFFS2_FS
-	default y
-	help
-	  This enables the write-buffering support in JFFS2.
-
-	  This functionality is required to support JFFS2 on the following
-	  types of flash devices:
-	    - NAND flash
-	    - NOR flash with transparent ECC
-	    - DataFlash
-
-config JFFS2_FS_WBUF_VERIFY
-	bool "Verify JFFS2 write-buffer reads"
-	depends on JFFS2_FS_WRITEBUFFER
-	default n
-	help
-	  This causes JFFS2 to read back every page written through the
-	  write-buffer, and check for errors.
-
-config JFFS2_SUMMARY
-	bool "JFFS2 summary support (EXPERIMENTAL)"
-	depends on JFFS2_FS && EXPERIMENTAL
-	default n
-	help
-	  This feature makes it possible to use summary information
-	  for faster filesystem mount.
-
-	  The summary information can be inserted into a filesystem image
-	  by the utility 'sumtool'.
-
-	  If unsure, say 'N'.
-
-config JFFS2_FS_XATTR
-	bool "JFFS2 XATTR support (EXPERIMENTAL)"
-	depends on JFFS2_FS && EXPERIMENTAL
-	default n
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-config JFFS2_FS_POSIX_ACL
-	bool "JFFS2 POSIX Access Control Lists"
-	depends on JFFS2_FS_XATTR
-	default y
-	select FS_POSIX_ACL
-	help
-	  Posix Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the Posix ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N
-
-config JFFS2_FS_SECURITY
-	bool "JFFS2 Security Labels"
-	depends on JFFS2_FS_XATTR
-	default y
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the jffs2 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
-
-config JFFS2_COMPRESSION_OPTIONS
-	bool "Advanced compression options for JFFS2"
-	depends on JFFS2_FS
-	default n
-	help
-	  Enabling this option allows you to explicitly choose which
-	  compression modules, if any, are enabled in JFFS2. Removing
-	  compressors can mean you cannot read existing file systems,
-	  and enabling experimental compressors can mean that you
-	  write a file system which cannot be read by a standard kernel.
-
-	  If unsure, you should _definitely_ say 'N'.
-
-config JFFS2_ZLIB
-	bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS
-	select ZLIB_INFLATE
-	select ZLIB_DEFLATE
-	depends on JFFS2_FS
-	default y
-	help
-	  Zlib is designed to be a free, general-purpose, legally unencumbered,
-	  lossless data-compression library for use on virtually any computer
-	  hardware and operating system. See <http://www.gzip.org/zlib/> for
-	  further information.
-
-	  Say 'Y' if unsure.
-
-config JFFS2_LZO
-	bool "JFFS2 LZO compression support" if JFFS2_COMPRESSION_OPTIONS
-	select LZO_COMPRESS
-	select LZO_DECOMPRESS
-	depends on JFFS2_FS
-	default n
-	help
-	  minilzo-based compression. Generally works better than Zlib.
-
-	  This feature was added in July, 2007. Say 'N' if you need
-	  compatibility with older bootloaders or kernels.
-
-config JFFS2_RTIME
-	bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
-	depends on JFFS2_FS
-	default y
-	help
-	  Rtime does manage to recompress already-compressed data. Say 'Y' if unsure.
-
-config JFFS2_RUBIN
-	bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS
-	depends on JFFS2_FS
-	default n
-	help
-	  RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure.
-
-choice
-	prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS
-	default JFFS2_CMODE_PRIORITY
-	depends on JFFS2_FS
-	help
-	  You can set here the default compression mode of JFFS2 from
-	  the available compression modes. Don't touch if unsure.
-
-config JFFS2_CMODE_NONE
-	bool "no compression"
-	help
-	  Uses no compression.
-
-config JFFS2_CMODE_PRIORITY
-	bool "priority"
-	help
-	  Tries the compressors in a predefined order and chooses the first
-	  successful one.
-
-config JFFS2_CMODE_SIZE
-	bool "size (EXPERIMENTAL)"
-	help
-	  Tries all compressors and chooses the one which has the smallest
-	  result.
-
-config JFFS2_CMODE_FAVOURLZO
-	bool "Favour LZO"
-	help
-	  Tries all compressors and chooses the one which has the smallest
-	  result but gives some preference to LZO (which has faster
-	  decompression) at the expense of size.
-
-endchoice
-
+source "fs/jffs2/Kconfig"
 # UBIFS File system configuration
 source "fs/ubifs/Kconfig"
 
@@ -1913,148 +1463,7 @@
 
 	  smbmount from samba 2.2.0 or later supports this.
 
-config CIFS
-	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
-	depends on INET
-	select NLS
-	help
-	  This is the client VFS module for the Common Internet File System
-	  (CIFS) protocol which is the successor to the Server Message Block 
-	  (SMB) protocol, the native file sharing mechanism for most early
-	  PC operating systems.  The CIFS protocol is fully supported by 
-	  file servers such as Windows 2000 (including Windows 2003, NT 4  
-	  and Windows XP) as well by Samba (which provides excellent CIFS
-	  server support for Linux and many other operating systems). Limited
-	  support for OS/2 and Windows ME and similar servers is provided as
-	  well.
-
-	  The cifs module provides an advanced network file system
-	  client for mounting to CIFS compliant servers.  It includes
-	  support for DFS (hierarchical name space), secure per-user
-	  session establishment via Kerberos or NTLM or NTLMv2,
-	  safe distributed caching (oplock), optional packet
-	  signing, Unicode and other internationalization improvements.
-	  If you need to mount to Samba or Windows from this machine, say Y.
-
-config CIFS_STATS
-        bool "CIFS statistics"
-        depends on CIFS
-        help
-          Enabling this option will cause statistics for each server share
-	  mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
-
-config CIFS_STATS2
-	bool "Extended statistics"
-	depends on CIFS_STATS
-	help
-	  Enabling this option will allow more detailed statistics on SMB
-	  request timing to be displayed in /proc/fs/cifs/DebugData and also
-	  allow optional logging of slow responses to dmesg (depending on the
-	  value of /proc/fs/cifs/cifsFYI, see fs/cifs/README for more details).
-	  These additional statistics may have a minor effect on performance
-	  and memory utilization.
-
-	  Unless you are a developer or are doing network performance analysis
-	  or tuning, say N.
-
-config CIFS_WEAK_PW_HASH
-	bool "Support legacy servers which use weaker LANMAN security"
-	depends on CIFS
-	help
-	  Modern CIFS servers including Samba and most Windows versions
-	  (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
-	  security mechanisms. These hash the password more securely
-	  than the mechanisms used in the older LANMAN version of the
-	  SMB protocol but LANMAN based authentication is needed to
-	  establish sessions with some old SMB servers.
-
-	  Enabling this option allows the cifs module to mount to older
-	  LANMAN based servers such as OS/2 and Windows 95, but such
-	  mounts may be less secure than mounts using NTLM or more recent
-	  security mechanisms if you are on a public network.  Unless you
-	  have a need to access old SMB servers (and are on a private
-	  network) you probably want to say N.  Even if this support
-	  is enabled in the kernel build, LANMAN authentication will not be
-	  used automatically. At runtime LANMAN mounts are disabled but
-	  can be set to required (or optional) either in
-	  /proc/fs/cifs (see fs/cifs/README for more detail) or via an
-	  option on the mount command. This support is disabled by
-	  default in order to reduce the possibility of a downgrade
-	  attack.
-
-	  If unsure, say N.
-
-config CIFS_UPCALL
-	  bool "Kerberos/SPNEGO advanced session setup"
-	  depends on CIFS && KEYS
-	  help
-	    Enables an upcall mechanism for CIFS which accesses
-	    userspace helper utilities to provide SPNEGO packaged (RFC 4178)
-	    Kerberos tickets which are needed to mount to certain secure servers
-	    (for which more secure Kerberos authentication is required). If
-	    unsure, say N.
-
-config CIFS_XATTR
-        bool "CIFS extended attributes"
-        depends on CIFS
-        help
-          Extended attributes are name:value pairs associated with inodes by
-          the kernel or by users (see the attr(5) manual page, or visit
-          <http://acl.bestbits.at/> for details).  CIFS maps the name of
-          extended attributes beginning with the user namespace prefix
-          to SMB/CIFS EAs. EAs are stored on Windows servers without the
-          user namespace prefix, but their names are seen by Linux cifs clients
-          prefaced by the user namespace prefix. The system namespace
-          (used by some filesystems to store ACLs) is not supported at
-          this time.
-
-          If unsure, say N.
-
-config CIFS_POSIX
-        bool "CIFS POSIX Extensions"
-        depends on CIFS_XATTR
-        help
-          Enabling this option will cause the cifs client to attempt to
-	  negotiate a newer dialect with servers, such as Samba 3.0.5
-	  or later, that optionally can handle more POSIX like (rather
-	  than Windows like) file behavior.  It also enables
-	  support for POSIX ACLs (getfacl and setfacl) to servers
-	  (such as Samba 3.10 and later) which can negotiate
-	  CIFS POSIX ACL support.  If unsure, say N.
-
-config CIFS_DEBUG2
-	bool "Enable additional CIFS debugging routines"
-	depends on CIFS
-	help
-	   Enabling this option adds a few more debugging routines
-	   to the cifs code which slightly increases the size of
-	   the cifs module and can cause additional logging of debug
-	   messages in some error paths, slowing performance. This
-	   option can be turned off unless you are debugging
-	   cifs problems.  If unsure, say N.
-
-config CIFS_EXPERIMENTAL
-	  bool "CIFS Experimental Features (EXPERIMENTAL)"
-	  depends on CIFS && EXPERIMENTAL
-	  help
-	    Enables cifs features under testing. These features are
-	    experimental and currently include DFS support and directory 
-	    change notification ie fcntl(F_DNOTIFY), as well as the upcall
-	    mechanism which will be used for Kerberos session negotiation
-	    and uid remapping.  Some of these features also may depend on 
-	    setting a value of 1 to the pseudo-file /proc/fs/cifs/Experimental
-	    (which is disabled by default). See the file fs/cifs/README 
-	    for more details.  If unsure, say N.
-
-config CIFS_DFS_UPCALL
-	  bool "DFS feature support (EXPERIMENTAL)"
-	  depends on CIFS_EXPERIMENTAL
-	  depends on KEYS
-	  help
-	    Enables an upcall mechanism for CIFS which contacts userspace
-	    helper utilities to provide server name resolution (host names to
-	    IP addresses) which is needed for implicit mounts of DFS junction
-	    points. If unsure, say N.
+source "fs/cifs/Kconfig"
 
 config NCP_FS
 	tristate "NCP file system support (to mount NetWare volumes)"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 801db13..ce9fb3f 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -40,6 +40,28 @@
 
 	  It is also possible to run FDPIC ELF binaries on MMU linux also.
 
+config CORE_DUMP_DEFAULT_ELF_HEADERS
+	bool "Write ELF core dumps with partial segments"
+	default n
+	depends on BINFMT_ELF
+	help
+	  ELF core dump files describe each memory mapping of the crashed
+	  process, and can contain or omit the memory contents of each one.
+	  The contents of an unmodified text mapping are omitted by default.
+
+	  For an unmodified text mapping of an ELF object, including just
+	  the first page of the file in a core dump makes it possible to
+	  identify the build ID bits in the file, without paying the i/o
+	  cost and disk space to dump all the text.  However, versions of
+	  GDB before 6.7 are confused by ELF core dump files in this format.
+
+	  The core dump behavior can be controlled per process using
+	  the /proc/PID/coredump_filter pseudo-file; this setting is
+	  inherited.  See Documentation/filesystems/proc.txt for details.
+
+	  This config option changes the default setting of coredump_filter
+	  seen at boot time.  If unsure, say N.
+
 config BINFMT_FLAT
 	bool "Kernel support for flat binaries"
 	depends on !MMU && (!FRV || BROKEN)
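
The CORE_DUMP_DEFAULT_ELF_HEADERS help text above only changes the boot-time default; the per-process mask remains writable through /proc/PID/coredump_filter. A small user-space sketch that ORs one bit into the current process's mask; the bit number used here is an assumption (taken to be the "dump first page of mapped ELF objects" bit), so check Documentation/filesystems/proc.txt for the authoritative values:

#include <stdio.h>

int main(void)
{
	unsigned long mask;
	FILE *f = fopen("/proc/self/coredump_filter", "r");

	if (!f || fscanf(f, "%lx", &mask) != 1) {
		perror("read coredump_filter");
		return 1;
	}
	fclose(f);

	f = fopen("/proc/self/coredump_filter", "w");
	if (!f) {
		perror("write coredump_filter");
		return 1;
	}
	/* assumed: bit 4 == include the first page of mapped ELF objects */
	fprintf(f, "%lx\n", mask | (1UL << 4));
	fclose(f);

	printf("coredump_filter: %#lx -> %#lx\n", mask, mask | (1UL << 4));
	return 0;
}
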
diff --git a/fs/Makefile b/fs/Makefile
index d0c69f5..2168c90 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -71,7 +71,7 @@
 # Do not add any filesystems before this line
 obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
 obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
-obj-$(CONFIG_EXT4_FS)		+= ext4/ # Before ext2 so root fs can be ext4dev
+obj-$(CONFIG_EXT4_FS)		+= ext4/ # Before ext2 so root fs can be ext4
 obj-$(CONFIG_JBD)		+= jbd/
 obj-$(CONFIG_JBD2)		+= jbd2/
 obj-$(CONFIG_EXT2_FS)		+= ext2/
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index c76afa2..8fcfa39 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1156,16 +1156,24 @@
 static unsigned long vma_dump_size(struct vm_area_struct *vma,
 				   unsigned long mm_flags)
 {
+#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))
+
 	/* The vma can be set up to tell us the answer directly.  */
 	if (vma->vm_flags & VM_ALWAYSDUMP)
 		goto whole;
 
+	/* Hugetlb memory check */
+	if (vma->vm_flags & VM_HUGETLB) {
+		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
+			goto whole;
+		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
+			goto whole;
+	}
+
 	/* Do not dump I/O mapped devices or special mappings */
 	if (vma->vm_flags & (VM_IO | VM_RESERVED))
 		return 0;
 
-#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))
-
 	/* By default, dump shared memory if mapped from an anonymous file. */
 	if (vma->vm_flags & VM_SHARED) {
 		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
@@ -1333,20 +1341,15 @@
 	prstatus->pr_pgrp = task_pgrp_vnr(p);
 	prstatus->pr_sid = task_session_vnr(p);
 	if (thread_group_leader(p)) {
+		struct task_cputime cputime;
+
 		/*
-		 * This is the record for the group leader.  Add in the
-		 * cumulative times of previous dead threads.  This total
-		 * won't include the time of each live thread whose state
-		 * is included in the core dump.  The final total reported
-		 * to our parent process when it calls wait4 will include
-		 * those sums as well as the little bit more time it takes
-		 * this and each other thread to finish dying after the
-		 * core dump synchronization phase.
+		 * This is the record for the group leader.  It shows the
+		 * group-wide total, not its individual thread total.
 		 */
-		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
-				   &prstatus->pr_utime);
-		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
-				   &prstatus->pr_stime);
+		thread_group_cputime(p, &cputime);
+		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
+		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
 	} else {
 		cputime_to_timeval(p->utime, &prstatus->pr_utime);
 		cputime_to_timeval(p->stime, &prstatus->pr_stime);
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 0e8367c..5b5424c 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1390,20 +1390,15 @@
 	prstatus->pr_pgrp = task_pgrp_vnr(p);
 	prstatus->pr_sid = task_session_vnr(p);
 	if (thread_group_leader(p)) {
+		struct task_cputime cputime;
+
 		/*
-		 * This is the record for the group leader.  Add in the
-		 * cumulative times of previous dead threads.  This total
-		 * won't include the time of each live thread whose state
-		 * is included in the core dump.  The final total reported
-		 * to our parent process when it calls wait4 will include
-		 * those sums as well as the little bit more time it takes
-		 * this and each other thread to finish dying after the
-		 * core dump synchronization phase.
+		 * This is the record for the group leader.  It shows the
+		 * group-wide total, not its individual thread total.
 		 */
-		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
-				   &prstatus->pr_utime);
-		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
-				   &prstatus->pr_stime);
+		thread_group_cputime(p, &cputime);
+		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
+		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
 	} else {
 		cputime_to_timeval(p->utime, &prstatus->pr_utime);
 		cputime_to_timeval(p->stime, &prstatus->pr_stime);
diff --git a/fs/buffer.c b/fs/buffer.c
index ac78d4c..6569fda 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -76,8 +76,7 @@
 
 void unlock_buffer(struct buffer_head *bh)
 {
-	smp_mb__before_clear_bit();
-	clear_buffer_locked(bh);
+	clear_bit_unlock(BH_Lock, &bh->b_state);
 	smp_mb__after_clear_bit();
 	wake_up_bit(&bh->b_state, BH_Lock);
 }
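
The buffer.c hunk above swaps the hand-rolled smp_mb__before_clear_bit()/clear_buffer_locked() pair for clear_bit_unlock(), which carries release ordering itself. A generic sketch of the matching acquire/release pair on an arbitrary flags word, with assumed names (this is not buffer.c's actual lock path):

#include <linux/bitops.h>

#define MY_LOCK_BIT	0

static unsigned long my_state;

static int my_trylock(void)
{
	/* acquire semantics: later loads/stores stay after the lock */
	return !test_and_set_bit_lock(MY_LOCK_BIT, &my_state);
}

static void my_unlock(void)
{
	/* release semantics: earlier loads/stores stay before the unlock */
	clear_bit_unlock(MY_LOCK_BIT, &my_state);
}
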
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
new file mode 100644
index 0000000..341a989
--- /dev/null
+++ b/fs/cifs/Kconfig
@@ -0,0 +1,142 @@
+config CIFS
+	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
+	depends on INET
+	select NLS
+	help
+	  This is the client VFS module for the Common Internet File System
+	  (CIFS) protocol which is the successor to the Server Message Block
+	  (SMB) protocol, the native file sharing mechanism for most early
+	  PC operating systems.  The CIFS protocol is fully supported by
+	  file servers such as Windows 2000 (including Windows 2003, NT 4
+	  and Windows XP) as well by Samba (which provides excellent CIFS
+	  server support for Linux and many other operating systems). Limited
+	  support for OS/2 and Windows ME and similar servers is provided as
+	  well.
+
+	  The cifs module provides an advanced network file system
+	  client for mounting to CIFS compliant servers.  It includes
+	  support for DFS (hierarchical name space), secure per-user
+	  session establishment via Kerberos or NTLM or NTLMv2,
+	  safe distributed caching (oplock), optional packet
+	  signing, Unicode and other internationalization improvements.
+	  If you need to mount to Samba or Windows from this machine, say Y.
+
+config CIFS_STATS
+        bool "CIFS statistics"
+        depends on CIFS
+        help
+          Enabling this option will cause statistics for each server share
+	  mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
+
+config CIFS_STATS2
+	bool "Extended statistics"
+	depends on CIFS_STATS
+	help
+	  Enabling this option will allow more detailed statistics on SMB
+	  request timing to be displayed in /proc/fs/cifs/DebugData and also
+	  allow optional logging of slow responses to dmesg (depending on the
+	  value of /proc/fs/cifs/cifsFYI, see fs/cifs/README for more details).
+	  These additional statistics may have a minor effect on performance
+	  and memory utilization.
+
+	  Unless you are a developer or are doing network performance analysis
+	  or tuning, say N.
+
+config CIFS_WEAK_PW_HASH
+	bool "Support legacy servers which use weaker LANMAN security"
+	depends on CIFS
+	help
+	  Modern CIFS servers including Samba and most Windows versions
+	  (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
+	  security mechanisms. These hash the password more securely
+	  than the mechanisms used in the older LANMAN version of the
+	  SMB protocol but LANMAN based authentication is needed to
+	  establish sessions with some old SMB servers.
+
+	  Enabling this option allows the cifs module to mount to older
+	  LANMAN based servers such as OS/2 and Windows 95, but such
+	  mounts may be less secure than mounts using NTLM or more recent
+	  security mechanisms if you are on a public network.  Unless you
+	  have a need to access old SMB servers (and are on a private
+	  network) you probably want to say N.  Even if this support
+	  is enabled in the kernel build, LANMAN authentication will not be
+	  used automatically. At runtime LANMAN mounts are disabled but
+	  can be set to required (or optional) either in
+	  /proc/fs/cifs (see fs/cifs/README for more detail) or via an
+	  option on the mount command. This support is disabled by
+	  default in order to reduce the possibility of a downgrade
+	  attack.
+
+	  If unsure, say N.
+
+config CIFS_UPCALL
+	  bool "Kerberos/SPNEGO advanced session setup"
+	  depends on CIFS && KEYS
+	  help
+	    Enables an upcall mechanism for CIFS which accesses
+	    userspace helper utilities to provide SPNEGO packaged (RFC 4178)
+	    Kerberos tickets which are needed to mount to certain secure servers
+	    (for which more secure Kerberos authentication is required). If
+	    unsure, say N.
+
+config CIFS_XATTR
+        bool "CIFS extended attributes"
+        depends on CIFS
+        help
+          Extended attributes are name:value pairs associated with inodes by
+          the kernel or by users (see the attr(5) manual page, or visit
+          <http://acl.bestbits.at/> for details).  CIFS maps the name of
+          extended attributes beginning with the user namespace prefix
+          to SMB/CIFS EAs. EAs are stored on Windows servers without the
+          user namespace prefix, but their names are seen by Linux cifs clients
+          prefaced by the user namespace prefix. The system namespace
+          (used by some filesystems to store ACLs) is not supported at
+          this time.
+
+          If unsure, say N.
+
+config CIFS_POSIX
+        bool "CIFS POSIX Extensions"
+        depends on CIFS_XATTR
+        help
+          Enabling this option will cause the cifs client to attempt to
+	  negotiate a newer dialect with servers, such as Samba 3.0.5
+	  or later, that optionally can handle more POSIX like (rather
+	  than Windows like) file behavior.  It also enables
+	  support for POSIX ACLs (getfacl and setfacl) to servers
+	  (such as Samba 3.10 and later) which can negotiate
+	  CIFS POSIX ACL support.  If unsure, say N.
+
+config CIFS_DEBUG2
+	bool "Enable additional CIFS debugging routines"
+	depends on CIFS
+	help
+	   Enabling this option adds a few more debugging routines
+	   to the cifs code which slightly increases the size of
+	   the cifs module and can cause additional logging of debug
+	   messages in some error paths, slowing performance. This
+	   option can be turned off unless you are debugging
+	   cifs problems.  If unsure, say N.
+
+config CIFS_EXPERIMENTAL
+	  bool "CIFS Experimental Features (EXPERIMENTAL)"
+	  depends on CIFS && EXPERIMENTAL
+	  help
+	    Enables cifs features under testing. These features are
+	    experimental and currently include DFS support and directory
+	    change notification ie fcntl(F_DNOTIFY), as well as the upcall
+	    mechanism which will be used for Kerberos session negotiation
+	    and uid remapping.  Some of these features also may depend on
+	    setting a value of 1 to the pseudo-file /proc/fs/cifs/Experimental
+	    (which is disabled by default). See the file fs/cifs/README
+	    for more details.  If unsure, say N.
+
+config CIFS_DFS_UPCALL
+	  bool "DFS feature support (EXPERIMENTAL)"
+	  depends on CIFS_EXPERIMENTAL
+	  depends on KEYS
+	  help
+	    Enables an upcall mechanism for CIFS which contacts userspace
+	    helper utilities to provide server name resolution (host names to
+	    IP addresses) which is needed for implicit mounts of DFS junction
+	    points. If unsure, say N.
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c4a8a06..62d8bd8 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1791,7 +1791,7 @@
 		SetPageUptodate(page);
 		unlock_page(page);
 		if (!pagevec_add(plru_pvec, page))
-			__pagevec_lru_add(plru_pvec);
+			__pagevec_lru_add_file(plru_pvec);
 		data += PAGE_CACHE_SIZE;
 	}
 	return;
@@ -1925,7 +1925,7 @@
 		bytes_read = 0;
 	}
 
-	pagevec_lru_add(&lru_pvec);
+	pagevec_lru_add_file(&lru_pvec);
 
 /* need to free smb_read_data buf before exit */
 	if (smb_read_data) {
diff --git a/fs/exec.c b/fs/exec.c
index a41e790..4e834f1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1386,7 +1386,7 @@
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  */
-static int format_corename(char *corename, int nr_threads, long signr)
+static int format_corename(char *corename, long signr)
 {
 	const char *pat_ptr = core_pattern;
 	int ispipe = (*pat_ptr == '|');
@@ -1493,8 +1493,7 @@
 	 * If core_pattern does not include a %p (as is the default)
 	 * and core_uses_pid is set, then .%pid will be appended to
 	 * the filename. Do not do this for piped commands. */
-	if (!ispipe && !pid_in_pattern
-	    && (core_uses_pid || nr_threads)) {
+	if (!ispipe && !pid_in_pattern && core_uses_pid) {
 		rc = snprintf(out_ptr, out_end - out_ptr,
 			      ".%d", task_tgid_vnr(current));
 		if (rc > out_end - out_ptr)
@@ -1757,7 +1756,7 @@
 	 * uses lock_kernel()
 	 */
  	lock_kernel();
-	ispipe = format_corename(corename, retval, signr);
+	ispipe = format_corename(corename, signr);
 	unlock_kernel();
 	/*
 	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
new file mode 100644
index 0000000..14a6780
--- /dev/null
+++ b/fs/ext2/Kconfig
@@ -0,0 +1,55 @@
+config EXT2_FS
+	tristate "Second extended fs support"
+	help
+	  Ext2 is a standard Linux file system for hard disks.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called ext2.
+
+	  If unsure, say Y.
+
+config EXT2_FS_XATTR
+	bool "Ext2 extended attributes"
+	depends on EXT2_FS
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config EXT2_FS_POSIX_ACL
+	bool "Ext2 POSIX Access Control Lists"
+	depends on EXT2_FS_XATTR
+	select FS_POSIX_ACL
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EXT2_FS_SECURITY
+	bool "Ext2 Security Labels"
+	depends on EXT2_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the ext2 filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config EXT2_FS_XIP
+	bool "Ext2 execute in place support"
+	depends on EXT2_FS && MMU
+	help
+	  Execute in place can be used on memory-backed block devices. If you
+	  enable this option, you can select to mount block devices which are
+	  capable of this feature without using the page cache.
+
+	  If you do not use a block device that is capable of using this,
+	  or if unsure, say N.
diff --git a/fs/ext3/Kconfig b/fs/ext3/Kconfig
new file mode 100644
index 0000000..8e0cfe4
--- /dev/null
+++ b/fs/ext3/Kconfig
@@ -0,0 +1,67 @@
+config EXT3_FS
+	tristate "Ext3 journalling file system support"
+	select JBD
+	help
+	  This is the journalling version of the Second extended file system
+	  (often called ext3), the de facto standard Linux file system
+	  (method to organize files on a storage device) for hard disks.
+
+	  The journalling code included in this driver means you do not have
+	  to run e2fsck (file system checker) on your file systems after a
+	  crash.  The journal keeps track of any changes that were being made
+	  at the time the system crashed, and can ensure that your file system
+	  is consistent without the need for a lengthy check.
+
+	  Other than adding the journal to the file system, the on-disk format
+	  of ext3 is identical to ext2.  It is possible to freely switch
+	  between using the ext3 driver and the ext2 driver, as long as the
+	  file system has been cleanly unmounted, or e2fsck is run on the file
+	  system.
+
+	  To add a journal on an existing ext2 file system or change the
+	  behavior of ext3 file systems, you can use the tune2fs utility ("man
+	  tune2fs").  To modify attributes of files and directories on ext3
+	  file systems, use chattr ("man chattr").  You need to be using
+	  e2fsprogs version 1.20 or later in order to create ext3 journals
+	  (available at <http://sourceforge.net/projects/e2fsprogs/>).
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called ext3.
+
+config EXT3_FS_XATTR
+	bool "Ext3 extended attributes"
+	depends on EXT3_FS
+	default y
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+	  You need this for POSIX ACL support on ext3.
+
+config EXT3_FS_POSIX_ACL
+	bool "Ext3 POSIX Access Control Lists"
+	depends on EXT3_FS_XATTR
+	select FS_POSIX_ACL
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EXT3_FS_SECURITY
+	bool "Ext3 Security Labels"
+	depends on EXT3_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the ext3 filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 92fd033..f5b57a2 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1547,6 +1547,7 @@
 	 * turn off reservation for this allocation
 	 */
 	if (my_rsv && (free_blocks < windowsz)
+		&& (free_blocks > 0)
 		&& (rsv_is_empty(&my_rsv->rsv_window)))
 		my_rsv = NULL;
 
@@ -1585,7 +1586,7 @@
 		 * free blocks is less than half of the reservation
 		 * window size.
 		 */
-		if (free_blocks <= (windowsz/2))
+		if (my_rsv && (free_blocks <= (windowsz/2)))
 			continue;
 
 		brelse(bitmap_bh);
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 2eea96e..4c82531 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -102,6 +102,7 @@
 	int err;
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int ret = 0;
+	int dir_has_error = 0;
 
 	sb = inode->i_sb;
 
@@ -148,9 +149,12 @@
 		 * of recovering data when there's a bad sector
 		 */
 		if (!bh) {
-			ext3_error (sb, "ext3_readdir",
-				"directory #%lu contains a hole at offset %lu",
-				inode->i_ino, (unsigned long)filp->f_pos);
+			if (!dir_has_error) {
+				ext3_error(sb, __func__, "directory #%lu "
+					"contains a hole at offset %lld",
+					inode->i_ino, filp->f_pos);
+				dir_has_error = 1;
+			}
 			/* corrupt size?  Maybe no more blocks to read */
 			if (filp->f_pos > inode->i_blocks << 9)
 				break;
@@ -410,7 +414,7 @@
 				get_dtype(sb, fname->file_type));
 		if (error) {
 			filp->f_pos = curr_pos;
-			info->extra_fname = fname->next;
+			info->extra_fname = fname;
 			return error;
 		}
 		fname = fname->next;
@@ -449,11 +453,21 @@
 	 * If there are any leftover names on the hash collision
 	 * chain, return them first.
 	 */
-	if (info->extra_fname &&
-	    call_filldir(filp, dirent, filldir, info->extra_fname))
-		goto finished;
+	if (info->extra_fname) {
+		if (call_filldir(filp, dirent, filldir, info->extra_fname))
+			goto finished;
 
-	if (!info->curr_node)
+		info->extra_fname = NULL;
+		info->curr_node = rb_next(info->curr_node);
+		if (!info->curr_node) {
+			if (info->next_hash == ~0) {
+				filp->f_pos = EXT3_HTREE_EOF;
+				goto finished;
+			}
+			info->curr_hash = info->next_hash;
+			info->curr_minor_hash = 0;
+		}
+	} else if (!info->curr_node)
 		info->curr_node = rb_first(&info->root);
 
 	while (1) {
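
The htree hunk above changes the saved resume pointer from fname->next to fname, so an entry rejected by filldir is retried on the next call instead of being silently skipped. A toy sketch of that rule, with hypothetical types:

    #include <stdio.h>

    struct name { const char *s; struct name *next; };

    /* When the consumer rejects an entry, remember that entry itself (not
     * its successor) so the next pass retries it instead of skipping it. */
    static void emit_chain(struct name *head, int budget, struct name **resume)
    {
        struct name *n;

        for (n = head; n; n = n->next) {
            if (budget-- == 0) {    /* consumer said "stop" before this one */
                *resume = n;        /* retry this entry next time */
                return;
            }
            printf("%s\n", n->s);
        }
        *resume = NULL;
    }

    int main(void)
    {
        struct name c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };
        struct name *resume;

        emit_chain(&a, 2, &resume);         /* prints a, b; resume == &c */
        emit_chain(resume, 10, &resume);    /* prints c */
        return 0;
    }
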
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ebfec4d..f8424ad 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1186,6 +1186,13 @@
 		ext3_journal_stop(handle);
 		unlock_page(page);
 		page_cache_release(page);
+		/*
+		 * block_write_begin may have instantiated a few blocks
+		 * outside i_size.  Trim these off again. Don't need
+		 * i_size_read because we hold i_mutex.
+		 */
+		if (pos + len > inode->i_size)
+			vmtruncate(inode, inode->i_size);
 	}
 	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 0d0c701..b7394d0 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -239,7 +239,7 @@
 	case EXT3_IOC_GROUP_EXTEND: {
 		ext3_fsblk_t n_blocks_count;
 		struct super_block *sb = inode->i_sb;
-		int err;
+		int err, err2;
 
 		if (!capable(CAP_SYS_RESOURCE))
 			return -EPERM;
@@ -254,8 +254,10 @@
 		}
 		err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
 		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		journal_flush(EXT3_SB(sb)->s_journal);
+		err2 = journal_flush(EXT3_SB(sb)->s_journal);
 		journal_unlock_updates(EXT3_SB(sb)->s_journal);
+		if (err == 0)
+			err = err2;
 group_extend_out:
 		mnt_drop_write(filp->f_path.mnt);
 		return err;
@@ -263,7 +265,7 @@
 	case EXT3_IOC_GROUP_ADD: {
 		struct ext3_new_group_data input;
 		struct super_block *sb = inode->i_sb;
-		int err;
+		int err, err2;
 
 		if (!capable(CAP_SYS_RESOURCE))
 			return -EPERM;
@@ -280,8 +282,10 @@
 
 		err = ext3_group_add(sb, &input);
 		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		journal_flush(EXT3_SB(sb)->s_journal);
+		err2 = journal_flush(EXT3_SB(sb)->s_journal);
 		journal_unlock_updates(EXT3_SB(sb)->s_journal);
+		if (err == 0)
+			err = err2;
 group_add_out:
 		mnt_drop_write(filp->f_path.mnt);
 		return err;
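
Both ioctl hunks above use the same idiom: the journal flush runs unconditionally, its error is captured in err2, and the first failure wins. A standalone sketch of the idiom (do_resize() and do_flush() are made-up stand-ins, not kernel functions):

    #include <stdio.h>
    #include <errno.h>

    static int do_resize(void) { return 0; }
    static int do_flush(void)  { return -EIO; }

    static int resize_and_flush(void)
    {
        int err, err2;

        err = do_resize();
        err2 = do_flush();      /* always run the flush step */
        if (err == 0)
            err = err2;         /* report the flush failure if resize was OK */
        return err;
    }

    int main(void)
    {
        printf("result = %d\n", resize_and_flush());    /* -5 (-EIO) */
        return 0;
    }
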
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 77278e9..78fdf38 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -790,7 +790,8 @@
 
 	if (reserved_gdb || gdb_off == 0) {
 		if (!EXT3_HAS_COMPAT_FEATURE(sb,
-					     EXT3_FEATURE_COMPAT_RESIZE_INODE)){
+					     EXT3_FEATURE_COMPAT_RESIZE_INODE)
+		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
 			ext3_warning(sb, __func__,
 				     "No reserved GDT blocks, can't resize");
 			return -EPERM;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 399a96a..cac29ee 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -393,7 +393,8 @@
 	int i;
 
 	ext3_xattr_put_super(sb);
-	journal_destroy(sbi->s_journal);
+	if (journal_destroy(sbi->s_journal) < 0)
+		ext3_abort(sb, __func__, "Couldn't clean up the journal");
 	if (!(sb->s_flags & MS_RDONLY)) {
 		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
@@ -625,6 +626,9 @@
 	else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
 		seq_puts(seq, ",data=writeback");
 
+	if (test_opt(sb, DATA_ERR_ABORT))
+		seq_puts(seq, ",data_err=abort");
+
 	ext3_show_quota_options(seq, sb);
 
 	return 0;
@@ -754,6 +758,7 @@
 	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
 	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
 	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
+	Opt_data_err_abort, Opt_data_err_ignore,
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
@@ -796,6 +801,8 @@
 	{Opt_data_journal, "data=journal"},
 	{Opt_data_ordered, "data=ordered"},
 	{Opt_data_writeback, "data=writeback"},
+	{Opt_data_err_abort, "data_err=abort"},
+	{Opt_data_err_ignore, "data_err=ignore"},
 	{Opt_offusrjquota, "usrjquota="},
 	{Opt_usrjquota, "usrjquota=%s"},
 	{Opt_offgrpjquota, "grpjquota="},
@@ -1011,6 +1018,12 @@
 				sbi->s_mount_opt |= data_opt;
 			}
 			break;
+		case Opt_data_err_abort:
+			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			break;
+		case Opt_data_err_ignore:
+			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			break;
 #ifdef CONFIG_QUOTA
 		case Opt_usrjquota:
 			qtype = USRQUOTA;
@@ -1986,6 +1999,10 @@
 		journal->j_flags |= JFS_BARRIER;
 	else
 		journal->j_flags &= ~JFS_BARRIER;
+	if (test_opt(sb, DATA_ERR_ABORT))
+		journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
+	else
+		journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
 	spin_unlock(&journal->j_state_lock);
 }
 
@@ -2280,7 +2297,9 @@
 	journal_t *journal = EXT3_SB(sb)->s_journal;
 
 	journal_lock_updates(journal);
-	journal_flush(journal);
+	if (journal_flush(journal) < 0)
+		goto out;
+
 	lock_super(sb);
 	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
 	    sb->s_flags & MS_RDONLY) {
@@ -2289,6 +2308,8 @@
 		ext3_commit_super(sb, es, 1);
 	}
 	unlock_super(sb);
+
+out:
 	journal_unlock_updates(journal);
 }
 
@@ -2388,7 +2409,13 @@
 
 		/* Now we set up the journal barrier. */
 		journal_lock_updates(journal);
-		journal_flush(journal);
+
+		/*
+		 * We don't want to clear needs_recovery flag when we failed
+		 * to flush the journal.
+		 */
+		if (journal_flush(journal) < 0)
+			return;
 
 		/* Journal blocked and flushed, clear needs_recovery flag. */
 		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
@@ -2806,8 +2833,12 @@
 		 * otherwise be livelocked...
 		 */
 		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		journal_flush(EXT3_SB(sb)->s_journal);
+		err = journal_flush(EXT3_SB(sb)->s_journal);
 		journal_unlock_updates(EXT3_SB(sb)->s_journal);
+		if (err) {
+			path_put(&nd.path);
+			return err;
+		}
 	}
 
 	err = vfs_quota_on_path(sb, type, format_id, &nd.path);
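
The data_err=abort changes above follow a simple pattern: the mount option bit is mirrored into the live journal's flag word on every mount or remount, so the behaviour can be toggled without recreating the journal. A sketch of that mirroring with illustrative flag values (not the kernel's definitions):

    #include <stdio.h>

    #define OPT_DATA_ERR_ABORT        0x01  /* mount option bit (illustrative) */
    #define JFS_ABORT_ON_SYNCDATA_ERR 0x02  /* journal flag bit (illustrative) */

    static unsigned int sync_journal_flags(unsigned int mount_opts,
                                           unsigned int j_flags)
    {
        if (mount_opts & OPT_DATA_ERR_ABORT)
            j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
        else
            j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
        return j_flags;
    }

    int main(void)
    {
        printf("%#x\n", sync_journal_flags(OPT_DATA_ERR_ABORT, 0));            /* 0x2 */
        printf("%#x\n", sync_journal_flags(0, JFS_ABORT_ON_SYNCDATA_ERR));     /* 0 */
        return 0;
    }
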
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
new file mode 100644
index 0000000..7505482
--- /dev/null
+++ b/fs/ext4/Kconfig
@@ -0,0 +1,79 @@
+config EXT4_FS
+	tristate "The Extended 4 (ext4) filesystem"
+	select JBD2
+	select CRC16
+	help
+	  This is the next generation of the ext3 filesystem.
+
+	  Unlike the change from ext2 filesystem to ext3 filesystem,
+	  the on-disk format of ext4 is not forwards compatible with
+	  ext3; it is based on extent maps and it supports 48-bit
+	  physical block numbers.  The ext4 filesystem also supports delayed
+	  allocation, persistent preallocation, high resolution time stamps,
+	  and a number of other features to improve performance and speed
+	  up fsck time.  For more information, please see the web pages at
+	  http://ext4.wiki.kernel.org.
+
+	  The ext4 filesystem will support mounting an ext3
+	  filesystem; while there will be some performance gains from
+	  the delayed allocation and inode table readahead, the best
+	  performance gains will require enabling ext4 features in the
+	  filesystem, or formatting a new filesystem as an ext4
+	  filesystem initially.
+
+	  To compile this file system support as a module, choose M here. The
+	  module will be called ext4.
+
+	  If unsure, say N.
+
+config EXT4DEV_COMPAT
+	bool "Enable ext4dev compatibility"
+	depends on EXT4_FS
+	help
+	  Starting with 2.6.28, the name of the ext4 filesystem was
+	  renamed from ext4dev to ext4.  Unfortunately there are some
+	  legacy userspace programs (such as klibc's fstype) that have
+	  "ext4dev" hardcoded.
+
+	  To enable backwards compatibility, so that systems still
+	  expecting to mount ext4 filesystems using ext4dev can do so,
+	  choose Y here.  This feature will go away by 2.6.31, so
+	  please arrange to get your userspace programs fixed!
+
+config EXT4_FS_XATTR
+	bool "Ext4 extended attributes"
+	depends on EXT4_FS
+	default y
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+	  You need this for POSIX ACL support on ext4.
+
+config EXT4_FS_POSIX_ACL
+	bool "Ext4 POSIX Access Control Lists"
+	depends on EXT4_FS_XATTR
+	select FS_POSIX_ACL
+	help
+	  POSIX Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the POSIX ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EXT4_FS_SECURITY
+	bool "Ext4 Security Labels"
+	depends on EXT4_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the ext4 filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index bd2ece2..b9821be 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -568,8 +568,16 @@
 
 	/* this isn't the right place to decide whether block is metadata
 	 * inode.c/extents.c knows better, but for safety ... */
-	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
-			ext4_should_journal_data(inode))
+	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+		metadata = 1;
+
+	/* We need to make sure we don't reuse
+	 * the block released until the transaction commits.
+	 * Writeback mode has weak data consistency, so
+	 * don't force data as metadata when freeing blocks
+	 * in writeback mode.
+	 */
+	if (metadata == 0 && !ext4_should_writeback_data(inode))
 		metadata = 1;
 
 	sb = inode->i_sb;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6690a41..4880cc3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -511,7 +511,6 @@
 /*
  * Mount flags
  */
-#define EXT4_MOUNT_CHECK		0x00001	/* Do mount-time checks */
 #define EXT4_MOUNT_OLDALLOC		0x00002  /* Don't use the new Orlov allocator */
 #define EXT4_MOUNT_GRPID		0x00004	/* Create files with directory's group */
 #define EXT4_MOUNT_DEBUG		0x00008	/* Some debugging messages */
diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h
index 6a0b40d..445fde6 100644
--- a/fs/ext4/ext4_sb.h
+++ b/fs/ext4/ext4_sb.h
@@ -99,9 +99,6 @@
 	struct inode *s_buddy_cache;
 	long s_blocks_reserved;
 	spinlock_t s_reserve_lock;
-	struct list_head s_active_transaction;
-	struct list_head s_closed_transaction;
-	struct list_head s_committed_transaction;
 	spinlock_t s_md_lock;
 	tid_t s_last_transaction;
 	unsigned short *s_mb_offsets, *s_mb_maxs;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9b4ec9d..8dbf695 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1648,6 +1648,7 @@
 	int ret = 0, err, nr_pages, i;
 	unsigned long index, end;
 	struct pagevec pvec;
+	long pages_skipped;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
 	pagevec_init(&pvec, 0);
@@ -1655,20 +1656,30 @@
 	end = mpd->next_page - 1;
 
 	while (index <= end) {
-		/* XXX: optimize tail */
-		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+		/*
+		 * We can use a PAGECACHE_TAG_DIRTY lookup here because,
+		 * even though we have cleared the dirty flag on the page,
+		 * we still keep the page in the radix tree with the tag
+		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
+		 * PAGECACHE_TAG_DIRTY is cleared in set_page_writeback,
+		 * which is called via the writepage callback below.
+		 */
+		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+					PAGECACHE_TAG_DIRTY,
+					min(end - index,
+					(pgoff_t)PAGEVEC_SIZE-1) + 1);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			index = page->index;
-			if (index > end)
-				break;
-			index++;
-
+			pages_skipped = mpd->wbc->pages_skipped;
 			err = mapping->a_ops->writepage(page, mpd->wbc);
-			if (!err)
+			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
+				/*
+				 * the page was written successfully
+				 * without being skipped
+				 */
 				mpd->pages_written++;
 			/*
 			 * In error case, we have to continue because
@@ -2104,7 +2115,6 @@
 			       struct writeback_control *wbc,
 			       struct mpage_da_data *mpd)
 {
-	long to_write;
 	int ret;
 
 	if (!mpd->get_block)
@@ -2119,19 +2129,18 @@
 	mpd->pages_written = 0;
 	mpd->retval = 0;
 
-	to_write = wbc->nr_to_write;
-
 	ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
-
 	/*
 	 * Handle last extent of pages
 	 */
 	if (!mpd->io_done && mpd->next_page != mpd->first_page) {
 		if (mpage_da_map_blocks(mpd) == 0)
 			mpage_da_submit_io(mpd);
-	}
 
-	wbc->nr_to_write = to_write - mpd->pages_written;
+		mpd->io_done = 1;
+		ret = MPAGE_DA_EXTENT_TAIL;
+	}
+	wbc->nr_to_write -= mpd->pages_written;
 	return ret;
 }
 
@@ -2360,12 +2369,14 @@
 static int ext4_da_writepages(struct address_space *mapping,
 			      struct writeback_control *wbc)
 {
+	pgoff_t	index;
+	int range_whole = 0;
 	handle_t *handle = NULL;
-	loff_t range_start = 0;
 	struct mpage_da_data mpd;
 	struct inode *inode = mapping->host;
+	int no_nrwrite_index_update;
+	long pages_written = 0, pages_skipped;
 	int needed_blocks, ret = 0, nr_to_writebump = 0;
-	long to_write, pages_skipped = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
 	/*
@@ -2385,23 +2396,26 @@
 		nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
 		wbc->nr_to_write = sbi->s_mb_stream_request;
 	}
+	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+		range_whole = 1;
 
-	if (!wbc->range_cyclic)
-		/*
-		 * If range_cyclic is not set force range_cont
-		 * and save the old writeback_index
-		 */
-		wbc->range_cont = 1;
-
-	range_start =  wbc->range_start;
-	pages_skipped = wbc->pages_skipped;
+	if (wbc->range_cyclic)
+		index = mapping->writeback_index;
+	else
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
 
 	mpd.wbc = wbc;
 	mpd.inode = mapping->host;
 
-restart_loop:
-	to_write = wbc->nr_to_write;
-	while (!ret && to_write > 0) {
+	/*
+	 * we don't want write_cache_pages to update
+	 * nr_to_write and writeback_index
+	 */
+	no_nrwrite_index_update = wbc->no_nrwrite_index_update;
+	wbc->no_nrwrite_index_update = 1;
+	pages_skipped = wbc->pages_skipped;
+
+	while (!ret && wbc->nr_to_write > 0) {
 
 		/*
 		 * we  insert one extent at a time. So we need
@@ -2422,48 +2436,53 @@
 			dump_stack();
 			goto out_writepages;
 		}
-		to_write -= wbc->nr_to_write;
-
 		mpd.get_block = ext4_da_get_block_write;
 		ret = mpage_da_writepages(mapping, wbc, &mpd);
 
 		ext4_journal_stop(handle);
 
-		if (mpd.retval == -ENOSPC)
+		if (mpd.retval == -ENOSPC) {
+			/* commit the transaction which would
+			 * free blocks released in the transaction
+			 * and try again
+			 */
 			jbd2_journal_force_commit_nested(sbi->s_journal);
-
-		/* reset the retry count */
-		if (ret == MPAGE_DA_EXTENT_TAIL) {
+			wbc->pages_skipped = pages_skipped;
+			ret = 0;
+		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
 			/*
 			 * got one extent now try with
 			 * rest of the pages
 			 */
-			to_write += wbc->nr_to_write;
+			pages_written += mpd.pages_written;
+			wbc->pages_skipped = pages_skipped;
 			ret = 0;
-		} else if (wbc->nr_to_write) {
+		} else if (wbc->nr_to_write)
 			/*
 			 * There is no more writeout needed
 			 * or we requested for a noblocking writeout
 			 * and we found the device congested
 			 */
-			to_write += wbc->nr_to_write;
 			break;
-		}
-		wbc->nr_to_write = to_write;
 	}
+	if (pages_skipped != wbc->pages_skipped)
+		printk(KERN_EMERG "This should not happen leaving %s "
+				"with nr_to_write = %ld ret = %d\n",
+				__func__, wbc->nr_to_write, ret);
 
-	if (wbc->range_cont && (pages_skipped != wbc->pages_skipped)) {
-		/* We skipped pages in this loop */
-		wbc->range_start = range_start;
-		wbc->nr_to_write = to_write +
-				wbc->pages_skipped - pages_skipped;
-		wbc->pages_skipped = pages_skipped;
-		goto restart_loop;
-	}
+	/* Update index */
+	index += pages_written;
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		/*
+		 * set the writeback_index so that range_cyclic
+		 * mode will write it back later
+		 */
+		mapping->writeback_index = index;
 
 out_writepages:
-	wbc->nr_to_write = to_write - nr_to_writebump;
-	wbc->range_start = range_start;
+	if (!no_nrwrite_index_update)
+		wbc->no_nrwrite_index_update = 0;
+	wbc->nr_to_write -= nr_to_writebump;
 	return ret;
 }
 
@@ -4175,7 +4194,6 @@
 	struct inode *inode = &(ei->vfs_inode);
 	u64 i_blocks = inode->i_blocks;
 	struct super_block *sb = inode->i_sb;
-	int err = 0;
 
 	if (i_blocks <= ~0U) {
 		/*
@@ -4185,36 +4203,27 @@
 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
 		raw_inode->i_blocks_high = 0;
 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
-	} else if (i_blocks <= 0xffffffffffffULL) {
+		return 0;
+	}
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
+		return -EFBIG;
+
+	if (i_blocks <= 0xffffffffffffULL) {
 		/*
 		 * i_blocks can be represented in a 48 bit variable
 		 * as multiple of 512 bytes
 		 */
-		err = ext4_update_rocompat_feature(handle, sb,
-					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
-		if (err)
-			goto  err_out;
-		/* i_block is stored in the split  48 bit fields */
 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
 	} else {
-		/*
-		 * i_blocks should be represented in a 48 bit variable
-		 * as multiple of  file system block size
-		 */
-		err = ext4_update_rocompat_feature(handle, sb,
-					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
-		if (err)
-			goto  err_out;
 		ei->i_flags |= EXT4_HUGE_FILE_FL;
 		/* i_block is stored in file system block size */
 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
 	}
-err_out:
-	return err;
+	return 0;
 }
 
 /*
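
The ext4_inode_blocks_set() hunk above now fails early with -EFBIG when i_blocks needs more than 32 bits and the HUGE_FILE feature is absent, instead of flipping the feature bit on the fly. A simplified userspace sketch of the 48-bit lo/hi split it performs (it omits the further fallback to filesystem-block units; field names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    struct raw_blocks { uint32_t lo; uint16_t hi; };

    static int encode_blocks(uint64_t blocks, int has_huge_file,
                             struct raw_blocks *out)
    {
        if (blocks <= 0xffffffffULL) {          /* fits in 32 bits */
            out->lo = (uint32_t)blocks;
            out->hi = 0;
            return 0;
        }
        if (!has_huge_file)
            return -1;                          /* -EFBIG in the kernel */
        if (blocks > 0xffffffffffffULL)
            return -1;                          /* does not fit in 48 bits */
        out->lo = (uint32_t)blocks;             /* low 32 bits */
        out->hi = (uint16_t)(blocks >> 32);     /* high 16 bits */
        return 0;
    }

    int main(void)
    {
        struct raw_blocks rb;

        if (encode_blocks(0x123456789abULL, 1, &rb) == 0)
            printf("lo=%#x hi=%#x\n", (unsigned)rb.lo, (unsigned)rb.hi);
        return 0;
    }
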
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b580714..dfe17a1 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2300,6 +2300,7 @@
 	}
 
 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
+	meta_group_info[i]->bb_free_root.rb_node = NULL;
 
 #ifdef DOUBLE_CHECK
 	{
@@ -2522,9 +2523,6 @@
 	}
 
 	spin_lock_init(&sbi->s_md_lock);
-	INIT_LIST_HEAD(&sbi->s_active_transaction);
-	INIT_LIST_HEAD(&sbi->s_closed_transaction);
-	INIT_LIST_HEAD(&sbi->s_committed_transaction);
 	spin_lock_init(&sbi->s_bal_lock);
 
 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
@@ -2553,6 +2551,8 @@
 	ext4_mb_init_per_dev_proc(sb);
 	ext4_mb_history_init(sb);
 
+	sbi->s_journal->j_commit_callback = release_blocks_on_commit;
+
 	printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
 	return 0;
 }
@@ -2568,7 +2568,7 @@
 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
 		list_del(&pa->pa_group_list);
 		count++;
-		kfree(pa);
+		kmem_cache_free(ext4_pspace_cachep, pa);
 	}
 	if (count)
 		mb_debug("mballoc: %u PAs left\n", count);
@@ -2582,15 +2582,6 @@
 	struct ext4_group_info *grinfo;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	/* release freed, non-committed blocks */
-	spin_lock(&sbi->s_md_lock);
-	list_splice_init(&sbi->s_closed_transaction,
-			&sbi->s_committed_transaction);
-	list_splice_init(&sbi->s_active_transaction,
-			&sbi->s_committed_transaction);
-	spin_unlock(&sbi->s_md_lock);
-	ext4_mb_free_committed_blocks(sb);
-
 	if (sbi->s_group_info) {
 		for (i = 0; i < sbi->s_groups_count; i++) {
 			grinfo = ext4_get_group_info(sb, i);
@@ -2644,61 +2635,57 @@
 	return 0;
 }
 
-static noinline_for_stack void
-ext4_mb_free_committed_blocks(struct super_block *sb)
+/*
+ * This function is called by the jbd2 layer once the commit has finished,
+ * so we know we can free the blocks that were released with that commit.
+ */
+static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 {
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	int err;
-	int i;
-	int count = 0;
-	int count2 = 0;
-	struct ext4_free_metadata *md;
+	struct super_block *sb = journal->j_private;
 	struct ext4_buddy e4b;
+	struct ext4_group_info *db;
+	int err, count = 0, count2 = 0;
+	struct ext4_free_data *entry;
+	ext4_fsblk_t discard_block;
+	struct list_head *l, *ltmp;
 
-	if (list_empty(&sbi->s_committed_transaction))
-		return;
-
-	/* there is committed blocks to be freed yet */
-	do {
-		/* get next array of blocks */
-		md = NULL;
-		spin_lock(&sbi->s_md_lock);
-		if (!list_empty(&sbi->s_committed_transaction)) {
-			md = list_entry(sbi->s_committed_transaction.next,
-					struct ext4_free_metadata, list);
-			list_del(&md->list);
-		}
-		spin_unlock(&sbi->s_md_lock);
-
-		if (md == NULL)
-			break;
+	list_for_each_safe(l, ltmp, &txn->t_private_list) {
+		entry = list_entry(l, struct ext4_free_data, list);
 
 		mb_debug("gonna free %u blocks in group %lu (0x%p):",
-				md->num, md->group, md);
+			 entry->count, entry->group, entry);
 
-		err = ext4_mb_load_buddy(sb, md->group, &e4b);
+		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
 		/* we expect to find existing buddy because it's pinned */
 		BUG_ON(err != 0);
 
+		db = e4b.bd_info;
 		/* there are blocks to put in buddy to make them really free */
-		count += md->num;
+		count += entry->count;
 		count2++;
-		ext4_lock_group(sb, md->group);
-		for (i = 0; i < md->num; i++) {
-			mb_debug(" %u", md->blocks[i]);
-			mb_free_blocks(NULL, &e4b, md->blocks[i], 1);
+		ext4_lock_group(sb, entry->group);
+		/* Take it out of per group rb tree */
+		rb_erase(&entry->node, &(db->bb_free_root));
+		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
+
+		if (!db->bb_free_root.rb_node) {
+			/* No more items in the per group rb tree
+			 * balance refcounts from ext4_mb_free_metadata()
+			 */
+			page_cache_release(e4b.bd_buddy_page);
+			page_cache_release(e4b.bd_bitmap_page);
 		}
-		mb_debug("\n");
-		ext4_unlock_group(sb, md->group);
+		ext4_unlock_group(sb, entry->group);
+		discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
+			+ entry->start_blk
+			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+		trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u", sb->s_id,
+			   (unsigned long long) discard_block, entry->count);
+		sb_issue_discard(sb, discard_block, entry->count);
 
-		/* balance refcounts from ext4_mb_free_metadata() */
-		page_cache_release(e4b.bd_buddy_page);
-		page_cache_release(e4b.bd_bitmap_page);
-
-		kfree(md);
+		kmem_cache_free(ext4_free_ext_cachep, entry);
 		ext4_mb_release_desc(&e4b);
-
-	} while (md);
+	}
 
 	mb_debug("freed %u blocks in %u structures\n", count, count2);
 }
@@ -2712,6 +2699,7 @@
 
 static int ext4_mb_init_per_dev_proc(struct super_block *sb)
 {
+#ifdef CONFIG_PROC_FS
 	mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct proc_dir_entry *proc;
@@ -2735,10 +2723,14 @@
 	remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
 	remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
 	return -ENOMEM;
+#else
+	return 0;
+#endif
 }
 
 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
 {
+#ifdef CONFIG_PROC_FS
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	if (sbi->s_proc == NULL)
@@ -2750,7 +2742,7 @@
 	remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
 	remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
 	remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
-
+#endif
 	return 0;
 }
 
@@ -2771,6 +2763,16 @@
 		kmem_cache_destroy(ext4_pspace_cachep);
 		return -ENOMEM;
 	}
+
+	ext4_free_ext_cachep =
+		kmem_cache_create("ext4_free_block_extents",
+				     sizeof(struct ext4_free_data),
+				     0, SLAB_RECLAIM_ACCOUNT, NULL);
+	if (ext4_free_ext_cachep == NULL) {
+		kmem_cache_destroy(ext4_pspace_cachep);
+		kmem_cache_destroy(ext4_ac_cachep);
+		return -ENOMEM;
+	}
 	return 0;
 }
 
@@ -2779,6 +2781,7 @@
 	/* XXX: synchronize_rcu(); */
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
+	kmem_cache_destroy(ext4_free_ext_cachep);
 }
 
 
@@ -4324,8 +4327,6 @@
 		goto out1;
 	}
 
-	ext4_mb_poll_new_transaction(sb, handle);
-
 	*errp = ext4_mb_initialize_context(ac, ar);
 	if (*errp) {
 		ar->len = 0;
@@ -4384,35 +4385,20 @@
 
 	return block;
 }
-static void ext4_mb_poll_new_transaction(struct super_block *sb,
-						handle_t *handle)
+
+/*
+ * We can merge two free data extents only if the physical blocks
+ * are contiguous, AND the extents were freed by the same transaction,
+ * AND the blocks are associated with the same group.
+ */
+static int can_merge(struct ext4_free_data *entry1,
+			struct ext4_free_data *entry2)
 {
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-
-	if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-		return;
-
-	/* new transaction! time to close last one and free blocks for
-	 * committed transaction. we know that only transaction can be
-	 * active, so previos transaction can be being logged and we
-	 * know that transaction before previous is known to be already
-	 * logged. this means that now we may free blocks freed in all
-	 * transactions before previous one. hope I'm clear enough ... */
-
-	spin_lock(&sbi->s_md_lock);
-	if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-		mb_debug("new transaction %lu, old %lu\n",
-				(unsigned long) handle->h_transaction->t_tid,
-				(unsigned long) sbi->s_last_transaction);
-		list_splice_init(&sbi->s_closed_transaction,
-				&sbi->s_committed_transaction);
-		list_splice_init(&sbi->s_active_transaction,
-				&sbi->s_closed_transaction);
-		sbi->s_last_transaction = handle->h_transaction->t_tid;
-	}
-	spin_unlock(&sbi->s_md_lock);
-
-	ext4_mb_free_committed_blocks(sb);
+	if ((entry1->t_tid == entry2->t_tid) &&
+	    (entry1->group == entry2->group) &&
+	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
+		return 1;
+	return 0;
 }
 
 static noinline_for_stack int
@@ -4422,57 +4408,80 @@
 	struct ext4_group_info *db = e4b->bd_info;
 	struct super_block *sb = e4b->bd_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct ext4_free_metadata *md;
-	int i;
+	struct ext4_free_data *entry, *new_entry;
+	struct rb_node **n = &db->bb_free_root.rb_node, *node;
+	struct rb_node *parent = NULL, *new_node;
+
 
 	BUG_ON(e4b->bd_bitmap_page == NULL);
 	BUG_ON(e4b->bd_buddy_page == NULL);
 
+	new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+	new_entry->start_blk = block;
+	new_entry->group  = group;
+	new_entry->count = count;
+	new_entry->t_tid = handle->h_transaction->t_tid;
+	new_node = &new_entry->node;
+
 	ext4_lock_group(sb, group);
-	for (i = 0; i < count; i++) {
-		md = db->bb_md_cur;
-		if (md && db->bb_tid != handle->h_transaction->t_tid) {
-			db->bb_md_cur = NULL;
-			md = NULL;
-		}
-
-		if (md == NULL) {
-			ext4_unlock_group(sb, group);
-			md = kmalloc(sizeof(*md), GFP_NOFS);
-			if (md == NULL)
-				return -ENOMEM;
-			md->num = 0;
-			md->group = group;
-
-			ext4_lock_group(sb, group);
-			if (db->bb_md_cur == NULL) {
-				spin_lock(&sbi->s_md_lock);
-				list_add(&md->list, &sbi->s_active_transaction);
-				spin_unlock(&sbi->s_md_lock);
-				/* protect buddy cache from being freed,
-				 * otherwise we'll refresh it from
-				 * on-disk bitmap and lose not-yet-available
-				 * blocks */
-				page_cache_get(e4b->bd_buddy_page);
-				page_cache_get(e4b->bd_bitmap_page);
-				db->bb_md_cur = md;
-				db->bb_tid = handle->h_transaction->t_tid;
-				mb_debug("new md 0x%p for group %lu\n",
-						md, md->group);
-			} else {
-				kfree(md);
-				md = db->bb_md_cur;
-			}
-		}
-
-		BUG_ON(md->num >= EXT4_BB_MAX_BLOCKS);
-		md->blocks[md->num] = block + i;
-		md->num++;
-		if (md->num == EXT4_BB_MAX_BLOCKS) {
-			/* no more space, put full container on a sb's list */
-			db->bb_md_cur = NULL;
+	if (!*n) {
+		/* first free block extent. We need to
+		 * protect the buddy cache from being freed,
+		 * otherwise we'll refresh it from the
+		 * on-disk bitmap and lose not-yet-available
+		 * blocks */
+		page_cache_get(e4b->bd_buddy_page);
+		page_cache_get(e4b->bd_bitmap_page);
+	}
+	while (*n) {
+		parent = *n;
+		entry = rb_entry(parent, struct ext4_free_data, node);
+		if (block < entry->start_blk)
+			n = &(*n)->rb_left;
+		else if (block >= (entry->start_blk + entry->count))
+			n = &(*n)->rb_right;
+		else {
+			ext4_error(sb, __func__,
+			    "Double free of blocks %d (%d %d)\n",
+			    block, entry->start_blk, entry->count);
+			return 0;
 		}
 	}
+
+	rb_link_node(new_node, parent, n);
+	rb_insert_color(new_node, &db->bb_free_root);
+
+	/* Now see whether the extent can be merged to the left and right */
+	node = rb_prev(new_node);
+	if (node) {
+		entry = rb_entry(node, struct ext4_free_data, node);
+		if (can_merge(entry, new_entry)) {
+			new_entry->start_blk = entry->start_blk;
+			new_entry->count += entry->count;
+			rb_erase(node, &(db->bb_free_root));
+			spin_lock(&sbi->s_md_lock);
+			list_del(&entry->list);
+			spin_unlock(&sbi->s_md_lock);
+			kmem_cache_free(ext4_free_ext_cachep, entry);
+		}
+	}
+
+	node = rb_next(new_node);
+	if (node) {
+		entry = rb_entry(node, struct ext4_free_data, node);
+		if (can_merge(new_entry, entry)) {
+			new_entry->count += entry->count;
+			rb_erase(node, &(db->bb_free_root));
+			spin_lock(&sbi->s_md_lock);
+			list_del(&entry->list);
+			spin_unlock(&sbi->s_md_lock);
+			kmem_cache_free(ext4_free_ext_cachep, entry);
+		}
+	}
+	/* Add the extent to transaction's private list */
+	spin_lock(&sbi->s_md_lock);
+	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
+	spin_unlock(&sbi->s_md_lock);
 	ext4_unlock_group(sb, group);
 	return 0;
 }
@@ -4500,8 +4509,6 @@
 
 	*freed = 0;
 
-	ext4_mb_poll_new_transaction(sb, handle);
-
 	sbi = EXT4_SB(sb);
 	es = EXT4_SB(sb)->s_es;
 	if (block < le32_to_cpu(es->s_first_data_block) ||
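
The new per-group rb-tree above coalesces freed extents under the rule spelled out in can_merge(): same freeing transaction, same block group, and physically contiguous blocks. A standalone restatement of that rule with simplified stand-in types:

    #include <stdio.h>

    struct free_extent {
        unsigned int tid;       /* transaction that freed the extent */
        unsigned int group;     /* block group */
        unsigned int start;     /* first block within the group */
        unsigned int count;     /* number of blocks */
    };

    static int can_merge(const struct free_extent *a, const struct free_extent *b)
    {
        return a->tid == b->tid &&
               a->group == b->group &&
               a->start + a->count == b->start;
    }

    int main(void)
    {
        struct free_extent left  = { 7, 3, 100, 8 };
        struct free_extent right = { 7, 3, 108, 4 };

        printf("%d\n", can_merge(&left, &right));   /* 1: contiguous, same tid */
        right.tid = 8;
        printf("%d\n", can_merge(&left, &right));   /* 0: different transaction */
        return 0;
    }
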
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b3b4828..b5dff1f 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -18,6 +18,8 @@
 #include <linux/pagemap.h>
 #include <linux/seq_file.h>
 #include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/marker.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "group.h"
@@ -98,23 +100,29 @@
 
 static struct kmem_cache *ext4_pspace_cachep;
 static struct kmem_cache *ext4_ac_cachep;
+static struct kmem_cache *ext4_free_ext_cachep;
 
-#ifdef EXT4_BB_MAX_BLOCKS
-#undef EXT4_BB_MAX_BLOCKS
-#endif
-#define EXT4_BB_MAX_BLOCKS	30
+struct ext4_free_data {
+	/* this links the free block information from group_info */
+	struct rb_node node;
 
-struct ext4_free_metadata {
-	ext4_group_t group;
-	unsigned short num;
-	ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
+	/* this links the free block information from ext4_sb_info */
 	struct list_head list;
+
+	/* group which free block extent belongs */
+	ext4_group_t group;
+
+	/* free block extent */
+	ext4_grpblk_t start_blk;
+	ext4_grpblk_t count;
+
+	/* transaction which freed this extent */
+	tid_t	t_tid;
 };
 
 struct ext4_group_info {
 	unsigned long	bb_state;
-	unsigned long	bb_tid;
-	struct ext4_free_metadata *bb_md_cur;
+	struct rb_root  bb_free_root;
 	unsigned short	bb_first_free;
 	unsigned short	bb_free;
 	unsigned short	bb_fragments;
@@ -261,8 +269,6 @@
 
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group);
-static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
-static void ext4_mb_free_committed_blocks(struct super_block *);
 static void ext4_mb_return_to_preallocation(struct inode *inode,
 					struct ext4_buddy *e4b, sector_t block,
 					int count);
@@ -270,6 +276,7 @@
 			struct super_block *, struct ext4_prealloc_space *pa);
 static int ext4_mb_init_per_dev_proc(struct super_block *sb);
 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
+static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
 
 
 static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index dea8f13..9b2b2bc 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -374,66 +374,6 @@
 	 */
 }
 
-int ext4_update_compat_feature(handle_t *handle,
-					struct super_block *sb, __u32 compat)
-{
-	int err = 0;
-	if (!EXT4_HAS_COMPAT_FEATURE(sb, compat)) {
-		err = ext4_journal_get_write_access(handle,
-				EXT4_SB(sb)->s_sbh);
-		if (err)
-			return err;
-		EXT4_SET_COMPAT_FEATURE(sb, compat);
-		sb->s_dirt = 1;
-		handle->h_sync = 1;
-		BUFFER_TRACE(EXT4_SB(sb)->s_sbh,
-					"call ext4_journal_dirty_met adata");
-		err = ext4_journal_dirty_metadata(handle,
-				EXT4_SB(sb)->s_sbh);
-	}
-	return err;
-}
-
-int ext4_update_rocompat_feature(handle_t *handle,
-					struct super_block *sb, __u32 rocompat)
-{
-	int err = 0;
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, rocompat)) {
-		err = ext4_journal_get_write_access(handle,
-				EXT4_SB(sb)->s_sbh);
-		if (err)
-			return err;
-		EXT4_SET_RO_COMPAT_FEATURE(sb, rocompat);
-		sb->s_dirt = 1;
-		handle->h_sync = 1;
-		BUFFER_TRACE(EXT4_SB(sb)->s_sbh,
-					"call ext4_journal_dirty_met adata");
-		err = ext4_journal_dirty_metadata(handle,
-				EXT4_SB(sb)->s_sbh);
-	}
-	return err;
-}
-
-int ext4_update_incompat_feature(handle_t *handle,
-					struct super_block *sb, __u32 incompat)
-{
-	int err = 0;
-	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, incompat)) {
-		err = ext4_journal_get_write_access(handle,
-				EXT4_SB(sb)->s_sbh);
-		if (err)
-			return err;
-		EXT4_SET_INCOMPAT_FEATURE(sb, incompat);
-		sb->s_dirt = 1;
-		handle->h_sync = 1;
-		BUFFER_TRACE(EXT4_SB(sb)->s_sbh,
-					"call ext4_journal_dirty_met adata");
-		err = ext4_journal_dirty_metadata(handle,
-				EXT4_SB(sb)->s_sbh);
-	}
-	return err;
-}
-
 /*
  * Open the external journal device
  */
@@ -904,7 +844,7 @@
 enum {
 	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
 	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
+	Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
 	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
 	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
 	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
@@ -915,7 +855,7 @@
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
 	Opt_grpquota, Opt_extents, Opt_noextents, Opt_i_version,
-	Opt_mballoc, Opt_nomballoc, Opt_stripe, Opt_delalloc, Opt_nodelalloc,
+	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
 	Opt_inode_readahead_blks
 };
 
@@ -933,8 +873,6 @@
 	{Opt_err_panic, "errors=panic"},
 	{Opt_err_ro, "errors=remount-ro"},
 	{Opt_nouid32, "nouid32"},
-	{Opt_nocheck, "nocheck"},
-	{Opt_nocheck, "check=none"},
 	{Opt_debug, "debug"},
 	{Opt_oldalloc, "oldalloc"},
 	{Opt_orlov, "orlov"},
@@ -973,8 +911,6 @@
 	{Opt_extents, "extents"},
 	{Opt_noextents, "noextents"},
 	{Opt_i_version, "i_version"},
-	{Opt_mballoc, "mballoc"},
-	{Opt_nomballoc, "nomballoc"},
 	{Opt_stripe, "stripe=%u"},
 	{Opt_resize, "resize"},
 	{Opt_delalloc, "delalloc"},
@@ -1073,9 +1009,6 @@
 		case Opt_nouid32:
 			set_opt(sbi->s_mount_opt, NO_UID32);
 			break;
-		case Opt_nocheck:
-			clear_opt(sbi->s_mount_opt, CHECK);
-			break;
 		case Opt_debug:
 			set_opt(sbi->s_mount_opt, DEBUG);
 			break;
@@ -1618,14 +1551,14 @@
 		if (block_bitmap < first_block || block_bitmap > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Block bitmap for group %lu not in group "
-			       "(block %llu)!", i, block_bitmap);
+			       "(block %llu)!\n", i, block_bitmap);
 			return 0;
 		}
 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Inode bitmap for group %lu not in group "
-			       "(block %llu)!", i, inode_bitmap);
+			       "(block %llu)!\n", i, inode_bitmap);
 			return 0;
 		}
 		inode_table = ext4_inode_table(sb, gdp);
@@ -1633,7 +1566,7 @@
 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Inode table for group %lu not in group "
-			       "(block %llu)!", i, inode_table);
+			       "(block %llu)!\n", i, inode_table);
 			return 0;
 		}
 		spin_lock(sb_bgl_lock(sbi, i));
@@ -1778,13 +1711,13 @@
  *
  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
  */
-static loff_t ext4_max_size(int blkbits)
+static loff_t ext4_max_size(int blkbits, int has_huge_files)
 {
 	loff_t res;
 	loff_t upper_limit = MAX_LFS_FILESIZE;
 
 	/* small i_blocks in vfs inode? */
-	if (sizeof(blkcnt_t) < sizeof(u64)) {
+	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
 		/*
 		 * CONFIG_LSF is not enabled implies the inode
 		 * i_block represent total blocks in 512 bytes
@@ -1814,7 +1747,7 @@
  * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
  * We need to be 1 filesystem block less than the 2^48 sector limit.
  */
-static loff_t ext4_max_bitmap_size(int bits)
+static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 {
 	loff_t res = EXT4_NDIR_BLOCKS;
 	int meta_blocks;
@@ -1827,11 +1760,11 @@
 	 * total number of  512 bytes blocks of the file
 	 */
 
-	if (sizeof(blkcnt_t) < sizeof(u64)) {
+	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
 		/*
-		 * CONFIG_LSF is not enabled implies the inode
-		 * i_block represent total blocks in 512 bytes
-		 * 32 == size of vfs inode i_blocks * 8
+		 * !has_huge_files or CONFIG_LSF not enabled implies that
+		 * the inode i_blocks field represents total blocks in
+		 * 512-byte units; 32 == size of vfs inode i_blocks * 8
 		 */
 		upper_limit = (1LL << 32) - 1;
 
@@ -1940,7 +1873,7 @@
 	int blocksize;
 	int db_count;
 	int i;
-	int needs_recovery;
+	int needs_recovery, has_huge_files;
 	__le32 features;
 	__u64 blocks_count;
 	int err;
@@ -2081,7 +2014,9 @@
 		       sb->s_id, le32_to_cpu(features));
 		goto failed_mount;
 	}
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
+	has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
+	if (has_huge_files) {
 		/*
 		 * Large file size enabled file system can only be
 		 * mount if kernel is build with CONFIG_LSF
@@ -2131,8 +2066,9 @@
 		}
 	}
 
-	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits);
-	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits);
+	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
+						      has_huge_files);
+	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
 
 	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
 		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
@@ -2456,6 +2392,21 @@
 			"available.\n");
 	}
 
+	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+		printk(KERN_WARNING "EXT4-fs: Ignoring delalloc option - "
+				"requested data journaling mode\n");
+		clear_opt(sbi->s_mount_opt, DELALLOC);
+	} else if (test_opt(sb, DELALLOC))
+		printk(KERN_INFO "EXT4-fs: delayed allocation enabled\n");
+
+	ext4_ext_init(sb);
+	err = ext4_mb_init(sb, needs_recovery);
+	if (err) {
+		printk(KERN_ERR "EXT4-fs: failed to initalize mballoc (%d)\n",
+		       err);
+		goto failed_mount4;
+	}
+
 	/*
 	 * akpm: core read_super() calls in here with the superblock locked.
 	 * That deadlocks, because orphan cleanup needs to lock the superblock
@@ -2475,21 +2426,6 @@
 	       test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA ? "ordered":
 	       "writeback");
 
-	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
-		printk(KERN_WARNING "EXT4-fs: Ignoring delalloc option - "
-				"requested data journaling mode\n");
-		clear_opt(sbi->s_mount_opt, DELALLOC);
-	} else if (test_opt(sb, DELALLOC))
-		printk(KERN_INFO "EXT4-fs: delayed allocation enabled\n");
-
-	ext4_ext_init(sb);
-	err = ext4_mb_init(sb, needs_recovery);
-	if (err) {
-		printk(KERN_ERR "EXT4-fs: failed to initalize mballoc (%d)\n",
-		       err);
-		goto failed_mount4;
-	}
-
 	lock_kernel();
 	return 0;
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2bada6b..34930a9 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -101,6 +101,8 @@
 		file->f_op = &fuse_direct_io_file_operations;
 	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
 		invalidate_inode_pages2(inode->i_mapping);
+	if (outarg->open_flags & FOPEN_NONSEEKABLE)
+		nonseekable_open(inode, file);
 	ff->fh = outarg->fh;
 	file->private_data = fuse_file_get(ff);
 }
@@ -1448,6 +1450,9 @@
 	mutex_lock(&inode->i_mutex);
 	switch (origin) {
 	case SEEK_END:
+		retval = fuse_update_attributes(inode, NULL, file, NULL);
+		if (retval)
+			return retval;
 		offset += i_size_read(inode);
 		break;
 	case SEEK_CUR:
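
The llseek hunk above refreshes the inode attributes before a SEEK_END, since a network filesystem's cached size may be stale. A toy sketch of that ordering; remote_file, refresh_attributes() and seek_end() are hypothetical stand-ins:

    #include <stdio.h>

    struct remote_file { long long cached_size; };

    static int refresh_attributes(struct remote_file *f)
    {
        f->cached_size = 4096;  /* pretend the server reported a new size */
        return 0;
    }

    static long long seek_end(struct remote_file *f, long long offset)
    {
        int err = refresh_attributes(f);    /* refresh first ... */

        if (err)
            return err;
        return f->cached_size + offset;     /* ... then offset from EOF */
    }

    int main(void)
    {
        struct remote_file f = { .cached_size = 0 };

        printf("%lld\n", seek_end(&f, -16));    /* 4080, not -16 */
        return 0;
    }
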
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 3a87607..35accfd 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -6,6 +6,9 @@
   See the file COPYING.
 */
 
+#ifndef _FS_FUSE_I_H
+#define _FS_FUSE_I_H
+
 #include <linux/fuse.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
@@ -655,3 +658,5 @@
 void fuse_release_nowrite(struct inode *inode);
 
 u64 fuse_get_attr_version(struct fuse_conn *fc);
+
+#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6a84388..54b1f0e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -865,7 +865,7 @@
 	if (is_bdev) {
 		fc->destroy_req = fuse_request_alloc();
 		if (!fc->destroy_req)
-			goto err_put_root;
+			goto err_free_init_req;
 	}
 
 	mutex_lock(&fuse_mutex);
@@ -895,6 +895,7 @@
 
  err_unlock:
 	mutex_unlock(&fuse_mutex);
+ err_free_init_req:
 	fuse_request_free(init_req);
  err_put_root:
 	dput(root_dentry);
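
The fuse mount-setup fix above adds an err_free_init_req label so a failed destroy_req allocation unwinds exactly the objects allocated so far, and nothing more. A generic userspace sketch of that unwind pattern using plain malloc() stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
        void *root = NULL, *init_req = NULL, *destroy_req = NULL;

        root = malloc(16);
        if (!root)
            goto err;
        init_req = malloc(16);
        if (!init_req)
            goto err_put_root;
        destroy_req = malloc(16);
        if (!destroy_req)
            goto err_free_init_req;     /* the label the patch adds */

        free(destroy_req);
        free(init_req);
        free(root);
        return 0;

    err_free_init_req:
        free(init_req);
    err_put_root:
        free(root);
    err:
        return -1;
    }

    int main(void)
    {
        printf("%d\n", setup());    /* 0 on success */
        return 0;
    }
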
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index fec8f61..0022eec 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -199,6 +199,9 @@
 		goto done;
 	}
 
+	if (inode->i_ino == HFSPLUS_EXT_CNID)
+		return -EIO;
+
 	mutex_lock(&HFSPLUS_I(inode).extents_lock);
 	res = hfsplus_ext_read_extent(inode, ablock);
 	if (!res) {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index b085d64..963be64 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -254,6 +254,8 @@
 {
 	if (HFSPLUS_IS_RSRC(inode))
 		inode = HFSPLUS_I(inode).rsrc_inode;
+	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
+		return -EOVERFLOW;
 	atomic_inc(&HFSPLUS_I(inode).opencnt);
 	return 0;
 }
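
The hfsplus open hunk above rejects large files for openers that did not pass O_LARGEFILE, returning -EOVERFLOW as the LFS interface expects. A minimal sketch of the check (the constant and flag here are illustrative, not the kernel definitions):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_NON_LFS_SKETCH  ((1LL << 31) - 1)   /* 2 GiB - 1 */
    #define FLAG_LARGEFILE      1

    static int check_open(long long size, int flags)
    {
        if (!(flags & FLAG_LARGEFILE) && size > MAX_NON_LFS_SKETCH)
            return -EOVERFLOW;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_open(3LL << 30, 0));               /* -EOVERFLOW */
        printf("%d\n", check_open(3LL << 30, FLAG_LARGEFILE));  /* 0 */
        return 0;
    }
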
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig
new file mode 100644
index 0000000..4e28bee
--- /dev/null
+++ b/fs/jbd/Kconfig
@@ -0,0 +1,30 @@
+config JBD
+	tristate
+	help
+	  This is a generic journalling layer for block devices.  It is
+	  currently used by the ext3 file system, but it could also be
+	  used to add journal support to other file systems or block
+	  devices such as RAID or LVM.
+
+	  If you are using the ext3 file system, you need to say Y here.
+	  If you are not using ext3 then you will probably want to say N.
+
+	  To compile this device as a module, choose M here: the module will be
+	  called jbd.  If you are compiling ext3 into the kernel, you
+	  cannot compile this code as a module.
+
+config JBD_DEBUG
+	bool "JBD (ext3) debugging support"
+	depends on JBD && DEBUG_FS
+	help
+	  If you are using the ext3 journaled file system (or potentially any
+	  other file system/device using JBD), this option allows you to
+	  enable debugging output while the system is running, in order to
+	  help track down any problems you are having.  By default the
+	  debugging output will be turned off.
+
+	  If you select Y here, then you will be able to turn on debugging
+	  with "echo N > /sys/kernel/debug/jbd/jbd-debug", where N is a
+	  number between 1 and 5, the higher the number, the more debugging
+	  output is generated.  To turn debugging off again, do
+	  "echo 0 > /sys/kernel/debug/jbd/jbd-debug".
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index a5432bb..1bd8d4a 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -93,7 +93,8 @@
 	int ret = 0;
 	struct buffer_head *bh = jh2bh(jh);
 
-	if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
+	if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
+	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
 		JBUFFER_TRACE(jh, "remove from checkpoint list");
 		ret = __journal_remove_checkpoint(jh) + 1;
 		jbd_unlock_bh_state(bh);
@@ -126,14 +127,29 @@
 
 		/*
 		 * Test again, another process may have checkpointed while we
-		 * were waiting for the checkpoint lock
+		 * were waiting for the checkpoint lock. If there are no
+		 * outstanding transactions there is nothing to checkpoint and
+		 * we can't make progress. Abort the journal in this case.
 		 */
 		spin_lock(&journal->j_state_lock);
+		spin_lock(&journal->j_list_lock);
 		nblocks = jbd_space_needed(journal);
 		if (__log_space_left(journal) < nblocks) {
+			int chkpt = journal->j_checkpoint_transactions != NULL;
+
+			spin_unlock(&journal->j_list_lock);
 			spin_unlock(&journal->j_state_lock);
-			log_do_checkpoint(journal);
+			if (chkpt) {
+				log_do_checkpoint(journal);
+			} else {
+				printk(KERN_ERR "%s: no transactions\n",
+				       __func__);
+				journal_abort(journal, 0);
+			}
+
 			spin_lock(&journal->j_state_lock);
+		} else {
+			spin_unlock(&journal->j_list_lock);
 		}
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	}
@@ -160,21 +176,25 @@
  * buffers. Note that we take the buffers in the opposite ordering
  * from the one in which they were submitted for IO.
  *
+ * Return 0 on success, and return <0 if some buffers have failed
+ * to be written out.
+ *
  * Called with j_list_lock held.
  */
-static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
+static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
 {
 	struct journal_head *jh;
 	struct buffer_head *bh;
 	tid_t this_tid;
 	int released = 0;
+	int ret = 0;
 
 	this_tid = transaction->t_tid;
 restart:
 	/* Did somebody clean up the transaction in the meanwhile? */
 	if (journal->j_checkpoint_transactions != transaction ||
 			transaction->t_tid != this_tid)
-		return;
+		return ret;
 	while (!released && transaction->t_checkpoint_io_list) {
 		jh = transaction->t_checkpoint_io_list;
 		bh = jh2bh(jh);
@@ -194,6 +214,9 @@
 			spin_lock(&journal->j_list_lock);
 			goto restart;
 		}
+		if (unlikely(buffer_write_io_error(bh)))
+			ret = -EIO;
+
 		/*
 		 * Now in whatever state the buffer currently is, we know that
 		 * it has been written out and so we can drop it from the list
@@ -203,6 +226,8 @@
 		journal_remove_journal_head(bh);
 		__brelse(bh);
 	}
+
+	return ret;
 }
 
 #define NR_BATCH	64
@@ -226,7 +251,8 @@
  * Try to flush one buffer from the checkpoint list to disk.
  *
  * Return 1 if something happened which requires us to abort the current
- * scan of the checkpoint list.
+ * scan of the checkpoint list.  Return <0 if the buffer has failed to
+ * be written out.
  *
  * Called with j_list_lock held and drops it if 1 is returned
  * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
@@ -256,6 +282,9 @@
 		log_wait_commit(journal, tid);
 		ret = 1;
 	} else if (!buffer_dirty(bh)) {
+		ret = 1;
+		if (unlikely(buffer_write_io_error(bh)))
+			ret = -EIO;
 		J_ASSERT_JH(jh, !buffer_jbddirty(bh));
 		BUFFER_TRACE(bh, "remove from checkpoint");
 		__journal_remove_checkpoint(jh);
@@ -263,7 +292,6 @@
 		jbd_unlock_bh_state(bh);
 		journal_remove_journal_head(bh);
 		__brelse(bh);
-		ret = 1;
 	} else {
 		/*
 		 * Important: we are about to write the buffer, and
@@ -295,6 +323,7 @@
  * to disk. We submit larger chunks of data at once.
  *
  * The journal should be locked before calling this function.
+ * Called with j_checkpoint_mutex held.
  */
 int log_do_checkpoint(journal_t *journal)
 {
@@ -318,6 +347,7 @@
 	 * OK, we need to start writing disk blocks.  Take one transaction
 	 * and write it.
 	 */
+	result = 0;
 	spin_lock(&journal->j_list_lock);
 	if (!journal->j_checkpoint_transactions)
 		goto out;
@@ -334,7 +364,7 @@
 		int batch_count = 0;
 		struct buffer_head *bhs[NR_BATCH];
 		struct journal_head *jh;
-		int retry = 0;
+		int retry = 0, err;
 
 		while (!retry && transaction->t_checkpoint_list) {
 			struct buffer_head *bh;
@@ -347,6 +377,8 @@
 				break;
 			}
 			retry = __process_buffer(journal, jh, bhs,&batch_count);
+			if (retry < 0 && !result)
+				result = retry;
 			if (!retry && (need_resched() ||
 				spin_needbreak(&journal->j_list_lock))) {
 				spin_unlock(&journal->j_list_lock);
@@ -371,14 +403,18 @@
 		 * Now we have cleaned up the first transaction's checkpoint
 		 * list. Let's clean up the second one
 		 */
-		__wait_cp_io(journal, transaction);
+		err = __wait_cp_io(journal, transaction);
+		if (!result)
+			result = err;
 	}
 out:
 	spin_unlock(&journal->j_list_lock);
-	result = cleanup_journal_tail(journal);
 	if (result < 0)
-		return result;
-	return 0;
+		journal_abort(journal, result);
+	else
+		result = cleanup_journal_tail(journal);
+
+	return (result < 0) ? result : 0;
 }
 
 /*
@@ -394,8 +430,9 @@
  * This is the only part of the journaling code which really needs to be
  * aware of transaction aborts.  Checkpointing involves writing to the
  * main filesystem area rather than to the journal, so it can proceed
- * even in abort state, but we must not update the journal superblock if
- * we have an abort error outstanding.
+ * even in abort state, but we must not update the super block if
+ * checkpointing may have failed.  Otherwise, we would lose some metadata
+ * buffers which should be written back to the filesystem.
  */
 
 int cleanup_journal_tail(journal_t *journal)
@@ -404,6 +441,9 @@
 	tid_t		first_tid;
 	unsigned long	blocknr, freed;
 
+	if (is_journal_aborted(journal))
+		return 1;
+
 	/* OK, work out the oldest transaction remaining in the log, and
 	 * the log block it starts at.
 	 *
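
The checkpoint changes above all follow one rule: keep scanning after a buffer write error, remember only the first failure, and let the caller abort the journal once the pass is complete. A standalone sketch of that aggregation (write_buffer() is a made-up stand-in):

    #include <stdio.h>
    #include <errno.h>

    static int write_buffer(int i)
    {
        return (i == 2) ? -EIO : 0;     /* pretend buffer 2 fails */
    }

    static int checkpoint_all(int nr)
    {
        int i, err, result = 0;

        for (i = 0; i < nr; i++) {
            err = write_buffer(i);
            if (err < 0 && !result)
                result = err;           /* first error wins */
        }
        return result;                  /* caller aborts the journal if < 0 */
    }

    int main(void)
    {
        printf("%d\n", checkpoint_all(5));  /* -5 (-EIO) */
        return 0;
    }
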
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index ae08c05..25719d9 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -482,6 +482,8 @@
 		printk(KERN_WARNING
 			"JBD: Detected IO errors while flushing file data "
 			"on %s\n", bdevname(journal->j_fs_dev, b));
+		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
+			journal_abort(journal, err);
 		err = 0;
 	}
 
@@ -518,9 +520,10 @@
 		jh = commit_transaction->t_buffers;
 
 		/* If we're in abort mode, we just un-journal the buffer and
-		   release it for background writing. */
+		   release it. */
 
 		if (is_journal_aborted(journal)) {
+			clear_buffer_jbddirty(jh2bh(jh));
 			JBUFFER_TRACE(jh, "journal is aborting: refile");
 			journal_refile_buffer(journal, jh);
 			/* If that was the last one, we need to clean up
@@ -762,6 +765,9 @@
 		/* AKPM: bforget here */
 	}
 
+	if (err)
+		journal_abort(journal, err);
+
 	jbd_debug(3, "JBD: commit phase 6\n");
 
 	if (journal_write_commit_record(journal, commit_transaction))
@@ -852,6 +858,8 @@
 		if (buffer_jbddirty(bh)) {
 			JBUFFER_TRACE(jh, "add to new checkpointing trans");
 			__journal_insert_checkpoint(jh, commit_transaction);
+			if (is_journal_aborted(journal))
+				clear_buffer_jbddirty(bh);
 			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
 			__journal_refile_buffer(jh);
 			jbd_unlock_bh_state(bh);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index aa7143a..9e4fa52 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1121,9 +1121,12 @@
  *
  * Release a journal_t structure once it is no longer in use by the
  * journaled object.
+ * Return <0 if we couldn't clean up the journal.
  */
-void journal_destroy(journal_t *journal)
+int journal_destroy(journal_t *journal)
 {
+	int err = 0;
+
 	/* Wait for the commit thread to wake up and die. */
 	journal_kill_thread(journal);
 
@@ -1146,11 +1149,16 @@
 	J_ASSERT(journal->j_checkpoint_transactions == NULL);
 	spin_unlock(&journal->j_list_lock);
 
-	/* We can now mark the journal as empty. */
-	journal->j_tail = 0;
-	journal->j_tail_sequence = ++journal->j_transaction_sequence;
 	if (journal->j_sb_buffer) {
-		journal_update_superblock(journal, 1);
+		if (!is_journal_aborted(journal)) {
+			/* We can now mark the journal as empty. */
+			journal->j_tail = 0;
+			journal->j_tail_sequence =
+				++journal->j_transaction_sequence;
+			journal_update_superblock(journal, 1);
+		} else {
+			err = -EIO;
+		}
 		brelse(journal->j_sb_buffer);
 	}
 
@@ -1160,6 +1168,8 @@
 		journal_destroy_revoke(journal);
 	kfree(journal->j_wbuf);
 	kfree(journal);
+
+	return err;
 }
 
 
@@ -1359,10 +1369,16 @@
 	spin_lock(&journal->j_list_lock);
 	while (!err && journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
+		mutex_lock(&journal->j_checkpoint_mutex);
 		err = log_do_checkpoint(journal);
+		mutex_unlock(&journal->j_checkpoint_mutex);
 		spin_lock(&journal->j_list_lock);
 	}
 	spin_unlock(&journal->j_list_lock);
+
+	if (is_journal_aborted(journal))
+		return -EIO;
+
 	cleanup_journal_tail(journal);
 
 	/* Finally, mark the journal as really needing no recovery.
@@ -1384,7 +1400,7 @@
 	J_ASSERT(journal->j_head == journal->j_tail);
 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
 	spin_unlock(&journal->j_state_lock);
-	return err;
+	return 0;
 }
 
 /**
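
Because journal_destroy() can now return an error, unmount paths that used to ignore it should check the result. A hedged sketch of what such a caller might look like; the surrounding filesystem names are illustrative and not part of this patch:

	/* Sketch: report a failed journal cleanup at unmount time. */
	static void example_put_super(struct super_block *sb)
	{
		struct example_sb_info *sbi = sb->s_fs_info;	/* hypothetical */

		if (journal_destroy(sbi->s_journal) < 0)
			printk(KERN_ERR "example: couldn't clean up the journal\n");
		sbi->s_journal = NULL;
		/* ... the rest of the usual unmount teardown ... */
	}
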
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 43bc5e5..db5e982 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -223,7 +223,7 @@
  */
 int journal_recover(journal_t *journal)
 {
-	int			err;
+	int			err, err2;
 	journal_superblock_t *	sb;
 
 	struct recovery_info	info;
@@ -261,7 +261,10 @@
 	journal->j_transaction_sequence = ++info.end_transaction;
 
 	journal_clear_revoke(journal);
-	sync_blockdev(journal->j_fs_dev);
+	err2 = sync_blockdev(journal->j_fs_dev);
+	if (!err)
+		err = err2;
+
 	return err;
 }
 
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 0540ca2..d15cd6e 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -954,9 +954,10 @@
 	journal_t *journal = handle->h_transaction->t_journal;
 	int need_brelse = 0;
 	struct journal_head *jh;
+	int ret = 0;
 
 	if (is_handle_aborted(handle))
-		return 0;
+		return ret;
 
 	jh = journal_add_journal_head(bh);
 	JBUFFER_TRACE(jh, "entry");
@@ -1067,7 +1068,16 @@
 				   time if it is redirtied */
 			}
 
-			/* journal_clean_data_list() may have got there first */
+			/*
+			 * We cannot remove the buffer with io error from the
+			 * committing transaction, because otherwise it would
+			 * miss the error and the commit would not abort.
+			 */
+			if (unlikely(!buffer_uptodate(bh))) {
+				ret = -EIO;
+				goto no_journal;
+			}
+
 			if (jh->b_transaction != NULL) {
 				JBUFFER_TRACE(jh, "unfile from commit");
 				__journal_temp_unlink_buffer(jh);
@@ -1108,7 +1118,7 @@
 	}
 	JBUFFER_TRACE(jh, "exit");
 	journal_put_journal_head(jh);
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
new file mode 100644
index 0000000..f32f346
--- /dev/null
+++ b/fs/jbd2/Kconfig
@@ -0,0 +1,33 @@
+config JBD2
+	tristate
+	select CRC32
+	help
+	  This is a generic journaling layer for block devices that support
+	  both 32-bit and 64-bit block numbers.  It is currently used by
+	  the ext4 and OCFS2 filesystems, but it could also be used to add
+	  journal support to other file systems or block devices such
+	  as RAID or LVM.
+
+	  If you are using ext4 or OCFS2, you need to say Y here.
+	  If you are not using ext4 or OCFS2 then you will
+	  probably want to say N.
+
+	  To compile this device as a module, choose M here. The module will be
+	  called jbd2.  If you are compiling ext4 or OCFS2 into the kernel,
+	  you cannot compile this code as a module.
+
+config JBD2_DEBUG
+	bool "JBD2 (ext4) debugging support"
+	depends on JBD2 && DEBUG_FS
+	help
+	  If you are using the ext4 journaled file system (or
+	  potentially any other filesystem/device using JBD2), this option
+	  allows you to enable debugging output while the system is running,
+	  in order to help track down any problems you are having.
+	  By default, the debugging output will be turned off.
+
+	  If you select Y here, then you will be able to turn on debugging
+	  with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
+	  number between 1 and 5. The higher the number, the more debugging
+	  output is generated.  To turn debugging off again, do
+	  "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 0abe02c..8b119e1 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -995,6 +995,9 @@
 	}
 	spin_unlock(&journal->j_list_lock);
 
+	if (journal->j_commit_callback)
+		journal->j_commit_callback(journal, commit_transaction);
+
 	trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
 		   journal->j_devname, commit_transaction->t_tid,
 		   journal->j_tail_sequence);
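
The jbd2 hunk above gives client filesystems a commit callback and a t_private_list to park per-transaction work until it is safely committed. One plausible way a client could wire it up is sketched below; the entry type and what the callback does with it are assumptions, only j_commit_callback and t_private_list come from this patch:

	/* Hypothetical bookkeeping a filesystem might queue on a transaction. */
	struct example_private_entry {
		struct list_head list;
	};

	/* Runs in the commit thread once the transaction has committed. */
	static void example_commit_callback(journal_t *journal, transaction_t *txn)
	{
		struct example_private_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &txn->t_private_list, list) {
			list_del(&e->list);
			kfree(e);	/* or release blocks, wake waiters, ... */
		}
	}

	/* Registered once, e.g. after the journal has been loaded at mount. */
	static void example_register_commit_callback(journal_t *journal)
	{
		journal->j_commit_callback = example_commit_callback;
	}
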
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e5d5405..39b7805 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -52,6 +52,7 @@
 	transaction->t_expires = jiffies + journal->j_commit_interval;
 	spin_lock_init(&transaction->t_handle_lock);
 	INIT_LIST_HEAD(&transaction->t_inode_list);
+	INIT_LIST_HEAD(&transaction->t_private_list);
 
 	/* Set up the commit timer for the new transaction. */
 	journal->j_commit_timer.expires = round_jiffies(transaction->t_expires);
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig
new file mode 100644
index 0000000..6ae169c
--- /dev/null
+++ b/fs/jffs2/Kconfig
@@ -0,0 +1,188 @@
+config JFFS2_FS
+	tristate "Journalling Flash File System v2 (JFFS2) support"
+	select CRC32
+	depends on MTD
+	help
+	  JFFS2 is the second generation of the Journalling Flash File System
+	  for use on diskless embedded devices. It provides improved wear
+	  levelling, compression and support for hard links. You cannot use
+	  this on normal block devices, only on 'MTD' devices.
+
+	  Further information on the design and implementation of JFFS2 is
+	  available at <http://sources.redhat.com/jffs2/>.
+
+config JFFS2_FS_DEBUG
+	int "JFFS2 debugging verbosity (0 = quiet, 2 = noisy)"
+	depends on JFFS2_FS
+	default "0"
+	help
+	  This controls the amount of debugging messages produced by the JFFS2
+	  code. Set it to zero for use in production systems. For evaluation,
+	  testing and debugging, it's advisable to set it to one. This will
+	  enable a few assertions and will print debugging messages at the
+	  KERN_DEBUG loglevel, where they won't normally be visible. Level 2
+	  is unlikely to be useful - it enables extra debugging in certain
+	  areas which at one point needed debugging, but when the bugs were
+	  located and fixed, the detailed messages were relegated to level 2.
+
+	  If reporting bugs, please try to have available a full dump of the
+	  messages at debug level 1 while the misbehaviour was occurring.
+
+config JFFS2_FS_WRITEBUFFER
+	bool "JFFS2 write-buffering support"
+	depends on JFFS2_FS
+	default y
+	help
+	  This enables the write-buffering support in JFFS2.
+
+	  This functionality is required to support JFFS2 on the following
+	  types of flash devices:
+	    - NAND flash
+	    - NOR flash with transparent ECC
+	    - DataFlash
+
+config JFFS2_FS_WBUF_VERIFY
+	bool "Verify JFFS2 write-buffer reads"
+	depends on JFFS2_FS_WRITEBUFFER
+	default n
+	help
+	  This causes JFFS2 to read back every page written through the
+	  write-buffer, and check for errors.
+
+config JFFS2_SUMMARY
+	bool "JFFS2 summary support (EXPERIMENTAL)"
+	depends on JFFS2_FS && EXPERIMENTAL
+	default n
+	help
+	  This feature makes it possible to use summary information
+	  for faster filesystem mount.
+
+	  The summary information can be inserted into a filesystem image
+	  by the utility 'sumtool'.
+
+	  If unsure, say 'N'.
+
+config JFFS2_FS_XATTR
+	bool "JFFS2 XATTR support (EXPERIMENTAL)"
+	depends on JFFS2_FS && EXPERIMENTAL
+	default n
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config JFFS2_FS_POSIX_ACL
+	bool "JFFS2 POSIX Access Control Lists"
+	depends on JFFS2_FS_XATTR
+	default y
+	select FS_POSIX_ACL
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N
+
+config JFFS2_FS_SECURITY
+	bool "JFFS2 Security Labels"
+	depends on JFFS2_FS_XATTR
+	default y
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the jffs2 filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config JFFS2_COMPRESSION_OPTIONS
+	bool "Advanced compression options for JFFS2"
+	depends on JFFS2_FS
+	default n
+	help
+	  Enabling this option allows you to explicitly choose which
+	  compression modules, if any, are enabled in JFFS2. Removing
+	  compressors can mean you cannot read existing file systems,
+	  and enabling experimental compressors can mean that you
+	  write a file system which cannot be read by a standard kernel.
+
+	  If unsure, you should _definitely_ say 'N'.
+
+config JFFS2_ZLIB
+	bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS
+	select ZLIB_INFLATE
+	select ZLIB_DEFLATE
+	depends on JFFS2_FS
+	default y
+	help
+	  Zlib is designed to be a free, general-purpose, legally unencumbered,
+	  lossless data-compression library for use on virtually any computer
+	  hardware and operating system. See <http://www.gzip.org/zlib/> for
+	  further information.
+
+	  Say 'Y' if unsure.
+
+config JFFS2_LZO
+	bool "JFFS2 LZO compression support" if JFFS2_COMPRESSION_OPTIONS
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
+	depends on JFFS2_FS
+	default n
+	help
+	  minilzo-based compression. Generally works better than Zlib.
+
+	  This feature was added in July, 2007. Say 'N' if you need
+	  compatibility with older bootloaders or kernels.
+
+config JFFS2_RTIME
+	bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+	depends on JFFS2_FS
+	default y
+	help
+	  Rtime does manage to recompress already-compressed data. Say 'Y' if unsure.
+
+config JFFS2_RUBIN
+	bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS
+	depends on JFFS2_FS
+	default n
+	help
+	  RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure.
+
+choice
+	prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS
+	default JFFS2_CMODE_PRIORITY
+	depends on JFFS2_FS
+	help
+	  You can set here the default compression mode of JFFS2 from
+	  the available compression modes. Don't touch if unsure.
+
+config JFFS2_CMODE_NONE
+	bool "no compression"
+	help
+	  Uses no compression.
+
+config JFFS2_CMODE_PRIORITY
+	bool "priority"
+	help
+	  Tries the compressors in a predefined order and chooses the first
+	  successful one.
+
+config JFFS2_CMODE_SIZE
+	bool "size (EXPERIMENTAL)"
+	help
+	  Tries all compressors and chooses the one which has the smallest
+	  result.
+
+config JFFS2_CMODE_FAVOURLZO
+	bool "Favour LZO"
+	help
+	  Tries all compressors and chooses the one which has the smallest
+	  result but gives some preference to LZO (which has faster
+	  decompression) at the expense of size.
+
+endchoice
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 86739ee..f25e70c 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -53,8 +53,8 @@
 }
 
 /* jffs2_compress:
- * @data: Pointer to uncompressed data
- * @cdata: Pointer to returned pointer to buffer for compressed data
+ * @data_in: Pointer to uncompressed data
+ * @cpage_out: Pointer to returned pointer to buffer for compressed data
  * @datalen: On entry, holds the amount of data available for compression.
  *	On exit, expected to hold the amount of data actually compressed.
  * @cdatalen: On entry, holds the amount of space available for compressed
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index cd219ef..b1aaae8 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -311,7 +311,7 @@
 	/* FIXME: If you care. We'd need to use frags for the target
 	   if it grows much more than this */
 	if (targetlen > 254)
-		return -EINVAL;
+		return -ENAMETOOLONG;
 
 	ri = jffs2_alloc_raw_inode();
 
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index dddb2a6..259461b 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -68,7 +68,7 @@
 	instr->len = c->sector_size;
 	instr->callback = jffs2_erase_callback;
 	instr->priv = (unsigned long)(&instr[1]);
-	instr->fail_addr = 0xffffffff;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
 	((struct erase_priv_struct *)instr->priv)->c = c;
@@ -175,7 +175,7 @@
 {
 	/* For NAND, if the failure did not occur at the device level for a
 	   specific physical page, don't bother updating the bad block table. */
-	if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
+	if (jffs2_cleanmarker_oob(c) && (bad_offset != MTD_FAIL_ADDR_UNKNOWN)) {
 		/* We had a device-level failure to erase.  Let's see if we've
 		   failed too many times. */
 		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 086c438..249305d 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -207,6 +207,8 @@
 	buf->f_files = 0;
 	buf->f_ffree = 0;
 	buf->f_namelen = JFFS2_MAX_NAME_LEN;
+	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
+	buf->f_fsid.val[1] = c->mtd->index;
 
 	spin_lock(&c->erase_completion_lock);
 	avail = c->dirty_size + c->free_size;
@@ -440,14 +442,14 @@
 
 	memset(ri, 0, sizeof(*ri));
 	/* Set OS-specific defaults for new inodes */
-	ri->uid = cpu_to_je16(current->fsuid);
+	ri->uid = cpu_to_je16(current_fsuid());
 
 	if (dir_i->i_mode & S_ISGID) {
 		ri->gid = cpu_to_je16(dir_i->i_gid);
 		if (S_ISDIR(mode))
 			mode |= S_ISGID;
 	} else {
-		ri->gid = cpu_to_je16(current->fsgid);
+		ri->gid = cpu_to_je16(current_fsgid());
 	}
 
 	/* POSIX ACLs have to be processed now, at least partly.
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index a9bf960..0875b60 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -261,6 +261,10 @@
 
 	jffs2_sum_reset_collected(c->summary); /* reset collected summary */
 
+	/* adjust write buffer offset, else we get a non contiguous write bug */
+	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
+		c->wbuf_ofs = 0xffffffff;
+
 	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
 
 	return 0;
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 0e78b00..d9a721e 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -679,10 +679,7 @@
 
 	memset(c->wbuf,0xff,c->wbuf_pagesize);
 	/* adjust write buffer offset, else we get a non contiguous write bug */
-	if (SECTOR_ADDR(c->wbuf_ofs) == SECTOR_ADDR(c->wbuf_ofs+c->wbuf_pagesize))
-		c->wbuf_ofs += c->wbuf_pagesize;
-	else
-		c->wbuf_ofs = 0xffffffff;
+	c->wbuf_ofs += c->wbuf_pagesize;
 	c->wbuf_len = 0;
 	return 0;
 }
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 6a09760..c2e9cfd 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -40,6 +40,16 @@
 static const int nfs_set_port_min = 0;
 static const int nfs_set_port_max = 65535;
 
+/*
+ * If the kernel has IPv6 support available, always listen for
+ * both AF_INET and AF_INET6 requests.
+ */
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static const sa_family_t	nfs_callback_family = AF_INET6;
+#else
+static const sa_family_t	nfs_callback_family = AF_INET;
+#endif
+
 static int param_set_port(const char *val, struct kernel_param *kp)
 {
 	char *endp;
@@ -106,7 +116,7 @@
 	if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
 		goto out;
 	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE,
-				AF_INET, NULL);
+				nfs_callback_family, NULL);
 	ret = -ENOMEM;
 	if (!serv)
 		goto out_err;
@@ -116,7 +126,8 @@
 	if (ret <= 0)
 		goto out_err;
 	nfs_callback_tcpport = ret;
-	dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);
+	dprintk("NFS: Callback listener port = %u (af %u)\n",
+			nfs_callback_tcpport, nfs_callback_family);
 
 	nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
 	if (IS_ERR(nfs_callback_info.rqst)) {
@@ -149,8 +160,8 @@
 	mutex_unlock(&nfs_callback_mutex);
 	return ret;
 out_err:
-	dprintk("Couldn't create callback socket or server thread; err = %d\n",
-		ret);
+	dprintk("NFS: Couldn't create callback socket or server thread; "
+		"err = %d\n", ret);
 	nfs_callback_info.users--;
 	goto out;
 }
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2ab70d4..efdba2e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1517,7 +1517,7 @@
 	if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0,
 							GFP_KERNEL)) {
 		pagevec_add(&lru_pvec, page);
-		pagevec_lru_add(&lru_pvec);
+		pagevec_lru_add_file(&lru_pvec);
 		SetPageUptodate(page);
 		unlock_page(page);
 	} else
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c910413..83e700a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1659,8 +1659,10 @@
 		struct nfs_open_context *ctx;
 
 		ctx = nfs_file_open_context(sattr->ia_file);
-		cred = ctx->cred;
-		state = ctx->state;
+		if (ctx) {
+			cred = ctx->cred;
+			state = ctx->state;
+		}
 	}
 
 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 8b28b95c..a3b0061 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2459,7 +2459,7 @@
 		compare_super = NULL;
 
 	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
 	if (IS_ERR(s)) {
 		error = PTR_ERR(s);
 		goto out_err_nosb;
@@ -2544,7 +2544,7 @@
 		compare_super = NULL;
 
 	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
 	if (IS_ERR(s)) {
 		error = PTR_ERR(s);
 		goto out_err_nosb;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 9dc036f..5cd882b 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -99,7 +99,7 @@
 	int fsidtype;
 	char *ep;
 	struct svc_expkey key;
-	struct svc_expkey *ek;
+	struct svc_expkey *ek = NULL;
 
 	if (mesg[mlen-1] != '\n')
 		return -EINVAL;
@@ -107,7 +107,8 @@
 
 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	err = -ENOMEM;
-	if (!buf) goto out;
+	if (!buf)
+		goto out;
 
 	err = -EINVAL;
 	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
@@ -151,16 +152,16 @@
 
 	/* now we want a pathname, or empty meaning NEGATIVE  */
 	err = -EINVAL;
-	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) < 0)
+	len = qword_get(&mesg, buf, PAGE_SIZE);
+	if (len < 0)
 		goto out;
 	dprintk("Path seems to be <%s>\n", buf);
 	err = 0;
 	if (len == 0) {
 		set_bit(CACHE_NEGATIVE, &key.h.flags);
 		ek = svc_expkey_update(&key, ek);
-		if (ek)
-			cache_put(&ek->h, &svc_expkey_cache);
-		else err = -ENOMEM;
+		if (!ek)
+			err = -ENOMEM;
 	} else {
 		struct nameidata nd;
 		err = path_lookup(buf, 0, &nd);
@@ -171,14 +172,14 @@
 		key.ek_path = nd.path;
 
 		ek = svc_expkey_update(&key, ek);
-		if (ek)
-			cache_put(&ek->h, &svc_expkey_cache);
-		else
+		if (!ek)
 			err = -ENOMEM;
 		path_put(&nd.path);
 	}
 	cache_flush();
  out:
+	if (ek)
+		cache_put(&ek->h, &svc_expkey_cache);
 	if (dom)
 		auth_domain_put(dom);
 	kfree(buf);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 59eeb46..07e4f5d 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -249,6 +249,10 @@
 	if (error < 0)
 		return error;
 
+	error = lockd_up();
+	if (error < 0)
+		return error;
+
 	error = svc_create_xprt(nfsd_serv, "tcp", port,
 					SVC_SOCK_DEFAULTS);
 	if (error < 0)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index aa1d0d6..9609eb5 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -410,6 +410,7 @@
 static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
 {
 	ssize_t buflen;
+	ssize_t ret;
 
 	buflen = vfs_getxattr(dentry, key, NULL, 0);
 	if (buflen <= 0)
@@ -419,7 +420,10 @@
 	if (!*buf)
 		return -ENOMEM;
 
-	return vfs_getxattr(dentry, key, *buf, buflen);
+	ret = vfs_getxattr(dentry, key, *buf, buflen);
+	if (ret < 0)
+		kfree(*buf);
+	return ret;
 }
 #endif
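
The nfsd_getxattr() change plugs a leak: the buffer allocated for the attribute value was never freed when the second vfs_getxattr() call failed. Reduced to its core, the pattern looks like the sketch below (hypothetical helper name, same two-call vfs_getxattr() protocol as the code above):

	/* Sketch: never hand the caller a buffer when returning an error. */
	static ssize_t example_get_value(struct dentry *dentry, char *key, void **buf)
	{
		ssize_t len, ret;

		len = vfs_getxattr(dentry, key, NULL, 0);	/* probe the length */
		if (len <= 0)
			return len;

		*buf = kmalloc(len, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		ret = vfs_getxattr(dentry, key, *buf, len);
		if (ret < 0) {
			kfree(*buf);				/* the fix */
			*buf = NULL;
		}
		return ret;
	}
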
 
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index d020866..3140a44 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -439,7 +439,7 @@
 			pages[nr] = *cached_page;
 			page_cache_get(*cached_page);
 			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
-				__pagevec_lru_add(lru_pvec);
+				__pagevec_lru_add_file(lru_pvec);
 			*cached_page = NULL;
 		}
 		index++;
@@ -2084,7 +2084,7 @@
 						OSYNC_METADATA|OSYNC_DATA);
 		}
   	}
-	pagevec_lru_add(&lru_pvec);
+	pagevec_lru_add_file(&lru_pvec);
 	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
 			written ? "written" : "status", (unsigned long)written,
 			(long)status);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f4bc0e7..bb9f4b0 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -388,20 +388,20 @@
 
 		/* add up live thread stats at the group level */
 		if (whole) {
+			struct task_cputime cputime;
 			struct task_struct *t = task;
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				utime = cputime_add(utime, task_utime(t));
-				stime = cputime_add(stime, task_stime(t));
 				gtime = cputime_add(gtime, task_gtime(t));
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
-			utime = cputime_add(utime, sig->utime);
-			stime = cputime_add(stime, sig->stime);
+			thread_group_cputime(task, &cputime);
+			utime = cputime.utime;
+			stime = cputime.stime;
 			gtime = cputime_add(gtime, sig->gtime);
 		}
 
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 59ea42e..7ea52c7 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -30,6 +30,7 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/pagemap.h>
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
@@ -136,6 +137,8 @@
 	unsigned long allowed;
 	struct vmalloc_info vmi;
 	long cached;
+	unsigned long pages[NR_LRU_LISTS];
+	int lru;
 
 /*
  * display in kilobytes.
@@ -154,51 +157,70 @@
 
 	get_vmalloc_info(&vmi);
 
+	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
+		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+
 	/*
 	 * Tagged format, for easy grepping and expansion.
 	 */
 	len = sprintf(page,
-		"MemTotal:     %8lu kB\n"
-		"MemFree:      %8lu kB\n"
-		"Buffers:      %8lu kB\n"
-		"Cached:       %8lu kB\n"
-		"SwapCached:   %8lu kB\n"
-		"Active:       %8lu kB\n"
-		"Inactive:     %8lu kB\n"
+		"MemTotal:       %8lu kB\n"
+		"MemFree:        %8lu kB\n"
+		"Buffers:        %8lu kB\n"
+		"Cached:         %8lu kB\n"
+		"SwapCached:     %8lu kB\n"
+		"Active:         %8lu kB\n"
+		"Inactive:       %8lu kB\n"
+		"Active(anon):   %8lu kB\n"
+		"Inactive(anon): %8lu kB\n"
+		"Active(file):   %8lu kB\n"
+		"Inactive(file): %8lu kB\n"
+#ifdef CONFIG_UNEVICTABLE_LRU
+		"Unevictable:    %8lu kB\n"
+		"Mlocked:        %8lu kB\n"
+#endif
 #ifdef CONFIG_HIGHMEM
-		"HighTotal:    %8lu kB\n"
-		"HighFree:     %8lu kB\n"
-		"LowTotal:     %8lu kB\n"
-		"LowFree:      %8lu kB\n"
+		"HighTotal:      %8lu kB\n"
+		"HighFree:       %8lu kB\n"
+		"LowTotal:       %8lu kB\n"
+		"LowFree:        %8lu kB\n"
 #endif
-		"SwapTotal:    %8lu kB\n"
-		"SwapFree:     %8lu kB\n"
-		"Dirty:        %8lu kB\n"
-		"Writeback:    %8lu kB\n"
-		"AnonPages:    %8lu kB\n"
-		"Mapped:       %8lu kB\n"
-		"Slab:         %8lu kB\n"
-		"SReclaimable: %8lu kB\n"
-		"SUnreclaim:   %8lu kB\n"
-		"PageTables:   %8lu kB\n"
+		"SwapTotal:      %8lu kB\n"
+		"SwapFree:       %8lu kB\n"
+		"Dirty:          %8lu kB\n"
+		"Writeback:      %8lu kB\n"
+		"AnonPages:      %8lu kB\n"
+		"Mapped:         %8lu kB\n"
+		"Slab:           %8lu kB\n"
+		"SReclaimable:   %8lu kB\n"
+		"SUnreclaim:     %8lu kB\n"
+		"PageTables:     %8lu kB\n"
 #ifdef CONFIG_QUICKLIST
-		"Quicklists:   %8lu kB\n"
+		"Quicklists:     %8lu kB\n"
 #endif
-		"NFS_Unstable: %8lu kB\n"
-		"Bounce:       %8lu kB\n"
-		"WritebackTmp: %8lu kB\n"
-		"CommitLimit:  %8lu kB\n"
-		"Committed_AS: %8lu kB\n"
-		"VmallocTotal: %8lu kB\n"
-		"VmallocUsed:  %8lu kB\n"
-		"VmallocChunk: %8lu kB\n",
+		"NFS_Unstable:   %8lu kB\n"
+		"Bounce:         %8lu kB\n"
+		"WritebackTmp:   %8lu kB\n"
+		"CommitLimit:    %8lu kB\n"
+		"Committed_AS:   %8lu kB\n"
+		"VmallocTotal:   %8lu kB\n"
+		"VmallocUsed:    %8lu kB\n"
+		"VmallocChunk:   %8lu kB\n",
 		K(i.totalram),
 		K(i.freeram),
 		K(i.bufferram),
 		K(cached),
 		K(total_swapcache_pages),
-		K(global_page_state(NR_ACTIVE)),
-		K(global_page_state(NR_INACTIVE)),
+		K(pages[LRU_ACTIVE_ANON]   + pages[LRU_ACTIVE_FILE]),
+		K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]),
+		K(pages[LRU_ACTIVE_ANON]),
+		K(pages[LRU_INACTIVE_ANON]),
+		K(pages[LRU_ACTIVE_FILE]),
+		K(pages[LRU_INACTIVE_FILE]),
+#ifdef CONFIG_UNEVICTABLE_LRU
+		K(pages[LRU_UNEVICTABLE]),
+		K(global_page_state(NR_MLOCK)),
+#endif
 #ifdef CONFIG_HIGHMEM
 		K(i.totalhigh),
 		K(i.freehigh),
@@ -500,17 +522,13 @@
 
 static int show_stat(struct seq_file *p, void *v)
 {
-	int i;
+	int i, j;
 	unsigned long jif;
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
 	cputime64_t guest;
 	u64 sum = 0;
 	struct timespec boottime;
-	unsigned int *per_irq_sum;
-
-	per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
-	if (!per_irq_sum)
-		return -ENOMEM;
+	unsigned int per_irq_sum;
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
@@ -519,8 +537,6 @@
 	jif = boottime.tv_sec;
 
 	for_each_possible_cpu(i) {
-		int j;
-
 		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
 		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
 		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
@@ -530,11 +546,10 @@
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-		for (j = 0; j < NR_IRQS; j++) {
-			unsigned int temp = kstat_cpu(i).irqs[j];
-			sum += temp;
-			per_irq_sum[j] += temp;
-		}
+
+		for_each_irq_nr(j)
+			sum += kstat_irqs_cpu(j, i);
+
 		sum += arch_irq_stat_cpu(i);
 	}
 	sum += arch_irq_stat();
@@ -576,8 +591,15 @@
 	}
 	seq_printf(p, "intr %llu", (unsigned long long)sum);
 
-	for (i = 0; i < NR_IRQS; i++)
-		seq_printf(p, " %u", per_irq_sum[i]);
+	/* sum per-IRQ counts again; they may have changed since the loop above */
+	for_each_irq_nr(j) {
+		per_irq_sum = 0;
+
+		for_each_possible_cpu(i)
+			per_irq_sum += kstat_irqs_cpu(j, i);
+
+		seq_printf(p, " %u", per_irq_sum);
+	}
 
 	seq_printf(p,
 		"\nctxt %llu\n"
@@ -591,7 +613,6 @@
 		nr_running(),
 		nr_iowait());
 
-	kfree(per_irq_sum);
 	return 0;
 }
 
@@ -630,15 +651,14 @@
  */
 static void *int_seq_start(struct seq_file *f, loff_t *pos)
 {
-	return (*pos <= NR_IRQS) ? pos : NULL;
+	return (*pos <= nr_irqs) ? pos : NULL;
 }
 
+
 static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
 {
 	(*pos)++;
-	if (*pos > NR_IRQS)
-		return NULL;
-	return pos;
+	return (*pos <= nr_irqs) ? pos : NULL;
 }
 
 static void int_seq_stop(struct seq_file *f, void *v)
@@ -646,7 +666,6 @@
 	/* Nothing to do */
 }
 
-
 static const struct seq_operations int_seq_ops = {
 	.start = int_seq_start,
 	.next  = int_seq_next,
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 841368b..cd9ca67 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -32,9 +32,6 @@
 /* Total size of vmcore file. */
 static u64 vmcore_size;
 
-/* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-
 struct proc_dir_entry *proc_vmcore = NULL;
 
 /* Reads a page from the oldmem device from given offset. */
@@ -647,7 +644,7 @@
 	int rc = 0;
 
 	/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
-	if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
+	if (!(is_vmcore_usable()))
 		return rc;
 	rc = parse_crash_elf_headers();
 	if (rc) {
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5145cb9..76acdbc 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -112,12 +112,12 @@
 			goto add_error;
 
 		if (!pagevec_add(&lru_pvec, page))
-			__pagevec_lru_add(&lru_pvec);
+			__pagevec_lru_add_file(&lru_pvec);
 
 		unlock_page(page);
 	}
 
-	pagevec_lru_add(&lru_pvec);
+	pagevec_lru_add_file(&lru_pvec);
 	return 0;
 
  fsize_exceeded:
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index b131234..f031d1c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -61,6 +61,7 @@
 		inode->i_mapping->a_ops = &ramfs_aops;
 		inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
 		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
+		mapping_set_unevictable(inode->i_mapping);
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		switch (mode & S_IFMT) {
 		default:
diff --git a/fs/seq_file.c b/fs/seq_file.c
index bd20f7f..eba2eab 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -452,17 +452,34 @@
 
 int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits)
 {
-	size_t len = bitmap_scnprintf_len(nr_bits);
-
-	if (m->count + len < m->size) {
-		bitmap_scnprintf(m->buf + m->count, m->size - m->count,
-				 bits, nr_bits);
-		m->count += len;
-		return 0;
+	if (m->count < m->size) {
+		int len = bitmap_scnprintf(m->buf + m->count,
+				m->size - m->count, bits, nr_bits);
+		if (m->count + len < m->size) {
+			m->count += len;
+			return 0;
+		}
 	}
 	m->count = m->size;
 	return -1;
 }
+EXPORT_SYMBOL(seq_bitmap);
+
+int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
+		unsigned int nr_bits)
+{
+	if (m->count < m->size) {
+		int len = bitmap_scnlistprintf(m->buf + m->count,
+				m->size - m->count, bits, nr_bits);
+		if (m->count + len < m->size) {
+			m->count += len;
+			return 0;
+		}
+	}
+	m->count = m->size;
+	return -1;
+}
+EXPORT_SYMBOL(seq_bitmap_list);
 
 static void *single_start(struct seq_file *p, loff_t *pos)
 {
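
With seq_bitmap() exported and seq_bitmap_list() added, a seq_file show routine can print a bitmap directly instead of formatting it by hand. A small usage sketch; the bitmap and its size are made up for illustration:

	#define EXAMPLE_NR_BITS	64

	static DECLARE_BITMAP(example_mask, EXAMPLE_NR_BITS);	/* hypothetical state */

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "mask:      ");
		seq_bitmap(m, example_mask, EXAMPLE_NR_BITS);	/* hex words */
		seq_printf(m, "\nmask_list: ");
		seq_bitmap_list(m, example_mask, EXAMPLE_NR_BITS); /* "0-3,8" style */
		seq_printf(m, "\n");
		return 0;
	}
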
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 73db464..1a4973e 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -414,19 +414,21 @@
 	 *    @c->lst.empty_lebs + @c->freeable_cnt + @c->idx_gc_cnt -
 	 *    @c->lst.taken_empty_lebs
 	 *
-	 * @empty_lebs are available because they are empty. @freeable_cnt are
-	 * available because they contain only free and dirty space and the
-	 * index allocation always occurs after wbufs are synch'ed.
-	 * @idx_gc_cnt are available because they are index LEBs that have been
-	 * garbage collected (including trivial GC) and are awaiting the commit
-	 * before they can be unmapped - note that the in-the-gaps method will
-	 * grab these if it needs them. @taken_empty_lebs are empty_lebs that
-	 * have already been allocated for some purpose (also includes those
-	 * LEBs on the @idx_gc list).
+	 * @c->lst.empty_lebs are available because they are empty.
+	 * @c->freeable_cnt are available because they contain only free and
+	 * dirty space, @c->idx_gc_cnt are available because they are index
+	 * LEBs that have been garbage collected and are awaiting the commit
+	 * before they can be used. And the in-the-gaps method will grab these
+	 * if it needs them. @c->lst.taken_empty_lebs are empty LEBs that have
+	 * already been allocated for some purpose.
 	 *
-	 * Note, @taken_empty_lebs may temporarily be higher by one because of
-	 * the way we serialize LEB allocations and budgeting. See a comment in
-	 * 'ubifs_find_free_space()'.
+	 * Note, @c->idx_gc_cnt is included to both @c->lst.empty_lebs (because
+	 * these LEBs are empty) and to @c->lst.taken_empty_lebs (because they
+	 * are taken until after the commit).
+	 *
+	 * Note, @c->lst.taken_empty_lebs may temporarily be higher by one
+	 * because of the way we serialize LEB allocations and budgeting. See a
+	 * comment in 'ubifs_find_free_space()'.
 	 */
 	lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
 	       c->lst.taken_empty_lebs;
diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c
index 5bb51da..a0ada59 100644
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -91,8 +91,6 @@
  *
  * Note, if the input buffer was not compressed, it is copied to the output
  * buffer and %UBIFS_COMPR_NONE is returned in @compr_type.
- *
- * This functions returns %0 on success or a negative error code on failure.
  */
 void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
 		    int *compr_type)
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index d7f7645..7186400 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -222,30 +222,38 @@
 {
 	const struct ubifs_inode *ui = ubifs_inode(inode);
 
-	printk(KERN_DEBUG "inode      %lu\n", inode->i_ino);
-	printk(KERN_DEBUG "size       %llu\n",
+	printk(KERN_DEBUG "Dump in-memory inode:");
+	printk(KERN_DEBUG "\tinode          %lu\n", inode->i_ino);
+	printk(KERN_DEBUG "\tsize           %llu\n",
 	       (unsigned long long)i_size_read(inode));
-	printk(KERN_DEBUG "nlink      %u\n", inode->i_nlink);
-	printk(KERN_DEBUG "uid        %u\n", (unsigned int)inode->i_uid);
-	printk(KERN_DEBUG "gid        %u\n", (unsigned int)inode->i_gid);
-	printk(KERN_DEBUG "atime      %u.%u\n",
+	printk(KERN_DEBUG "\tnlink          %u\n", inode->i_nlink);
+	printk(KERN_DEBUG "\tuid            %u\n", (unsigned int)inode->i_uid);
+	printk(KERN_DEBUG "\tgid            %u\n", (unsigned int)inode->i_gid);
+	printk(KERN_DEBUG "\tatime          %u.%u\n",
 	       (unsigned int)inode->i_atime.tv_sec,
 	       (unsigned int)inode->i_atime.tv_nsec);
-	printk(KERN_DEBUG "mtime      %u.%u\n",
+	printk(KERN_DEBUG "\tmtime          %u.%u\n",
 	       (unsigned int)inode->i_mtime.tv_sec,
 	       (unsigned int)inode->i_mtime.tv_nsec);
-	printk(KERN_DEBUG "ctime       %u.%u\n",
+	printk(KERN_DEBUG "\tctime          %u.%u\n",
 	       (unsigned int)inode->i_ctime.tv_sec,
 	       (unsigned int)inode->i_ctime.tv_nsec);
-	printk(KERN_DEBUG "creat_sqnum %llu\n", ui->creat_sqnum);
-	printk(KERN_DEBUG "xattr_size  %u\n", ui->xattr_size);
-	printk(KERN_DEBUG "xattr_cnt   %u\n", ui->xattr_cnt);
-	printk(KERN_DEBUG "xattr_names %u\n", ui->xattr_names);
-	printk(KERN_DEBUG "dirty       %u\n", ui->dirty);
-	printk(KERN_DEBUG "xattr       %u\n", ui->xattr);
-	printk(KERN_DEBUG "flags       %d\n", ui->flags);
-	printk(KERN_DEBUG "compr_type  %d\n", ui->compr_type);
-	printk(KERN_DEBUG "data_len    %d\n", ui->data_len);
+	printk(KERN_DEBUG "\tcreat_sqnum    %llu\n", ui->creat_sqnum);
+	printk(KERN_DEBUG "\txattr_size     %u\n", ui->xattr_size);
+	printk(KERN_DEBUG "\txattr_cnt      %u\n", ui->xattr_cnt);
+	printk(KERN_DEBUG "\txattr_names    %u\n", ui->xattr_names);
+	printk(KERN_DEBUG "\tdirty          %u\n", ui->dirty);
+	printk(KERN_DEBUG "\txattr          %u\n", ui->xattr);
+	printk(KERN_DEBUG "\tbulk_read      %u\n", ui->xattr);
+	printk(KERN_DEBUG "\tsynced_i_size  %llu\n",
+	       (unsigned long long)ui->synced_i_size);
+	printk(KERN_DEBUG "\tui_size        %llu\n",
+	       (unsigned long long)ui->ui_size);
+	printk(KERN_DEBUG "\tflags          %d\n", ui->flags);
+	printk(KERN_DEBUG "\tcompr_type     %d\n", ui->compr_type);
+	printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
+	printk(KERN_DEBUG "\tread_in_a_row  %lu\n", ui->read_in_a_row);
+	printk(KERN_DEBUG "\tdata_len       %d\n", ui->data_len);
 }
 
 void dbg_dump_node(const struct ubifs_info *c, const void *node)
@@ -647,6 +655,43 @@
 	}
 }
 
+void dbg_dump_lpt_info(struct ubifs_info *c)
+{
+	int i;
+
+	spin_lock(&dbg_lock);
+	printk(KERN_DEBUG "\tlpt_sz:        %lld\n", c->lpt_sz);
+	printk(KERN_DEBUG "\tpnode_sz:      %d\n", c->pnode_sz);
+	printk(KERN_DEBUG "\tnnode_sz:      %d\n", c->nnode_sz);
+	printk(KERN_DEBUG "\tltab_sz:       %d\n", c->ltab_sz);
+	printk(KERN_DEBUG "\tlsave_sz:      %d\n", c->lsave_sz);
+	printk(KERN_DEBUG "\tbig_lpt:       %d\n", c->big_lpt);
+	printk(KERN_DEBUG "\tlpt_hght:      %d\n", c->lpt_hght);
+	printk(KERN_DEBUG "\tpnode_cnt:     %d\n", c->pnode_cnt);
+	printk(KERN_DEBUG "\tnnode_cnt:     %d\n", c->nnode_cnt);
+	printk(KERN_DEBUG "\tdirty_pn_cnt:  %d\n", c->dirty_pn_cnt);
+	printk(KERN_DEBUG "\tdirty_nn_cnt:  %d\n", c->dirty_nn_cnt);
+	printk(KERN_DEBUG "\tlsave_cnt:     %d\n", c->lsave_cnt);
+	printk(KERN_DEBUG "\tspace_bits:    %d\n", c->space_bits);
+	printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
+	printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
+	printk(KERN_DEBUG "\tlpt_spc_bits:  %d\n", c->lpt_spc_bits);
+	printk(KERN_DEBUG "\tpcnt_bits:     %d\n", c->pcnt_bits);
+	printk(KERN_DEBUG "\tlnum_bits:     %d\n", c->lnum_bits);
+	printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
+	printk(KERN_DEBUG "\tLPT head is at %d:%d\n",
+	       c->nhead_lnum, c->nhead_offs);
+	printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs);
+	if (c->big_lpt)
+		printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n",
+		       c->lsave_lnum, c->lsave_offs);
+	for (i = 0; i < c->lpt_lebs; i++)
+		printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d "
+		       "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
+		       c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
+	spin_unlock(&dbg_lock);
+}
+
 void dbg_dump_leb(const struct ubifs_info *c, int lnum)
 {
 	struct ubifs_scan_leb *sleb;
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 50315fc..33d6b95 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -224,6 +224,7 @@
 void dbg_dump_budg(struct ubifs_info *c);
 void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp);
 void dbg_dump_lprops(struct ubifs_info *c);
+void dbg_dump_lpt_info(struct ubifs_info *c);
 void dbg_dump_leb(const struct ubifs_info *c, int lnum);
 void dbg_dump_znode(const struct ubifs_info *c,
 		    const struct ubifs_znode *znode);
@@ -249,6 +250,8 @@
 int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot);
 int dbg_check_cats(struct ubifs_info *c);
 int dbg_check_ltab(struct ubifs_info *c);
+int dbg_chk_lpt_free_spc(struct ubifs_info *c);
+int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len);
 int dbg_check_synced_i_size(struct inode *inode);
 int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir);
 int dbg_check_tnc(struct ubifs_info *c, int extra);
@@ -367,6 +370,7 @@
 #define dbg_dump_budg(c)                      ({})
 #define dbg_dump_lprop(c, lp)                 ({})
 #define dbg_dump_lprops(c)                    ({})
+#define dbg_dump_lpt_info(c)                  ({})
 #define dbg_dump_leb(c, lnum)                 ({})
 #define dbg_dump_znode(c, znode)              ({})
 #define dbg_dump_heap(c, heap, cat)           ({})
@@ -379,6 +383,8 @@
 #define dbg_check_old_index(c, zroot)              0
 #define dbg_check_cats(c)                          0
 #define dbg_check_ltab(c)                          0
+#define dbg_chk_lpt_free_spc(c)                    0
+#define dbg_chk_lpt_sz(c, action, len)             0
 #define dbg_check_synced_i_size(inode)             0
 #define dbg_check_dir_size(c, dir)                 0
 #define dbg_check_tnc(c, x)                        0
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 3d698e2..51cf511 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -147,6 +147,12 @@
 				err = ret;
 				if (err != -ENOENT)
 					break;
+			} else if (block + 1 == beyond) {
+				int dlen = le32_to_cpu(dn->size);
+				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);
+
+				if (ilen && ilen < dlen)
+					memset(addr + ilen, 0, dlen - ilen);
 			}
 		}
 		if (++i >= UBIFS_BLOCKS_PER_PAGE)
@@ -577,8 +583,262 @@
 	return copied;
 }
 
+/**
+ * populate_page - copy data nodes into a page for bulk-read.
+ * @c: UBIFS file-system description object
+ * @page: page
+ * @bu: bulk-read information
+ * @n: next zbranch slot
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int populate_page(struct ubifs_info *c, struct page *page,
+			 struct bu_info *bu, int *n)
+{
+	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
+	struct inode *inode = page->mapping->host;
+	loff_t i_size = i_size_read(inode);
+	unsigned int page_block;
+	void *addr, *zaddr;
+	pgoff_t end_index;
+
+	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
+		inode->i_ino, page->index, i_size, page->flags);
+
+	addr = zaddr = kmap(page);
+
+	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	if (!i_size || page->index > end_index) {
+		hole = 1;
+		memset(addr, 0, PAGE_CACHE_SIZE);
+		goto out_hole;
+	}
+
+	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	while (1) {
+		int err, len, out_len, dlen;
+
+		if (nn >= bu->cnt) {
+			hole = 1;
+			memset(addr, 0, UBIFS_BLOCK_SIZE);
+		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
+			struct ubifs_data_node *dn;
+
+			dn = bu->buf + (bu->zbranch[nn].offs - offs);
+
+			ubifs_assert(dn->ch.sqnum >
+				     ubifs_inode(inode)->creat_sqnum);
+
+			len = le32_to_cpu(dn->size);
+			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
+				goto out_err;
+
+			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+			out_len = UBIFS_BLOCK_SIZE;
+			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+					       le16_to_cpu(dn->compr_type));
+			if (err || len != out_len)
+				goto out_err;
+
+			if (len < UBIFS_BLOCK_SIZE)
+				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+
+			nn += 1;
+			read = (i << UBIFS_BLOCK_SHIFT) + len;
+		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
+			nn += 1;
+			continue;
+		} else {
+			hole = 1;
+			memset(addr, 0, UBIFS_BLOCK_SIZE);
+		}
+		if (++i >= UBIFS_BLOCKS_PER_PAGE)
+			break;
+		addr += UBIFS_BLOCK_SIZE;
+		page_block += 1;
+	}
+
+	if (end_index == page->index) {
+		int len = i_size & (PAGE_CACHE_SIZE - 1);
+
+		if (len && len < read)
+			memset(zaddr + len, 0, read - len);
+	}
+
+out_hole:
+	if (hole) {
+		SetPageChecked(page);
+		dbg_gen("hole");
+	}
+
+	SetPageUptodate(page);
+	ClearPageError(page);
+	flush_dcache_page(page);
+	kunmap(page);
+	*n = nn;
+	return 0;
+
+out_err:
+	ClearPageUptodate(page);
+	SetPageError(page);
+	flush_dcache_page(page);
+	kunmap(page);
+	ubifs_err("bad data node (block %u, inode %lu)",
+		  page_block, inode->i_ino);
+	return -EINVAL;
+}
+
+/**
+ * ubifs_do_bulk_read - do bulk-read.
+ * @c: UBIFS file-system description object
+ * @page1: first page
+ *
+ * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
+ */
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+{
+	pgoff_t offset = page1->index, end_index;
+	struct address_space *mapping = page1->mapping;
+	struct inode *inode = mapping->host;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	struct bu_info *bu;
+	int err, page_idx, page_cnt, ret = 0, n = 0;
+	loff_t isize;
+
+	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
+	if (!bu)
+		return 0;
+
+	bu->buf_len = c->bulk_read_buf_size;
+	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
+	if (!bu->buf)
+		goto out_free;
+
+	data_key_init(c, &bu->key, inode->i_ino,
+		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+
+	err = ubifs_tnc_get_bu_keys(c, bu);
+	if (err)
+		goto out_warn;
+
+	if (bu->eof) {
+		/* Turn off bulk-read at the end of the file */
+		ui->read_in_a_row = 1;
+		ui->bulk_read = 0;
+	}
+
+	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	if (!page_cnt) {
+		/*
+		 * This happens when there are multiple blocks per page and the
+		 * blocks for the first page we are looking for are not
+		 * together. If all the pages were like this, bulk-read would
+		 * reduce performance, so we turn it off for a while.
+		 */
+		ui->read_in_a_row = 0;
+		ui->bulk_read = 0;
+		goto out_free;
+	}
+
+	if (bu->cnt) {
+		err = ubifs_tnc_bulk_read(c, bu);
+		if (err)
+			goto out_warn;
+	}
+
+	err = populate_page(c, page1, bu, &n);
+	if (err)
+		goto out_warn;
+
+	unlock_page(page1);
+	ret = 1;
+
+	isize = i_size_read(inode);
+	if (isize == 0)
+		goto out_free;
+	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+
+	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
+		pgoff_t page_offset = offset + page_idx;
+		struct page *page;
+
+		if (page_offset > end_index)
+			break;
+		page = find_or_create_page(mapping, page_offset,
+					   GFP_NOFS | __GFP_COLD);
+		if (!page)
+			break;
+		if (!PageUptodate(page))
+			err = populate_page(c, page, bu, &n);
+		unlock_page(page);
+		page_cache_release(page);
+		if (err)
+			break;
+	}
+
+	ui->last_page_read = offset + page_idx - 1;
+
+out_free:
+	kfree(bu->buf);
+	kfree(bu);
+	return ret;
+
+out_warn:
+	ubifs_warn("ignoring error %d and skipping bulk-read", err);
+	goto out_free;
+}
+
+/**
+ * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
+ * @page: page from which to start bulk-read.
+ *
+ * Some flash media are capable of reading sequentially at faster rates. UBIFS
+ * bulk-read facility is designed to take advantage of that, by reading in one
+ * go consecutive data nodes that are also located consecutively in the same
+ * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
+ */
+static int ubifs_bulk_read(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	pgoff_t index = page->index, last_page_read = ui->last_page_read;
+	int ret = 0;
+
+	ui->last_page_read = index;
+
+	if (!c->bulk_read)
+		return 0;
+	/*
+	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
+	 * don't bother if we cannot lock the mutex.
+	 */
+	if (!mutex_trylock(&ui->ui_mutex))
+		return 0;
+	if (index != last_page_read + 1) {
+		/* Turn off bulk-read if we stop reading sequentially */
+		ui->read_in_a_row = 1;
+		if (ui->bulk_read)
+			ui->bulk_read = 0;
+		goto out_unlock;
+	}
+	if (!ui->bulk_read) {
+		ui->read_in_a_row += 1;
+		if (ui->read_in_a_row < 3)
+			goto out_unlock;
+		/* Three reads in a row, so switch on bulk-read */
+		ui->bulk_read = 1;
+	}
+	ret = ubifs_do_bulk_read(c, page);
+out_unlock:
+	mutex_unlock(&ui->ui_mutex);
+	return ret;
+}
+
 static int ubifs_readpage(struct file *file, struct page *page)
 {
+	if (ubifs_bulk_read(page))
+		return 0;
 	do_readpage(page);
 	unlock_page(page);
 	return 0;
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 47814cd..717d79c 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -901,11 +901,11 @@
 	 * it is needed now for this commit.
 	 */
 	lp = ubifs_lpt_lookup_dirty(c, lnum);
-	if (unlikely(IS_ERR(lp)))
+	if (IS_ERR(lp))
 		return PTR_ERR(lp);
 	lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
 			     lp->flags | LPROPS_INDEX, -1);
-	if (unlikely(IS_ERR(lp)))
+	if (IS_ERR(lp))
 		return PTR_ERR(lp);
 	dbg_find("LEB %d, dirty %d and free %d flags %#x",
 		 lp->lnum, lp->dirty, lp->free, lp->flags);
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 02aba36..0bef650 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -96,6 +96,48 @@
 }
 
 /**
+ * joinup - bring data nodes for an inode together.
+ * @c: UBIFS file-system description object
+ * @sleb: describes scanned LEB
+ * @inum: inode number
+ * @blk: block number
+ * @data: list to which to add data nodes
+ *
+ * This function looks at the first few nodes in the scanned LEB @sleb and adds
+ * them to @data if they are data nodes from @inum and have a larger block
+ * number than @blk. This function returns %0 on success and a negative error
+ * code on failure.
+ */
+static int joinup(struct ubifs_info *c, struct ubifs_scan_leb *sleb, ino_t inum,
+		  unsigned int blk, struct list_head *data)
+{
+	int err, cnt = 6, lnum = sleb->lnum, offs;
+	struct ubifs_scan_node *snod, *tmp;
+	union ubifs_key *key;
+
+	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
+		key = &snod->key;
+		if (key_inum(c, key) == inum &&
+		    key_type(c, key) == UBIFS_DATA_KEY &&
+		    key_block(c, key) > blk) {
+			offs = snod->offs;
+			err = ubifs_tnc_has_node(c, key, 0, lnum, offs, 0);
+			if (err < 0)
+				return err;
+			list_del(&snod->list);
+			if (err) {
+				list_add_tail(&snod->list, data);
+				blk = key_block(c, key);
+			} else
+				kfree(snod);
+			cnt = 6;
+		} else if (--cnt == 0)
+			break;
+	}
+	return 0;
+}
+
+/**
  * move_nodes - move nodes.
  * @c: UBIFS file-system description object
  * @sleb: describes nodes to move
@@ -116,16 +158,21 @@
 static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
 {
 	struct ubifs_scan_node *snod, *tmp;
-	struct list_head large, medium, small;
+	struct list_head data, large, medium, small;
 	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
 	int avail, err, min = INT_MAX;
+	unsigned int blk = 0;
+	ino_t inum = 0;
 
+	INIT_LIST_HEAD(&data);
 	INIT_LIST_HEAD(&large);
 	INIT_LIST_HEAD(&medium);
 	INIT_LIST_HEAD(&small);
 
-	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
-		struct list_head *lst;
+	while (!list_empty(&sleb->nodes)) {
+		struct list_head *lst = sleb->nodes.next;
+
+		snod = list_entry(lst, struct ubifs_scan_node, list);
 
 		ubifs_assert(snod->type != UBIFS_IDX_NODE);
 		ubifs_assert(snod->type != UBIFS_REF_NODE);
@@ -136,7 +183,6 @@
 		if (err < 0)
 			goto out;
 
-		lst = &snod->list;
 		list_del(lst);
 		if (!err) {
 			/* The node is obsolete, remove it from the list */
@@ -145,15 +191,30 @@
 		}
 
 		/*
-		 * Sort the list of nodes so that large nodes go first, and
-		 * small nodes go last.
+		 * Sort the list of nodes so that data nodes go first, large
+		 * nodes go second, and small nodes go last.
 		 */
-		if (snod->len > MEDIUM_NODE_WM)
-			list_add(lst, &large);
+		if (key_type(c, &snod->key) == UBIFS_DATA_KEY) {
+			if (inum != key_inum(c, &snod->key)) {
+				if (inum) {
+					/*
+					 * Try to move data nodes from the same
+					 * inode together.
+					 */
+					err = joinup(c, sleb, inum, blk, &data);
+					if (err)
+						goto out;
+				}
+				inum = key_inum(c, &snod->key);
+				blk = key_block(c, &snod->key);
+			}
+			list_add_tail(lst, &data);
+		} else if (snod->len > MEDIUM_NODE_WM)
+			list_add_tail(lst, &large);
 		else if (snod->len > SMALL_NODE_WM)
-			list_add(lst, &medium);
+			list_add_tail(lst, &medium);
 		else
-			list_add(lst, &small);
+			list_add_tail(lst, &small);
 
 		/* And find the smallest node */
 		if (snod->len < min)
@@ -164,6 +225,7 @@
 	 * Join the tree lists so that we'd have one roughly sorted list
 	 * ('large' will be the head of the joined list).
 	 */
+	list_splice(&data, &large);
 	list_splice(&medium, large.prev);
 	list_splice(&small, large.prev);
 
@@ -653,7 +715,7 @@
 	 */
 	while (1) {
 		lp = ubifs_fast_find_freeable(c);
-		if (unlikely(IS_ERR(lp))) {
+		if (IS_ERR(lp)) {
 			err = PTR_ERR(lp);
 			goto out;
 		}
@@ -665,7 +727,7 @@
 		if (err)
 			goto out;
 		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
-		if (unlikely(IS_ERR(lp))) {
+		if (IS_ERR(lp)) {
 			err = PTR_ERR(lp);
 			goto out;
 		}
@@ -680,7 +742,7 @@
 	/* Record index freeable LEBs for unmapping after commit */
 	while (1) {
 		lp = ubifs_fast_find_frdi_idx(c);
-		if (unlikely(IS_ERR(lp))) {
+		if (IS_ERR(lp)) {
 			err = PTR_ERR(lp);
 			goto out;
 		}
@@ -696,7 +758,7 @@
 		/* Don't release the LEB until after the next commit */
 		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
 		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
-		if (unlikely(IS_ERR(lp))) {
+		if (IS_ERR(lp)) {
 			err = PTR_ERR(lp);
 			kfree(idx_gc);
 			goto out;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 054363f..0168271 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -62,6 +62,7 @@
 {
 	if (!c->ro_media) {
 		c->ro_media = 1;
+		c->no_chk_data_crc = 0;
 		ubifs_warn("switched to read-only mode, error %d", err);
 		dbg_dump_stack();
 	}
@@ -74,6 +75,7 @@
  * @lnum: logical eraseblock number
  * @offs: offset within the logical eraseblock
  * @quiet: print no messages
+ * @chk_crc: indicates whether to always check the CRC
  *
  * This function checks node magic number and CRC checksum. This function also
  * validates node length to prevent UBIFS from becoming crazy when an attacker
@@ -85,7 +87,7 @@
  * or magic.
  */
 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
-		     int offs, int quiet)
+		     int offs, int quiet, int chk_crc)
 {
 	int err = -EINVAL, type, node_len;
 	uint32_t crc, node_crc, magic;
@@ -121,6 +123,10 @@
 		   node_len > c->ranges[type].max_len)
 		goto out_len;
 
+	if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc)
+		if (c->no_chk_data_crc)
+			return 0;
+
 	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
 	node_crc = le32_to_cpu(ch->crc);
 	if (crc != node_crc) {
@@ -722,7 +728,7 @@
 		goto out;
 	}
 
-	err = ubifs_check_node(c, buf, lnum, offs, 0);
+	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
 	if (err) {
 		ubifs_err("expected node type %d", type);
 		return err;
@@ -781,7 +787,7 @@
 		goto out;
 	}
 
-	err = ubifs_check_node(c, buf, lnum, offs, 0);
+	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
 	if (err) {
 		ubifs_err("expected node type %d", type);
 		return err;
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 8f74760..9ee6508 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -484,7 +484,7 @@
  * @key2: the second key to compare
  *
  * This function compares 2 keys and returns %-1 if @key1 is less than
- * @key2, 0 if the keys are equivalent and %1 if @key1 is greater than @key2.
+ * @key2, %0 if the keys are equivalent and %1 if @key1 is greater than @key2.
  */
 static inline int keys_cmp(const struct ubifs_info *c,
 			   const union ubifs_key *key1,
@@ -503,6 +503,26 @@
 }
 
 /**
+ * keys_eq - determine if keys are equivalent.
+ * @c: UBIFS file-system description object
+ * @key1: the first key to compare
+ * @key2: the second key to compare
+ *
+ * This function compares 2 keys and returns %1 if @key1 is equal to @key2 and
+ * %0 if not.
+ */
+static inline int keys_eq(const struct ubifs_info *c,
+			  const union ubifs_key *key1,
+			  const union ubifs_key *key2)
+{
+	if (key1->u32[0] != key2->u32[0])
+		return 0;
+	if (key1->u32[1] != key2->u32[1])
+		return 0;
+	return 1;
+}
+
+/**
  * is_hash_key - is a key vulnerable to hash collisions.
  * @c: UBIFS file-system description object
  * @key: key
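
keys_eq() gives an equality-only test that skips the ordering logic in keys_cmp(). A tiny sketch of where a caller might prefer it; the lookup helper itself is hypothetical, the UBIFS types are not:

	/* Sketch: equality-only key comparison in a lookup loop. */
	static struct ubifs_zbranch *find_zbranch(struct ubifs_info *c,
						  struct ubifs_znode *znode,
						  const union ubifs_key *key)
	{
		int i;

		for (i = 0; i < znode->child_cnt; i++)
			if (keys_eq(c, key, &znode->zbranch[i].key))
				return &znode->zbranch[i]; /* cheaper than keys_cmp() == 0 */
		return NULL;
	}
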
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 2ba93da..f27176e 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -125,6 +125,7 @@
 			}
 		}
 	}
+
 	/* Not greater than parent, so compare to children */
 	while (1) {
 		/* Compare to left child */
@@ -460,18 +461,6 @@
 }
 
 /**
- * ubifs_get_lprops - get reference to LEB properties.
- * @c: the UBIFS file-system description object
- *
- * This function locks lprops. Lprops have to be unlocked by
- * 'ubifs_release_lprops()'.
- */
-void ubifs_get_lprops(struct ubifs_info *c)
-{
-	mutex_lock(&c->lp_mutex);
-}
-
-/**
  * calc_dark - calculate LEB dark space size.
  * @c: the UBIFS file-system description object
  * @spc: amount of free and dirty space in the LEB
@@ -576,7 +565,6 @@
 	ubifs_assert(!(lprops->free & 7) && !(lprops->dirty & 7));
 
 	spin_lock(&c->space_lock);
-
 	if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size)
 		c->lst.taken_empty_lebs -= 1;
 
@@ -637,31 +625,12 @@
 		c->lst.taken_empty_lebs += 1;
 
 	change_category(c, lprops);
-
 	c->idx_gc_cnt += idx_gc_cnt;
-
 	spin_unlock(&c->space_lock);
-
 	return lprops;
 }
 
 /**
- * ubifs_release_lprops - release lprops lock.
- * @c: the UBIFS file-system description object
- *
- * This function has to be called after each 'ubifs_get_lprops()' call to
- * unlock lprops.
- */
-void ubifs_release_lprops(struct ubifs_info *c)
-{
-	ubifs_assert(mutex_is_locked(&c->lp_mutex));
-	ubifs_assert(c->lst.empty_lebs >= 0 &&
-		     c->lst.empty_lebs <= c->main_lebs);
-
-	mutex_unlock(&c->lp_mutex);
-}
-
-/**
  * ubifs_get_lp_stats - get lprops statistics.
  * @c: UBIFS file-system description object
  * @st: return statistics
@@ -1262,7 +1231,6 @@
 	}
 
 	ubifs_scan_destroy(sleb);
-
 	return LPT_SCAN_CONTINUE;
 
 out_print:
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 9ff2463..db8bd0e 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -109,7 +109,8 @@
 	c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
 	c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
 	c->lpt_sz += c->ltab_sz;
-	c->lpt_sz += c->lsave_sz;
+	if (c->big_lpt)
+		c->lpt_sz += c->lsave_sz;
 
 	/* Add wastage */
 	sz = c->lpt_sz;
@@ -287,25 +288,56 @@
 	const int k = 32 - nrbits;
 	uint8_t *p = *addr;
 	int b = *pos;
-	uint32_t val;
+	uint32_t uninitialized_var(val);
+	const int bytes = (nrbits + b + 7) >> 3;
 
 	ubifs_assert(nrbits > 0);
 	ubifs_assert(nrbits <= 32);
 	ubifs_assert(*pos >= 0);
 	ubifs_assert(*pos < 8);
 	if (b) {
-		val = p[1] | ((uint32_t)p[2] << 8) | ((uint32_t)p[3] << 16) |
-		      ((uint32_t)p[4] << 24);
+		switch (bytes) {
+		case 2:
+			val = p[1];
+			break;
+		case 3:
+			val = p[1] | ((uint32_t)p[2] << 8);
+			break;
+		case 4:
+			val = p[1] | ((uint32_t)p[2] << 8) |
+				     ((uint32_t)p[3] << 16);
+			break;
+		case 5:
+			val = p[1] | ((uint32_t)p[2] << 8) |
+				     ((uint32_t)p[3] << 16) |
+				     ((uint32_t)p[4] << 24);
+		}
 		val <<= (8 - b);
 		val |= *p >> b;
 		nrbits += b;
-	} else
-		val = p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) |
-		      ((uint32_t)p[3] << 24);
+	} else {
+		switch (bytes) {
+		case 1:
+			val = p[0];
+			break;
+		case 2:
+			val = p[0] | ((uint32_t)p[1] << 8);
+			break;
+		case 3:
+			val = p[0] | ((uint32_t)p[1] << 8) |
+				     ((uint32_t)p[2] << 16);
+			break;
+		case 4:
+			val = p[0] | ((uint32_t)p[1] << 8) |
+				     ((uint32_t)p[2] << 16) |
+				     ((uint32_t)p[3] << 24);
+			break;
+		}
+	}
 	val <<= k;
 	val >>= k;
 	b = nrbits & 7;
-	p += nrbits / 8;
+	p += nrbits >> 3;
 	*addr = p;
 	*pos = b;
 	ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32);
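
For illustration, a stand-alone sketch of the unpacking scheme in the hunk above, which reads only the bytes that actually hold the requested bits; demo_unpack_bits() is an illustrative user-space re-implementation (using a loop instead of the unrolled switch), not the kernel function:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Read nrbits (1..32) starting at bit *pos of byte **addr. */
	static uint32_t demo_unpack_bits(uint8_t **addr, int *pos, int nrbits)
	{
		const int k = 32 - nrbits;
		uint8_t *p = *addr;
		int i, b = *pos;
		const int bytes = (nrbits + b + 7) >> 3;
		uint32_t val = 0;

		assert(nrbits > 0 && nrbits <= 32 && b >= 0 && b < 8);
		if (b) {
			for (i = 1; i < bytes; i++)
				val |= (uint32_t)p[i] << (8 * (i - 1));
			val <<= (8 - b);
			val |= *p >> b;
			nrbits += b;
		} else {
			for (i = 0; i < bytes; i++)
				val |= (uint32_t)p[i] << (8 * i);
		}
		val <<= k;
		val >>= k;
		*addr = p + (nrbits >> 3);
		*pos = nrbits & 7;
		return val;
	}

	int main(void)
	{
		/* 0x3fb4 little-endian: low 4 bits are 0x4, next 10 bits 0x3fb */
		uint8_t buf[] = { 0xb4, 0x3f, 0x00, 0x00, 0x00 };
		uint8_t *p = buf;
		int pos = 0;

		printf("%x %x\n", (unsigned)demo_unpack_bits(&p, &pos, 4),
		       (unsigned)demo_unpack_bits(&p, &pos, 10));
		return 0;
	}
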
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 5f0b83e..eed5a00 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -177,8 +177,6 @@
 			return 0;
 		}
 	}
-	dbg_err("last LEB %d", *lnum);
-	dump_stack();
 	return -ENOSPC;
 }
 
@@ -193,6 +191,9 @@
 	int lnum, offs, len, alen, done_lsave, done_ltab, err;
 	struct ubifs_cnode *cnode;
 
+	err = dbg_chk_lpt_sz(c, 0, 0);
+	if (err)
+		return err;
 	cnode = c->lpt_cnext;
 	if (!cnode)
 		return 0;
@@ -206,6 +207,7 @@
 		c->lsave_lnum = lnum;
 		c->lsave_offs = offs;
 		offs += c->lsave_sz;
+		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
 	}
 
 	if (offs + c->ltab_sz <= c->leb_size) {
@@ -213,6 +215,7 @@
 		c->ltab_lnum = lnum;
 		c->ltab_offs = offs;
 		offs += c->ltab_sz;
+		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
 	}
 
 	do {
@@ -226,9 +229,10 @@
 		while (offs + len > c->leb_size) {
 			alen = ALIGN(offs, c->min_io_size);
 			upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
+			dbg_chk_lpt_sz(c, 2, alen - offs);
 			err = alloc_lpt_leb(c, &lnum);
 			if (err)
-				return err;
+				goto no_space;
 			offs = 0;
 			ubifs_assert(lnum >= c->lpt_first &&
 				     lnum <= c->lpt_last);
@@ -238,6 +242,7 @@
 				c->lsave_lnum = lnum;
 				c->lsave_offs = offs;
 				offs += c->lsave_sz;
+				dbg_chk_lpt_sz(c, 1, c->lsave_sz);
 				continue;
 			}
 			if (!done_ltab) {
@@ -245,6 +250,7 @@
 				c->ltab_lnum = lnum;
 				c->ltab_offs = offs;
 				offs += c->ltab_sz;
+				dbg_chk_lpt_sz(c, 1, c->ltab_sz);
 				continue;
 			}
 			break;
@@ -257,6 +263,7 @@
 			c->lpt_offs = offs;
 		}
 		offs += len;
+		dbg_chk_lpt_sz(c, 1, len);
 		cnode = cnode->cnext;
 	} while (cnode && cnode != c->lpt_cnext);
 
@@ -265,9 +272,10 @@
 		if (offs + c->lsave_sz > c->leb_size) {
 			alen = ALIGN(offs, c->min_io_size);
 			upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
+			dbg_chk_lpt_sz(c, 2, alen - offs);
 			err = alloc_lpt_leb(c, &lnum);
 			if (err)
-				return err;
+				goto no_space;
 			offs = 0;
 			ubifs_assert(lnum >= c->lpt_first &&
 				     lnum <= c->lpt_last);
@@ -276,6 +284,7 @@
 		c->lsave_lnum = lnum;
 		c->lsave_offs = offs;
 		offs += c->lsave_sz;
+		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
 	}
 
 	/* Make sure to place LPT's own lprops table */
@@ -283,9 +292,10 @@
 		if (offs + c->ltab_sz > c->leb_size) {
 			alen = ALIGN(offs, c->min_io_size);
 			upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
+			dbg_chk_lpt_sz(c, 2, alen - offs);
 			err = alloc_lpt_leb(c, &lnum);
 			if (err)
-				return err;
+				goto no_space;
 			offs = 0;
 			ubifs_assert(lnum >= c->lpt_first &&
 				     lnum <= c->lpt_last);
@@ -294,11 +304,23 @@
 		c->ltab_lnum = lnum;
 		c->ltab_offs = offs;
 		offs += c->ltab_sz;
+		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
 	}
 
 	alen = ALIGN(offs, c->min_io_size);
 	upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
+	dbg_chk_lpt_sz(c, 4, alen - offs);
+	err = dbg_chk_lpt_sz(c, 3, alen);
+	if (err)
+		return err;
 	return 0;
+
+no_space:
+	ubifs_err("LPT out of space");
+	dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, "
+		"done_lsave %d", lnum, offs, len, done_ltab, done_lsave);
+	dbg_dump_lpt_info(c);
+	return err;
 }
 
 /**
@@ -333,8 +355,6 @@
 			*lnum = i + c->lpt_first;
 			return 0;
 		}
-	dbg_err("last LEB %d", *lnum);
-	dump_stack();
 	return -ENOSPC;
 }
 
@@ -369,12 +389,14 @@
 		done_lsave = 1;
 		ubifs_pack_lsave(c, buf + offs, c->lsave);
 		offs += c->lsave_sz;
+		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
 	}
 
 	if (offs + c->ltab_sz <= c->leb_size) {
 		done_ltab = 1;
 		ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
 		offs += c->ltab_sz;
+		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
 	}
 
 	/* Loop for each cnode */
@@ -392,10 +414,12 @@
 						       alen, UBI_SHORTTERM);
 				if (err)
 					return err;
+				dbg_chk_lpt_sz(c, 4, alen - wlen);
 			}
+			dbg_chk_lpt_sz(c, 2, 0);
 			err = realloc_lpt_leb(c, &lnum);
 			if (err)
-				return err;
+				goto no_space;
 			offs = 0;
 			from = 0;
 			ubifs_assert(lnum >= c->lpt_first &&
@@ -408,12 +432,14 @@
 				done_lsave = 1;
 				ubifs_pack_lsave(c, buf + offs, c->lsave);
 				offs += c->lsave_sz;
+				dbg_chk_lpt_sz(c, 1, c->lsave_sz);
 				continue;
 			}
 			if (!done_ltab) {
 				done_ltab = 1;
 				ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
 				offs += c->ltab_sz;
+				dbg_chk_lpt_sz(c, 1, c->ltab_sz);
 				continue;
 			}
 			break;
@@ -435,6 +461,7 @@
 		clear_bit(COW_ZNODE, &cnode->flags);
 		smp_mb__after_clear_bit();
 		offs += len;
+		dbg_chk_lpt_sz(c, 1, len);
 		cnode = cnode->cnext;
 	} while (cnode && cnode != c->lpt_cnext);
 
@@ -448,9 +475,10 @@
 					      UBI_SHORTTERM);
 			if (err)
 				return err;
+			dbg_chk_lpt_sz(c, 2, alen - wlen);
 			err = realloc_lpt_leb(c, &lnum);
 			if (err)
-				return err;
+				goto no_space;
 			offs = 0;
 			ubifs_assert(lnum >= c->lpt_first &&
 				     lnum <= c->lpt_last);
@@ -461,6 +489,7 @@
 		done_lsave = 1;
 		ubifs_pack_lsave(c, buf + offs, c->lsave);
 		offs += c->lsave_sz;
+		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
 	}
 
 	/* Make sure to place LPT's own lprops table */
@@ -473,9 +502,10 @@
 					      UBI_SHORTTERM);
 			if (err)
 				return err;
+			dbg_chk_lpt_sz(c, 2, alen - wlen);
 			err = realloc_lpt_leb(c, &lnum);
 			if (err)
-				return err;
+				goto no_space;
 			offs = 0;
 			ubifs_assert(lnum >= c->lpt_first &&
 				     lnum <= c->lpt_last);
@@ -486,6 +516,7 @@
 		done_ltab = 1;
 		ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
 		offs += c->ltab_sz;
+		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
 	}
 
 	/* Write remaining data in buffer */
@@ -495,6 +526,12 @@
 	err = ubifs_leb_write(c, lnum, buf + from, from, alen, UBI_SHORTTERM);
 	if (err)
 		return err;
+
+	dbg_chk_lpt_sz(c, 4, alen - wlen);
+	err = dbg_chk_lpt_sz(c, 3, ALIGN(offs, c->min_io_size));
+	if (err)
+		return err;
+
 	c->nhead_lnum = lnum;
 	c->nhead_offs = ALIGN(offs, c->min_io_size);
 
@@ -503,7 +540,15 @@
 	dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
 	if (c->big_lpt)
 		dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
+
 	return 0;
+
+no_space:
+	ubifs_err("LPT out of space mismatch");
+	dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab "
+	        "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave);
+	dbg_dump_lpt_info(c);
+	return err;
 }
 
 /**
@@ -1044,6 +1089,8 @@
 	int pos = 0, node_type, node_len;
 	uint16_t crc, calc_crc;
 
+	if (len < UBIFS_LPT_CRC_BYTES + (UBIFS_LPT_TYPE_BITS + 7) / 8)
+		return 0;
 	node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS);
 	if (node_type == UBIFS_LPT_NOT_A_NODE)
 		return 0;
@@ -1156,6 +1203,9 @@
 	dbg_lp("");
 
 	mutex_lock(&c->lp_mutex);
+	err = dbg_chk_lpt_free_spc(c);
+	if (err)
+		goto out;
 	err = dbg_check_ltab(c);
 	if (err)
 		goto out;
@@ -1645,4 +1695,121 @@
 	return 0;
 }
 
+/**
+ * dbg_chk_lpt_free_spc - check LPT free space is enough to write entire LPT.
+ * @c: the UBIFS file-system description object
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int dbg_chk_lpt_free_spc(struct ubifs_info *c)
+{
+	long long free = 0;
+	int i;
+
+	for (i = 0; i < c->lpt_lebs; i++) {
+		if (c->ltab[i].tgc || c->ltab[i].cmt)
+			continue;
+		if (i + c->lpt_first == c->nhead_lnum)
+			free += c->leb_size - c->nhead_offs;
+		else if (c->ltab[i].free == c->leb_size)
+			free += c->leb_size;
+	}
+	if (free < c->lpt_sz) {
+		dbg_err("LPT space error: free %lld lpt_sz %lld",
+			free, c->lpt_sz);
+		dbg_dump_lpt_info(c);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * dbg_chk_lpt_sz - check LPT does not write more than LPT size.
+ * @c: the UBIFS file-system description object
+ * @action: what to do (%0 - reset counters, %1 - account a written node,
+ *          %2 - account end-of-LEB wastage, %3 - check the totals,
+ *          %4 - account trailing alignment wastage)
+ * @len: length written
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
+{
+	long long chk_lpt_sz, lpt_sz;
+	int err = 0;
+
+	switch (action) {
+	case 0:
+		c->chk_lpt_sz = 0;
+		c->chk_lpt_sz2 = 0;
+		c->chk_lpt_lebs = 0;
+		c->chk_lpt_wastage = 0;
+		if (c->dirty_pn_cnt > c->pnode_cnt) {
+			dbg_err("dirty pnodes %d exceed max %d",
+				c->dirty_pn_cnt, c->pnode_cnt);
+			err = -EINVAL;
+		}
+		if (c->dirty_nn_cnt > c->nnode_cnt) {
+			dbg_err("dirty nnodes %d exceed max %d",
+				c->dirty_nn_cnt, c->nnode_cnt);
+			err = -EINVAL;
+		}
+		return err;
+	case 1:
+		c->chk_lpt_sz += len;
+		return 0;
+	case 2:
+		c->chk_lpt_sz += len;
+		c->chk_lpt_wastage += len;
+		c->chk_lpt_lebs += 1;
+		return 0;
+	case 3:
+		chk_lpt_sz = c->leb_size;
+		chk_lpt_sz *= c->chk_lpt_lebs;
+		chk_lpt_sz += len - c->nhead_offs;
+		if (c->chk_lpt_sz != chk_lpt_sz) {
+			dbg_err("LPT wrote %lld but space used was %lld",
+				c->chk_lpt_sz, chk_lpt_sz);
+			err = -EINVAL;
+		}
+		if (c->chk_lpt_sz > c->lpt_sz) {
+			dbg_err("LPT wrote %lld but lpt_sz is %lld",
+				c->chk_lpt_sz, c->lpt_sz);
+			err = -EINVAL;
+		}
+		if (c->chk_lpt_sz2 && c->chk_lpt_sz != c->chk_lpt_sz2) {
+			dbg_err("LPT layout size %lld but wrote %lld",
+				c->chk_lpt_sz, c->chk_lpt_sz2);
+			err = -EINVAL;
+		}
+		if (c->chk_lpt_sz2 && c->new_nhead_offs != len) {
+			dbg_err("LPT new nhead offs: expected %d was %d",
+				c->new_nhead_offs, len);
+			err = -EINVAL;
+		}
+		lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
+		lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
+		lpt_sz += c->ltab_sz;
+		if (c->big_lpt)
+			lpt_sz += c->lsave_sz;
+		if (c->chk_lpt_sz - c->chk_lpt_wastage > lpt_sz) {
+			dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
+				c->chk_lpt_sz, c->chk_lpt_wastage, lpt_sz);
+			err = -EINVAL;
+		}
+		if (err)
+			dbg_dump_lpt_info(c);
+		c->chk_lpt_sz2 = c->chk_lpt_sz;
+		c->chk_lpt_sz = 0;
+		c->chk_lpt_wastage = 0;
+		c->chk_lpt_lebs = 0;
+		c->new_nhead_offs = len;
+		return err;
+	case 4:
+		c->chk_lpt_sz += len;
+		c->chk_lpt_wastage += len;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 #endif /* CONFIG_UBIFS_FS_DEBUG */
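
For illustration, a user-space model of the dbg_chk_lpt_sz() action protocol driven by the lpt_commit.c hunks above; struct demo_chk and demo_chk_lpt_sz() are illustrative stand-ins, and the real function additionally verifies the totals against c->lpt_sz and the new nhead offset:

	#include <stdio.h>

	struct demo_chk {
		long long sz;      /* bytes accounted (chk_lpt_sz) */
		long long waste;   /* wastage bytes (chk_lpt_wastage) */
		int lebs;          /* LEBs finished early (chk_lpt_lebs) */
	};

	static void demo_chk_lpt_sz(struct demo_chk *d, int action, int len)
	{
		switch (action) {
		case 0:			/* start of commit: reset counters */
			d->sz = d->waste = 0;
			d->lebs = 0;
			break;
		case 1:			/* a node, ltab or lsave was laid out */
			d->sz += len;
			break;
		case 2:			/* LEB ended, wastage at its tail */
			d->sz += len;
			d->waste += len;
			d->lebs += 1;
			break;
		case 4:			/* trailing alignment wastage */
			d->sz += len;
			d->waste += len;
			break;
		case 3:			/* end of layout: verify the totals */
			printf("wrote %lld, wastage %lld, lebs %d\n",
			       d->sz, d->waste, d->lebs);
			break;
		}
	}

	int main(void)
	{
		struct demo_chk d;

		demo_chk_lpt_sz(&d, 0, 0);    /* start of commit */
		demo_chk_lpt_sz(&d, 1, 512);  /* a pnode written */
		demo_chk_lpt_sz(&d, 2, 96);   /* LEB ran out, 96 bytes wasted */
		demo_chk_lpt_sz(&d, 4, 32);   /* trailing alignment wastage */
		demo_chk_lpt_sz(&d, 3, 0);    /* final consistency check */
		return 0;
	}
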
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 4c12a92..4fa81d8 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -310,4 +310,31 @@
 	return ubifs_tnc_locate(c, key, node, NULL, NULL);
 }
 
+/**
+ * ubifs_get_lprops - get reference to LEB properties.
+ * @c: the UBIFS file-system description object
+ *
+ * This function locks lprops. Lprops have to be unlocked by
+ * 'ubifs_release_lprops()'.
+ */
+static inline void ubifs_get_lprops(struct ubifs_info *c)
+{
+	mutex_lock(&c->lp_mutex);
+}
+
+/**
+ * ubifs_release_lprops - release lprops lock.
+ * @c: the UBIFS file-system description object
+ *
+ * This function has to be called after each 'ubifs_get_lprops()' call to
+ * unlock lprops.
+ */
+static inline void ubifs_release_lprops(struct ubifs_info *c)
+{
+	ubifs_assert(mutex_is_locked(&c->lp_mutex));
+	ubifs_assert(c->lst.empty_lebs >= 0 &&
+		     c->lst.empty_lebs <= c->main_lebs);
+	mutex_unlock(&c->lp_mutex);
+}
+
 #endif /* __UBIFS_MISC_H__ */
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index acf5c5f..0ed8247 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -87,7 +87,7 @@
 
 	dbg_scan("scanning %s", dbg_ntype(ch->node_type));
 
-	if (ubifs_check_node(c, buf, lnum, offs, quiet))
+	if (ubifs_check_node(c, buf, lnum, offs, quiet, 1))
 		return SCANNED_A_CORRUPT_NODE;
 
 	if (ch->node_type == UBIFS_PAD_NODE) {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 9a92203..8780efb 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -401,6 +401,16 @@
 	else if (c->mount_opts.unmount_mode == 1)
 		seq_printf(s, ",norm_unmount");
 
+	if (c->mount_opts.bulk_read == 2)
+		seq_printf(s, ",bulk_read");
+	else if (c->mount_opts.bulk_read == 1)
+		seq_printf(s, ",no_bulk_read");
+
+	if (c->mount_opts.chk_data_crc == 2)
+		seq_printf(s, ",chk_data_crc");
+	else if (c->mount_opts.chk_data_crc == 1)
+		seq_printf(s, ",no_chk_data_crc");
+
 	return 0;
 }
 
@@ -408,13 +418,26 @@
 {
 	struct ubifs_info *c = sb->s_fs_info;
 	int i, ret = 0, err;
+	long long bud_bytes;
 
-	if (c->jheads)
+	if (c->jheads) {
 		for (i = 0; i < c->jhead_cnt; i++) {
 			err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
 			if (err && !ret)
 				ret = err;
 		}
+
+		/* Commit the journal unless it has too little data */
+		spin_lock(&c->buds_lock);
+		bud_bytes = c->bud_bytes;
+		spin_unlock(&c->buds_lock);
+		if (bud_bytes > c->leb_size) {
+			err = ubifs_run_commit(c);
+			if (err)
+				return err;
+		}
+	}
+
 	/*
 	 * We ought to call sync for c->ubi but it does not have one. If it had
 	 * it would in turn call mtd->sync, however mtd operations are
@@ -538,6 +561,18 @@
 	 * calculations when reporting free space.
 	 */
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
+	/* Buffer size for bulk-reads */
+	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->bulk_read_buf_size > c->leb_size)
+		c->bulk_read_buf_size = c->leb_size;
+	if (c->bulk_read_buf_size > 128 * 1024) {
+		/* Check if we can kmalloc more than 128KiB */
+		void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
+
+		kfree(try);
+		if (!try)
+			c->bulk_read_buf_size = 128 * 1024;
+	}
 	return 0;
 }
 
@@ -840,17 +875,29 @@
  *
  * Opt_fast_unmount: do not run a journal commit before un-mounting
  * Opt_norm_unmount: run a journal commit before un-mounting
+ * Opt_bulk_read: enable bulk-reads
+ * Opt_no_bulk_read: disable bulk-reads
+ * Opt_chk_data_crc: check CRCs when reading data nodes
+ * Opt_no_chk_data_crc: do not check CRCs when reading data nodes
  * Opt_err: just end of array marker
  */
 enum {
 	Opt_fast_unmount,
 	Opt_norm_unmount,
+	Opt_bulk_read,
+	Opt_no_bulk_read,
+	Opt_chk_data_crc,
+	Opt_no_chk_data_crc,
 	Opt_err,
 };
 
 static const match_table_t tokens = {
 	{Opt_fast_unmount, "fast_unmount"},
 	{Opt_norm_unmount, "norm_unmount"},
+	{Opt_bulk_read, "bulk_read"},
+	{Opt_no_bulk_read, "no_bulk_read"},
+	{Opt_chk_data_crc, "chk_data_crc"},
+	{Opt_no_chk_data_crc, "no_chk_data_crc"},
 	{Opt_err, NULL},
 };
 
@@ -888,6 +935,22 @@
 			c->mount_opts.unmount_mode = 1;
 			c->fast_unmount = 0;
 			break;
+		case Opt_bulk_read:
+			c->mount_opts.bulk_read = 2;
+			c->bulk_read = 1;
+			break;
+		case Opt_no_bulk_read:
+			c->mount_opts.bulk_read = 1;
+			c->bulk_read = 0;
+			break;
+		case Opt_chk_data_crc:
+			c->mount_opts.chk_data_crc = 2;
+			c->no_chk_data_crc = 0;
+			break;
+		case Opt_no_chk_data_crc:
+			c->mount_opts.chk_data_crc = 1;
+			c->no_chk_data_crc = 1;
+			break;
 		default:
 			ubifs_err("unrecognized mount option \"%s\" "
 				  "or missing value", p);
@@ -996,6 +1059,8 @@
 			goto out_free;
 	}
 
+	c->always_chk_crc = 1;
+
 	err = ubifs_read_superblock(c);
 	if (err)
 		goto out_free;
@@ -1032,8 +1097,6 @@
 
 		/* Create background thread */
 		c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name);
-		if (!c->bgt)
-			c->bgt = ERR_PTR(-EINVAL);
 		if (IS_ERR(c->bgt)) {
 			err = PTR_ERR(c->bgt);
 			c->bgt = NULL;
@@ -1139,24 +1202,28 @@
 	if (err)
 		goto out_infos;
 
+	c->always_chk_crc = 0;
+
 	ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
 		  c->vi.ubi_num, c->vi.vol_id, c->vi.name);
 	if (mounted_read_only)
 		ubifs_msg("mounted read-only");
 	x = (long long)c->main_lebs * c->leb_size;
-	ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d LEBs)",
-		  x, x >> 10, x >> 20, c->main_lebs);
+	ubifs_msg("file system size:   %lld bytes (%lld KiB, %lld MiB, %d "
+		  "LEBs)", x, x >> 10, x >> 20, c->main_lebs);
 	x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
-	ubifs_msg("journal size: %lld bytes (%lld KiB, %lld MiB, %d LEBs)",
-		  x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
-	ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
-	ubifs_msg("media format %d, latest format %d",
+	ubifs_msg("journal size:       %lld bytes (%lld KiB, %lld MiB, %d "
+		  "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
+	ubifs_msg("media format:       %d (latest is %d)",
 		  c->fmt_version, UBIFS_FORMAT_VERSION);
+	ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
+	ubifs_msg("reserved for root:  %llu bytes (%llu KiB)",
+		c->report_rp_size, c->report_rp_size >> 10);
 
 	dbg_msg("compiled on:         " __DATE__ " at " __TIME__);
 	dbg_msg("min. I/O unit size:  %d bytes", c->min_io_size);
 	dbg_msg("LEB size:            %d bytes (%d KiB)",
-		c->leb_size, c->leb_size / 1024);
+		c->leb_size, c->leb_size >> 10);
 	dbg_msg("data journal heads:  %d",
 		c->jhead_cnt - NONDATA_JHEADS_CNT);
 	dbg_msg("UUID:                %02X%02X%02X%02X-%02X%02X"
@@ -1282,6 +1349,7 @@
 
 	mutex_lock(&c->umount_mutex);
 	c->remounting_rw = 1;
+	c->always_chk_crc = 1;
 
 	/* Check for enough free space */
 	if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) {
@@ -1345,20 +1413,20 @@
 
 	/* Create background thread */
 	c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name);
-	if (!c->bgt)
-		c->bgt = ERR_PTR(-EINVAL);
 	if (IS_ERR(c->bgt)) {
 		err = PTR_ERR(c->bgt);
 		c->bgt = NULL;
 		ubifs_err("cannot spawn \"%s\", error %d",
 			  c->bgt_name, err);
-		return err;
+		goto out;
 	}
 	wake_up_process(c->bgt);
 
 	c->orph_buf = vmalloc(c->leb_size);
-	if (!c->orph_buf)
-		return -ENOMEM;
+	if (!c->orph_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* Check for enough log space */
 	lnum = c->lhead_lnum + 1;
@@ -1385,6 +1453,7 @@
 	dbg_gen("re-mounted read-write");
 	c->vfs_sb->s_flags &= ~MS_RDONLY;
 	c->remounting_rw = 0;
+	c->always_chk_crc = 0;
 	mutex_unlock(&c->umount_mutex);
 	return 0;
 
@@ -1400,6 +1469,7 @@
 	c->ileb_buf = NULL;
 	ubifs_lpt_free(c, 1);
 	c->remounting_rw = 0;
+	c->always_chk_crc = 0;
 	mutex_unlock(&c->umount_mutex);
 	return err;
 }
@@ -1408,12 +1478,9 @@
  * commit_on_unmount - commit the journal when un-mounting.
  * @c: UBIFS file-system description object
  *
- * This function is called during un-mounting and it commits the journal unless
- * the "fast unmount" mode is enabled. It also avoids committing the journal if
- * it contains too few data.
- *
- * Sometimes recovery requires the journal to be committed at least once, and
- * this function takes care about this.
+ * This function is called during un-mounting and re-mounting, and it commits
+ * the journal unless the "fast unmount" mode is enabled. It also avoids
+ * committing the journal if it contains too little data.
  */
 static void commit_on_unmount(struct ubifs_info *c)
 {
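
For illustration, how the new mount options could be passed from user space via mount(2); only the option strings come from the tokens table above, while the volume name and mount point are made up:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* "ubi0:rootfs" and "/mnt" are illustrative, not from the patch */
		if (mount("ubi0:rootfs", "/mnt", "ubifs", 0,
			  "bulk_read,no_chk_data_crc") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}
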
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 7634c59..d27fd91 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -284,7 +284,7 @@
 	}
 
 	zn = copy_znode(c, znode);
-	if (unlikely(IS_ERR(zn)))
+	if (IS_ERR(zn))
 		return zn;
 
 	if (zbr->len) {
@@ -470,6 +470,10 @@
 	if (node_len != len)
 		return 0;
 
+	if (type == UBIFS_DATA_NODE && !c->always_chk_crc)
+		if (c->no_chk_data_crc)
+			return 0;
+
 	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
 	node_crc = le32_to_cpu(ch->crc);
 	if (crc != node_crc)
@@ -1128,7 +1132,7 @@
 			ubifs_assert(znode == c->zroot.znode);
 			znode = dirty_cow_znode(c, &c->zroot);
 		}
-		if (unlikely(IS_ERR(znode)) || !p)
+		if (IS_ERR(znode) || !p)
 			break;
 		ubifs_assert(path[p - 1] >= 0);
 		ubifs_assert(path[p - 1] < znode->child_cnt);
@@ -1492,6 +1496,289 @@
 }
 
 /**
+ * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * Look up consecutive data node keys for the same inode that reside in
+ * consecutive positions in the same LEB.
+ */
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
+{
+	int n, err = 0, lnum = -1, uninitialized_var(offs);
+	int uninitialized_var(len);
+	unsigned int block = key_block(c, &bu->key);
+	struct ubifs_znode *znode;
+
+	bu->cnt = 0;
+	bu->blk_cnt = 0;
+	bu->eof = 0;
+
+	mutex_lock(&c->tnc_mutex);
+	/* Find first key */
+	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
+	if (err < 0)
+		goto out;
+	if (err) {
+		/* Key found */
+		len = znode->zbranch[n].len;
+		/* The buffer must be big enough for at least 1 node */
+		if (len > bu->buf_len) {
+			err = -EINVAL;
+			goto out;
+		}
+		/* Add this key */
+		bu->zbranch[bu->cnt++] = znode->zbranch[n];
+		bu->blk_cnt += 1;
+		lnum = znode->zbranch[n].lnum;
+		offs = ALIGN(znode->zbranch[n].offs + len, 8);
+	}
+	while (1) {
+		struct ubifs_zbranch *zbr;
+		union ubifs_key *key;
+		unsigned int next_block;
+
+		/* Find next key */
+		err = tnc_next(c, &znode, &n);
+		if (err)
+			goto out;
+		zbr = &znode->zbranch[n];
+		key = &zbr->key;
+		/* See if there is another data key for this file */
+		if (key_inum(c, key) != key_inum(c, &bu->key) ||
+		    key_type(c, key) != UBIFS_DATA_KEY) {
+			err = -ENOENT;
+			goto out;
+		}
+		if (lnum < 0) {
+			/* First key found */
+			lnum = zbr->lnum;
+			offs = ALIGN(zbr->offs + zbr->len, 8);
+			len = zbr->len;
+			if (len > bu->buf_len) {
+				err = -EINVAL;
+				goto out;
+			}
+		} else {
+			/*
+			 * The data nodes must be in consecutive positions in
+			 * the same LEB.
+			 */
+			if (zbr->lnum != lnum || zbr->offs != offs)
+				goto out;
+			offs += ALIGN(zbr->len, 8);
+			len = ALIGN(len, 8) + zbr->len;
+			/* Must not exceed buffer length */
+			if (len > bu->buf_len)
+				goto out;
+		}
+		/* Allow for holes */
+		next_block = key_block(c, key);
+		bu->blk_cnt += (next_block - block - 1);
+		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+		block = next_block;
+		/* Add this key */
+		bu->zbranch[bu->cnt++] = *zbr;
+		bu->blk_cnt += 1;
+		/* See if we have room for more */
+		if (bu->cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+	}
+out:
+	if (err == -ENOENT) {
+		bu->eof = 1;
+		err = 0;
+	}
+	bu->gc_seq = c->gc_seq;
+	mutex_unlock(&c->tnc_mutex);
+	if (err)
+		return err;
+	/*
+	 * An enormous hole could cause bulk-read to encompass too many
+	 * page cache pages, so limit the number here.
+	 */
+	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
+		bu->blk_cnt = UBIFS_MAX_BULK_READ;
+	/*
+	 * Ensure that bulk-read covers a whole number of page cache
+	 * pages.
+	 */
+	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
+	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
+		return 0;
+	if (bu->eof) {
+		/* At the end of file we can round up */
+		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
+		return 0;
+	}
+	/* Exclude data nodes that do not make up a whole page cache page */
+	block = key_block(c, &bu->key) + bu->blk_cnt;
+	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
+	while (bu->cnt) {
+		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
+			break;
+		bu->cnt -= 1;
+	}
+	return 0;
+}
+
+/**
+ * read_wbuf - bulk-read from a LEB with a wbuf.
+ * @wbuf: wbuf that may overlap the read
+ * @buf: buffer into which to read
+ * @len: read length
+ * @lnum: LEB number from which to read
+ * @offs: offset from which to read
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
+		     int offs)
+{
+	const struct ubifs_info *c = wbuf->c;
+	int rlen, overlap;
+
+	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
+	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+	ubifs_assert(!(offs & 7) && offs < c->leb_size);
+	ubifs_assert(offs + len <= c->leb_size);
+
+	spin_lock(&wbuf->lock);
+	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
+	if (!overlap) {
+		/* We may safely unlock the write-buffer and read the data */
+		spin_unlock(&wbuf->lock);
+		return ubi_read(c->ubi, lnum, buf, offs, len);
+	}
+
+	/* Don't read under wbuf */
+	rlen = wbuf->offs - offs;
+	if (rlen < 0)
+		rlen = 0;
+
+	/* Copy the rest from the write-buffer */
+	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
+	spin_unlock(&wbuf->lock);
+
+	if (rlen > 0)
+		/* Read everything that goes before write-buffer */
+		return ubi_read(c->ubi, lnum, buf, offs, rlen);
+
+	return 0;
+}
+
+/**
+ * validate_data_node - validate data nodes for bulk-read.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing data node to validate
+ * @zbr: zbranch of data node to validate
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int validate_data_node(struct ubifs_info *c, void *buf,
+			      struct ubifs_zbranch *zbr)
+{
+	union ubifs_key key1;
+	struct ubifs_ch *ch = buf;
+	int err, len;
+
+	if (ch->node_type != UBIFS_DATA_NODE) {
+		ubifs_err("bad node type (%d but expected %d)",
+			  ch->node_type, UBIFS_DATA_NODE);
+		goto out_err;
+	}
+
+	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
+	if (err) {
+		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
+		goto out;
+	}
+
+	len = le32_to_cpu(ch->len);
+	if (len != zbr->len) {
+		ubifs_err("bad node length %d, expected %d", len, zbr->len);
+		goto out_err;
+	}
+
+	/* Make sure the key of the read node is correct */
+	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
+	if (!keys_eq(c, &zbr->key, &key1)) {
+		ubifs_err("bad key in node at LEB %d:%d",
+			  zbr->lnum, zbr->offs);
+		dbg_tnc("looked for key %s found node's key %s",
+			DBGKEY(&zbr->key), DBGKEY1(&key1));
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	err = -EINVAL;
+out:
+	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
+	dbg_dump_node(c, buf);
+	dbg_dump_stack();
+	return err;
+}
+
+/**
+ * ubifs_tnc_bulk_read - read a number of data nodes in one go.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * This function reads and validates the data nodes that were identified by
+ * the 'ubifs_tnc_get_bu_keys()' function. It returns %0 on success, -EAGAIN
+ * to indicate a race with GC, or another negative error code on failure.
+ */
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
+{
+	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
+	struct ubifs_wbuf *wbuf;
+	void *buf;
+
+	len = bu->zbranch[bu->cnt - 1].offs;
+	len += bu->zbranch[bu->cnt - 1].len - offs;
+	if (len > bu->buf_len) {
+		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
+		return -EINVAL;
+	}
+
+	/* Do the read */
+	wbuf = ubifs_get_wbuf(c, lnum);
+	if (wbuf)
+		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
+	else
+		err = ubi_read(c->ubi, lnum, bu->buf, offs, len);
+
+	/* Check for a race with GC */
+	if (maybe_leb_gced(c, lnum, bu->gc_seq))
+		return -EAGAIN;
+
+	if (err && err != -EBADMSG) {
+		ubifs_err("failed to read from LEB %d:%d, error %d",
+			  lnum, offs, err);
+		dbg_dump_stack();
+		dbg_tnc("key %s", DBGKEY(&bu->key));
+		return err;
+	}
+
+	/* Validate the nodes read */
+	buf = bu->buf;
+	for (i = 0; i < bu->cnt; i++) {
+		err = validate_data_node(c, buf, &bu->zbranch[i]);
+		if (err)
+			return err;
+		buf = buf + ALIGN(bu->zbranch[i].len, 8);
+	}
+
+	return 0;
+}
+
+/**
  * do_lookup_nm - look up a "hashed" node.
  * @c: UBIFS file-system description object
  * @key: node key to lookup
@@ -1675,7 +1962,7 @@
 {
 	struct ubifs_znode *zn, *zi, *zp;
 	int i, keep, move, appending = 0;
-	union ubifs_key *key = &zbr->key;
+	union ubifs_key *key = &zbr->key, *key1;
 
 	ubifs_assert(n >= 0 && n <= c->fanout);
 
@@ -1716,20 +2003,33 @@
 	zn->level = znode->level;
 
 	/* Decide where to split */
-	if (znode->level == 0 && n == c->fanout &&
-	    key_type(c, key) == UBIFS_DATA_KEY) {
-		union ubifs_key *key1;
-
-		/*
-		 * If this is an inode which is being appended - do not split
-		 * it because no other zbranches can be inserted between
-		 * zbranches of consecutive data nodes anyway.
-		 */
-		key1 = &znode->zbranch[n - 1].key;
-		if (key_inum(c, key1) == key_inum(c, key) &&
-		    key_type(c, key1) == UBIFS_DATA_KEY &&
-		    key_block(c, key1) == key_block(c, key) - 1)
-			appending = 1;
+	if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
+		/* Try not to split consecutive data keys */
+		if (n == c->fanout) {
+			key1 = &znode->zbranch[n - 1].key;
+			if (key_inum(c, key1) == key_inum(c, key) &&
+			    key_type(c, key1) == UBIFS_DATA_KEY)
+				appending = 1;
+		} else
+			goto check_split;
+	} else if (appending && n != c->fanout) {
+		/* Try not to split consecutive data keys */
+		appending = 0;
+check_split:
+		if (n >= (c->fanout + 1) / 2) {
+			key1 = &znode->zbranch[0].key;
+			if (key_inum(c, key1) == key_inum(c, key) &&
+			    key_type(c, key1) == UBIFS_DATA_KEY) {
+				key1 = &znode->zbranch[n].key;
+				if (key_inum(c, key1) != key_inum(c, key) ||
+				    key_type(c, key1) != UBIFS_DATA_KEY) {
+					keep = n;
+					move = c->fanout - keep;
+					zi = znode;
+					goto do_split;
+				}
+			}
+		}
 	}
 
 	if (appending) {
@@ -1759,6 +2059,8 @@
 			zbr->znode->parent = zn;
 	}
 
+do_split:
+
 	__set_bit(DIRTY_ZNODE, &zn->flags);
 	atomic_long_inc(&c->dirty_zn_cnt);
 
@@ -1785,14 +2087,11 @@
 
 	/* Insert new znode (produced by splitting) into the parent */
 	if (zp) {
-		i = n;
+		if (n == 0 && zi == znode && znode->iip == 0)
+			correct_parent_keys(c, znode);
+
 		/* Locate insertion point */
 		n = znode->iip + 1;
-		if (appending && n != c->fanout)
-			appending = 0;
-
-		if (i == 0 && zi == znode && znode->iip == 0)
-			correct_parent_keys(c, znode);
 
 		/* Tail recursion */
 		zbr->key = zn->zbranch[0].key;
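
For illustration, a hedged sketch of how a reader might drive the new bulk-read API; this is not the caller added by the patch, only the call pattern implied by the functions above. It assumes the fs/ubifs/ internal headers ("ubifs.h"), uses the existing key_copy() helper from key.h, and treats -EAGAIN as the documented race with GC:

	static int demo_do_bulk_read(struct ubifs_info *c,
				     const union ubifs_key *first_key)
	{
		struct bu_info *bu;
		int err;

		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
		if (!bu)
			return -ENOMEM;

		/* Collect consecutive data-node zbranches starting at the key */
		key_copy(c, first_key, &bu->key);
		bu->buf_len = c->bulk_read_buf_size;
		err = ubifs_tnc_get_bu_keys(c, bu);
		if (err || !bu->cnt)
			goto out_free;

		bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
		if (!bu->buf) {
			err = -ENOMEM;
			goto out_free;
		}

		/* Read and validate all collected nodes in one go */
		err = ubifs_tnc_bulk_read(c, bu);
		if (err == -EAGAIN)
			/* Raced with GC - caller falls back to single-node reads */
			err = 0;

		kfree(bu->buf);
	out_free:
		kfree(bu);
		return err;
	}
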
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c
index a25c1cc..b48db99 100644
--- a/fs/ubifs/tnc_misc.c
+++ b/fs/ubifs/tnc_misc.c
@@ -480,8 +480,8 @@
 	}
 
 	/* Make sure the key of the read node is correct */
-	key_read(c, key, &key1);
-	if (memcmp(node + UBIFS_KEY_OFFSET, &key1, c->key_len)) {
+	key_read(c, node + UBIFS_KEY_OFFSET, &key1);
+	if (!keys_eq(c, key, &key1)) {
 		ubifs_err("bad key in node at LEB %d:%d",
 			  zbr->lnum, zbr->offs);
 		dbg_tnc("looked for key %s found node's key %s",
diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h
index a9ecbd9..0b37804 100644
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -75,7 +75,6 @@
  */
 #define UBIFS_BLOCK_SIZE  4096
 #define UBIFS_BLOCK_SHIFT 12
-#define UBIFS_BLOCK_MASK  0x00000FFF
 
 /* UBIFS padding byte pattern (must not be first or last byte of node magic) */
 #define UBIFS_PADDING_BYTE 0xCE
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 17c620b..a7bd32f 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -142,6 +142,9 @@
 /* Maximum expected tree height for use by bottom_up_buf */
 #define BOTTOM_UP_HEIGHT 64
 
+/* Maximum number of data nodes to bulk-read */
+#define UBIFS_MAX_BULK_READ 32
+
 /*
  * Lockdep classes for UBIFS inode @ui_mutex.
  */
@@ -328,9 +331,10 @@
  *               this inode
  * @dirty: non-zero if the inode is dirty
  * @xattr: non-zero if this is an extended attribute inode
+ * @bulk_read: non-zero if bulk-read should be used
  * @ui_mutex: serializes inode write-back with the rest of VFS operations,
- *            serializes "clean <-> dirty" state changes, protects @dirty,
- *            @ui_size, and @xattr_size
+ *            serializes "clean <-> dirty" state changes, serializes bulk-read,
+ *            protects @dirty, @bulk_read, @ui_size, and @xattr_size
  * @ui_lock: protects @synced_i_size
  * @synced_i_size: synchronized size of inode, i.e. the value of inode size
  *                 currently stored on the flash; used only for regular file
@@ -338,6 +342,8 @@
  * @ui_size: inode size used by UBIFS when writing to flash
  * @flags: inode flags (@UBIFS_COMPR_FL, etc)
  * @compr_type: default compression type used for this inode
+ * @last_page_read: page number of last page read (for bulk read)
+ * @read_in_a_row: number of consecutive pages read in a row (for bulk read)
  * @data_len: length of the data attached to the inode
  * @data: inode's data
  *
@@ -379,12 +385,15 @@
 	unsigned int xattr_names;
 	unsigned int dirty:1;
 	unsigned int xattr:1;
+	unsigned int bulk_read:1;
 	struct mutex ui_mutex;
 	spinlock_t ui_lock;
 	loff_t synced_i_size;
 	loff_t ui_size;
 	int flags;
 	int compr_type;
+	pgoff_t last_page_read;
+	pgoff_t read_in_a_row;
 	int data_len;
 	void *data;
 };
@@ -698,8 +707,8 @@
  * struct ubifs_zbranch - key/coordinate/length branch stored in znodes.
  * @key: key
  * @znode: znode address in memory
- * @lnum: LEB number of the indexing node
- * @offs: offset of the indexing node within @lnum
+ * @lnum: LEB number of the target node (indexing node or data node)
+ * @offs: target node offset within @lnum
  * @len: target node length
  */
 struct ubifs_zbranch {
@@ -744,6 +753,28 @@
 };
 
 /**
+ * struct bu_info - bulk-read information
+ * @key: first data node key
+ * @zbranch: zbranches of data nodes to bulk read
+ * @buf: buffer to read into
+ * @buf_len: buffer length
+ * @gc_seq: GC sequence number to detect races with GC
+ * @cnt: number of data nodes for bulk read
+ * @blk_cnt: number of data blocks including holes
+ * @eof: end of file reached
+ */
+struct bu_info {
+	union ubifs_key key;
+	struct ubifs_zbranch zbranch[UBIFS_MAX_BULK_READ];
+	void *buf;
+	int buf_len;
+	int gc_seq;
+	int cnt;
+	int blk_cnt;
+	int eof;
+};
+
+/**
  * struct ubifs_node_range - node length range description data structure.
  * @len: fixed node length
  * @min_len: minimum possible node length
@@ -862,9 +893,13 @@
 /**
  * struct ubifs_mount_opts - UBIFS-specific mount options information.
  * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast)
+ * @bulk_read: enable bulk-reads
+ * @chk_data_crc: check CRCs when reading data nodes
  */
 struct ubifs_mount_opts {
 	unsigned int unmount_mode:2;
+	unsigned int bulk_read:2;
+	unsigned int chk_data_crc:2;
 };
 
 /**
@@ -905,13 +940,12 @@
  * @cmt_state: commit state
  * @cs_lock: commit state lock
  * @cmt_wq: wait queue to sleep on if the log is full and a commit is running
+ *
  * @fast_unmount: do not run journal commit before un-mounting
  * @big_lpt: flag that LPT is too big to write whole during commit
- * @check_lpt_free: flag that indicates LPT GC may be needed
- * @nospace: non-zero if the file-system does not have flash space (used as
- *           optimization)
- * @nospace_rp: the same as @nospace, but additionally means that even reserved
- *              pool is full
+ * @no_chk_data_crc: do not check CRCs when reading data nodes (except during
+ *                   recovery)
+ * @bulk_read: enable bulk-reads
  *
  * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and
  *             @calc_idx_sz
@@ -935,6 +969,7 @@
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ * @bulk_read_buf_size: buffer size for bulk-reads
  *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
@@ -977,12 +1012,17 @@
  *                        but which still have to be taken into account because
  *                        the index has not been committed so far
  * @space_lock: protects @budg_idx_growth, @budg_data_growth, @budg_dd_growth,
- *              @budg_uncommited_idx, @min_idx_lebs, @old_idx_sz, and @lst;
+ *              @budg_uncommited_idx, @min_idx_lebs, @old_idx_sz, @lst,
+ *              @nospace, and @nospace_rp;
  * @min_idx_lebs: minimum number of LEBs required for the index
  * @old_idx_sz: size of index on flash
  * @calc_idx_sz: temporary variable which is used to calculate new index size
  *               (contains accurate new index size at end of TNC commit start)
  * @lst: lprops statistics
+ * @nospace: non-zero if the file-system does not have flash space (used as
+ *           optimization)
+ * @nospace_rp: the same as @nospace, but additionally means that even reserved
+ *              pool is full
  *
  * @page_budget: budget for a page
  * @inode_budget: budget for an inode
@@ -1061,6 +1101,7 @@
  * @lpt_drty_flgs: dirty flags for LPT special nodes e.g. ltab
  * @dirty_nn_cnt: number of dirty nnodes
  * @dirty_pn_cnt: number of dirty pnodes
+ * @check_lpt_free: flag that indicates LPT GC may be needed
  * @lpt_sz: LPT size
  * @lpt_nod_buf: buffer for an on-flash nnode or pnode
  * @lpt_buf: buffer of LEB size used by LPT
@@ -1102,6 +1143,7 @@
  * @rcvrd_mst_node: recovered master node to write when mounting ro to rw
  * @size_tree: inode size information for recovery
  * @remounting_rw: set while remounting from ro to rw (sb flags have MS_RDONLY)
+ * @always_chk_crc: always check CRCs (while mounting and remounting rw)
  * @mount_opts: UBIFS-specific mount options
  *
  * @dbg_buf: a buffer of LEB size used for debugging purposes
@@ -1146,11 +1188,11 @@
 	int cmt_state;
 	spinlock_t cs_lock;
 	wait_queue_head_t cmt_wq;
+
 	unsigned int fast_unmount:1;
 	unsigned int big_lpt:1;
-	unsigned int check_lpt_free:1;
-	unsigned int nospace:1;
-	unsigned int nospace_rp:1;
+	unsigned int no_chk_data_crc:1;
+	unsigned int bulk_read:1;
 
 	struct mutex tnc_mutex;
 	struct ubifs_zbranch zroot;
@@ -1175,6 +1217,7 @@
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
 	struct mutex mst_mutex;
+	int bulk_read_buf_size;
 
 	int log_lebs;
 	long long log_bytes;
@@ -1218,6 +1261,8 @@
 	unsigned long long old_idx_sz;
 	unsigned long long calc_idx_sz;
 	struct ubifs_lp_stats lst;
+	unsigned int nospace:1;
+	unsigned int nospace_rp:1;
 
 	int page_budget;
 	int inode_budget;
@@ -1294,6 +1339,7 @@
 	int lpt_drty_flgs;
 	int dirty_nn_cnt;
 	int dirty_pn_cnt;
+	int check_lpt_free;
 	long long lpt_sz;
 	void *lpt_nod_buf;
 	void *lpt_buf;
@@ -1335,6 +1381,7 @@
 	struct ubifs_mst_node *rcvrd_mst_node;
 	struct rb_root size_tree;
 	int remounting_rw;
+	int always_chk_crc;
 	struct ubifs_mount_opts mount_opts;
 
 #ifdef CONFIG_UBIFS_FS_DEBUG
@@ -1347,6 +1394,12 @@
 	unsigned long fail_timeout;
 	unsigned int fail_cnt;
 	unsigned int fail_cnt_max;
+	long long chk_lpt_sz;
+	long long chk_lpt_sz2;
+	long long chk_lpt_wastage;
+	int chk_lpt_lebs;
+	int new_nhead_lnum;
+	int new_nhead_offs;
 #endif
 };
 
@@ -1377,7 +1430,7 @@
 int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
 		     int offs, int dtype);
 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
-		     int offs, int quiet);
+		     int offs, int quiet, int chk_crc);
 void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad);
 void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last);
 int ubifs_io_init(struct ubifs_info *c);
@@ -1490,6 +1543,8 @@
 int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
 		       int lnum, int offs);
 int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode);
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu);
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu);
 
 /* tnc_misc.c */
 struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
@@ -1586,12 +1641,10 @@
 void ubifs_lpt_free(struct ubifs_info *c, int wr_only);
 
 /* lprops.c */
-void ubifs_get_lprops(struct ubifs_info *c);
 const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
 					   const struct ubifs_lprops *lp,
 					   int free, int dirty, int flags,
 					   int idx_gc_cnt);
-void ubifs_release_lprops(struct ubifs_info *c);
 void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *stats);
 void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
 		      int cat);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 649bec7..cfd31e2 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -446,7 +446,7 @@
 		int type;
 
 		xent = ubifs_tnc_next_ent(c, &key, &nm);
-		if (unlikely(IS_ERR(xent))) {
+		if (IS_ERR(xent)) {
 			err = PTR_ERR(xent);
 			break;
 		}
diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h
index 7efe100..cee97f1 100644
--- a/include/asm-cris/thread_info.h
+++ b/include/asm-cris/thread_info.h
@@ -88,6 +88,7 @@
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
+#define TIF_FREEZE		18	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -95,6 +96,7 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/include/asm-frv/ide.h b/include/asm-frv/ide.h
index 7ebcc56..3610766 100644
--- a/include/asm-frv/ide.h
+++ b/include/asm-frv/ide.h
@@ -18,15 +18,7 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
-/****************************************************************************/
-/*
- * some bits needed for parts of the IDE subsystem to compile
- */
-#define __ide_mm_insw(port, addr, n)	insw((unsigned long) (port), addr, n)
-#define __ide_mm_insl(port, addr, n)	insl((unsigned long) (port), addr, n)
-#define __ide_mm_outsw(port, addr, n)	outsw((unsigned long) (port), addr, n)
-#define __ide_mm_outsl(port, addr, n)	outsl((unsigned long) (port), addr, n)
-
+#include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_IDE_H */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 0f6dabd..12c07c1 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -41,7 +41,7 @@
 #define __WARN() warn_on_slowpath(__FILE__, __LINE__)
 #define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
 #else
-#define __WARN_printf(arg...) __WARN()
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
 #endif
 
 #ifndef WARN_ON
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be..f104af7 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -41,10 +39,7 @@
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -63,7 +58,6 @@
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_inc_return(count) <= 0))
 		fail_fn(count);
 }
@@ -88,25 +82,9 @@
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state.  It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-		smp_mb();
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
-	}
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }
 
 #endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2c..580a6d3 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -46,10 +44,7 @@
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -67,7 +62,6 @@
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_xchg(count, 1) != 0))
 		fail_fn(count);
 }
@@ -110,7 +104,6 @@
 		if (prev < 0)
 			prev = 0;
 	}
-	smp_mb();
 
 	return prev;
 }
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index 71ef3f0..89061c1 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -84,12 +84,12 @@
 
 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
 	{
-		BCD_TO_BIN(time->tm_sec);
-		BCD_TO_BIN(time->tm_min);
-		BCD_TO_BIN(time->tm_hour);
-		BCD_TO_BIN(time->tm_mday);
-		BCD_TO_BIN(time->tm_mon);
-		BCD_TO_BIN(time->tm_year);
+		time->tm_sec = bcd2bin(time->tm_sec);
+		time->tm_min = bcd2bin(time->tm_min);
+		time->tm_hour = bcd2bin(time->tm_hour);
+		time->tm_mday = bcd2bin(time->tm_mday);
+		time->tm_mon = bcd2bin(time->tm_mon);
+		time->tm_year = bcd2bin(time->tm_year);
 	}
 
 #ifdef CONFIG_MACH_DECSTATION
@@ -159,12 +159,12 @@
 
 	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
 	    || RTC_ALWAYS_BCD) {
-		BIN_TO_BCD(sec);
-		BIN_TO_BCD(min);
-		BIN_TO_BCD(hrs);
-		BIN_TO_BCD(day);
-		BIN_TO_BCD(mon);
-		BIN_TO_BCD(yrs);
+		sec = bin2bcd(sec);
+		min = bin2bcd(min);
+		hrs = bin2bcd(hrs);
+		day = bin2bcd(day);
+		mon = bin2bcd(mon);
+		yrs = bin2bcd(yrs);
 	}
 
 	save_control = CMOS_READ(RTC_CONTROL);
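
For illustration, user-space equivalents of the bcd2bin()/bin2bcd() helpers the hunk switches to (the kernel versions are declared in <linux/bcd.h>; the demo_* names are illustrative):

	#include <stdio.h>

	static unsigned demo_bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;
	}

	static unsigned char demo_bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) + val % 10;
	}

	int main(void)
	{
		/* BCD 0x59 is decimal 59, and back again */
		printf("%u %02x\n", demo_bcd2bin(0x59), demo_bin2bcd(59));
		return 0;
	}
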
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 74c5faf..80744606 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -37,6 +37,13 @@
 #define MEM_DISCARD(sec) *(.mem##sec)
 #endif
 
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+			*(__mcount_loc)				\
+			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#else
+#define MCOUNT_REC()
+#endif
 
 /* .data section */
 #define DATA_DATA							\
@@ -52,7 +59,10 @@
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___markers) = .;				\
 	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;
+	VMLINUX_SYMBOL(__stop___markers) = .;				\
+	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
+	*(__tracepoints)						\
+	VMLINUX_SYMBOL(__stop___tracepoints) = .;
 
 #define RO_DATA(align)							\
 	. = ALIGN((align));						\
@@ -61,6 +71,7 @@
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
 		*(__markers_strings)	/* Markers: strings */		\
+		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
 									\
 	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
@@ -188,6 +199,7 @@
 	/* __*init sections */						\
 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
 		*(.ref.rodata)						\
+		MCOUNT_REC()						\
 		DEV_KEEP(init.rodata)					\
 		DEV_KEEP(exit.rodata)					\
 		CPU_KEEP(init.rodata)					\
diff --git a/include/asm-m68k/ide.h b/include/asm-m68k/ide.h
index 1daf6cb..b996a3c 100644
--- a/include/asm-m68k/ide.h
+++ b/include/asm-m68k/ide.h
@@ -92,15 +92,6 @@
 #define outsw_swapw(port, addr, n)	raw_outsw_swapw((u16 *)port, addr, n)
 #endif
 
-
-/* Q40 and Atari have byteswapped IDE busses and since many interesting
- * values in the identification string are text, chars and words they
- * happened to be almost correct without swapping.. However *_capacity
- * is needed for drives over 8 GB. RZ */
-#if defined(CONFIG_Q40) || defined(CONFIG_ATARI)
-#define M68K_IDE_SWAPW  (MACH_IS_Q40 || MACH_IS_ATARI)
-#endif
-
 #ifdef CONFIG_BLK_DEV_FALCON_IDE
 #define IDE_ARCH_LOCK
 
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index abc0027..af0fda4 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -52,5 +52,6 @@
 #define TIF_DELAYED_TRACE	14	/* single step a syscall */
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */
 #define TIF_MEMDIE		16
+#define TIF_FREEZE		17	/* thread is freezing for suspend */
 
 #endif	/* _ASM_M68K_THREAD_INFO_H */
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index e07e728..62274ab 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -69,6 +69,7 @@
 #define TIF_MEMDIE	 	5
 #define TIF_SYSCALL_AUDIT	6
 #define TIF_RESTORE_SIGMASK	7
+#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -77,5 +78,6 @@
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #endif
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index d76a083..ef1d72d 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -40,8 +40,6 @@
 extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
-extern int ioapic_force;
-
 extern int disable_apic;
 /*
  * Basic functions accessing APICs.
@@ -100,6 +98,20 @@
 extern void enable_x2apic(void);
 extern void enable_IR_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
+static inline int x2apic_enabled(void)
+{
+	int msr, msr2;
+
+	if (!cpu_has_x2apic)
+		return 0;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	if (msr & X2APIC_ENABLE)
+		return 1;
+	return 0;
+}
+#else
+#define x2apic_enabled()	0
 #endif
 
 struct apic_ops {
diff --git a/include/asm-x86/bigsmp/apic.h b/include/asm-x86/bigsmp/apic.h
index 0a9cd7c..1d9543b 100644
--- a/include/asm-x86/bigsmp/apic.h
+++ b/include/asm-x86/bigsmp/apic.h
@@ -9,22 +9,17 @@
 	return (1);
 }
 
-/* Round robin the irqs amoung the online cpus */
 static inline cpumask_t target_cpus(void)
 {
-	static unsigned long cpu = NR_CPUS;
-	do {
-		if (cpu >= NR_CPUS)
-			cpu = first_cpu(cpu_online_map);
-		else
-			cpu = next_cpu(cpu, cpu_online_map);
-	} while (cpu >= NR_CPUS);
-	return cpumask_of_cpu(cpu);
+#ifdef CONFIG_SMP
+	return cpu_online_map;
+#else
+	return cpumask_of_cpu(0);
+#endif
 }
 
 #undef APIC_DEST_LOGICAL
 #define APIC_DEST_LOGICAL	0
-#define TARGET_CPUS		(target_cpus())
 #define APIC_DFR_VALUE		(APIC_DFR_FLAT)
 #define INT_DELIVERY_MODE	(dest_Fixed)
 #define INT_DEST_MODE		(0)    /* phys delivery to target proc */
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index ed2de22..313438e 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -94,4 +94,17 @@
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 
+#ifndef CONFIG_EFI
+/*
+ * If EFI is not configured, have the EFI calls return -ENOSYS.
+ */
+#define efi_call0(_f)					(-ENOSYS)
+#define efi_call1(_f, _a1)				(-ENOSYS)
+#define efi_call2(_f, _a1, _a2)				(-ENOSYS)
+#define efi_call3(_f, _a1, _a2, _a3)			(-ENOSYS)
+#define efi_call4(_f, _a1, _a2, _a3, _a4)		(-ENOSYS)
+#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5)		(-ENOSYS)
+#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6)	(-ENOSYS)
+#endif /* CONFIG_EFI */
+
 #endif /* ASM_X86__EFI_H */
diff --git a/include/asm-x86/es7000/apic.h b/include/asm-x86/es7000/apic.h
index aae50c2..380f0b4 100644
--- a/include/asm-x86/es7000/apic.h
+++ b/include/asm-x86/es7000/apic.h
@@ -17,7 +17,6 @@
 	return cpumask_of_cpu(smp_processor_id());
 #endif
 }
-#define TARGET_CPUS	(target_cpus())
 
 #if defined CONFIG_ES7000_CLUSTERED_APIC
 #define APIC_DFR_VALUE		(APIC_DFR_CLUSTER)
@@ -81,7 +80,7 @@
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
index be0e004..1bb6f9bb 100644
--- a/include/asm-x86/ftrace.h
+++ b/include/asm-x86/ftrace.h
@@ -7,6 +7,16 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/*
+	 * call mcount is "e8 <4 byte offset>"
+	 * The addr points to the 4 byte offset and the caller of this
+	 * function wants the pointer to e8. Simply subtract one.
+	 */
+	return addr - 1;
+}
 #endif
 
 #endif /* CONFIG_FTRACE */
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index 34280f0..6fe4f81 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -57,6 +57,7 @@
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
 	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	cpumask_t (*vector_allocation_domain)(int cpu);
 
 #ifdef CONFIG_SMP
 	/* ipi */
@@ -104,6 +105,7 @@
 	APICFUNC(get_apic_id)				\
 	.apic_id_mask = APIC_ID_MASK,			\
 	APICFUNC(cpu_mask_to_apicid)			\
+	APICFUNC(vector_allocation_domain)			\
 	APICFUNC(acpi_madt_oem_check)			\
 	IPIFUNC(send_IPI_mask)				\
 	IPIFUNC(send_IPI_allbutself)			\
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
index cbbbb6d..58b273f 100644
--- a/include/asm-x86/hpet.h
+++ b/include/asm-x86/hpet.h
@@ -1,6 +1,8 @@
 #ifndef ASM_X86__HPET_H
 #define ASM_X86__HPET_H
 
+#include <linux/msi.h>
+
 #ifdef CONFIG_HPET_TIMER
 
 #define HPET_MMAP_SIZE		1024
@@ -10,6 +12,11 @@
 #define HPET_CFG		0x010
 #define HPET_STATUS		0x020
 #define HPET_COUNTER		0x0f0
+
+#define HPET_Tn_CFG(n)		(0x100 + 0x20 * n)
+#define HPET_Tn_CMP(n)		(0x108 + 0x20 * n)
+#define HPET_Tn_ROUTE(n)	(0x110 + 0x20 * n)
+
 #define HPET_T0_CFG		0x100
 #define HPET_T0_CMP		0x108
 #define HPET_T0_ROUTE		0x110
@@ -65,6 +72,20 @@
 extern unsigned long hpet_readl(unsigned long a);
 extern void force_hpet_resume(void);
 
+extern void hpet_msi_unmask(unsigned int irq);
+extern void hpet_msi_mask(unsigned int irq);
+extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
+extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+
+#ifdef CONFIG_PCI_MSI
+extern int arch_setup_hpet_msi(unsigned int irq);
+#else
+static inline int arch_setup_hpet_msi(unsigned int irq)
+{
+	return -EINVAL;
+}
+#endif
+
 #ifdef CONFIG_HPET_EMULATE_RTC
 
 #include <linux/interrupt.h>
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 50f6e03..749d042 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -96,13 +96,8 @@
 
 /* SMP */
 extern void smp_apic_timer_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_32
 extern void smp_spurious_interrupt(struct pt_regs *);
 extern void smp_error_interrupt(struct pt_regs *);
-#else
-extern asmlinkage void smp_spurious_interrupt(void);
-extern asmlinkage void smp_error_interrupt(void);
-#endif
 #ifdef CONFIG_X86_SMP
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
@@ -115,13 +110,13 @@
 #endif
 
 #ifdef CONFIG_X86_32
-extern void (*const interrupt[NR_IRQS])(void);
-#else
-typedef int vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void (*const interrupt[NR_VECTORS])(void);
 #endif
 
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+typedef int vector_irq_t[NR_VECTORS];
+DECLARE_PER_CPU(vector_irq_t, vector_irq);
+
+#ifdef CONFIG_X86_IO_APIC
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
 extern void __setup_vector_irq(int cpu);
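A rough userspace sketch (not part of the patch) of the shape of the vector_irq table that is now declared unconditionally: one int per vector, per CPU, mapping a vector number back to the irq using it. The two-CPU array and the -1 "unused" convention are assumptions made for the illustration.

#include <stdio.h>

#define NR_VECTORS	256
#define NR_FAKE_CPUS	2

typedef int vector_irq_t[NR_VECTORS];
static vector_irq_t vector_irq[NR_FAKE_CPUS];	/* stand-in for the per-CPU data */

int main(void)
{
	int cpu, vec;

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		for (vec = 0; vec < NR_VECTORS; vec++)
			vector_irq[cpu][vec] = -1;	/* nothing assigned yet */

	vector_irq[0][0x41] = 16;	/* pretend irq 16 was given vector 0x41 on cpu 0 */
	printf("cpu 0, vector 0x41 -> irq %d\n", vector_irq[0][0x41]);
	return 0;
}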
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 8ec68a5..d35cbd7 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
+#include <asm/irq_vectors.h>
 
 /*
  * Intel IO-APIC support for SMP and UP systems.
@@ -87,24 +88,8 @@
 		mask		:  1,	/* 0: enabled, 1: disabled */
 		__reserved_2	: 15;
 
-#ifdef CONFIG_X86_32
-	union {
-		struct {
-			__u32	__reserved_1	: 24,
-				physical_dest	:  4,
-				__reserved_2	:  4;
-		} physical;
-
-		struct {
-			__u32	__reserved_1	: 24,
-				logical_dest	:  8;
-		} logical;
-	} dest;
-#else
 	__u32	__reserved_3	: 24,
 		dest		:  8;
-#endif
-
 } __attribute__ ((packed));
 
 struct IR_IO_APIC_route_entry {
@@ -203,10 +188,17 @@
 extern void reinit_intr_remapped_IO_APIC(int);
 #endif
 
+extern int probe_nr_irqs(void);
+
 #else  /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void) { }
+
+static inline int probe_nr_irqs(void)
+{
+	return NR_IRQS;
+}
 #endif
 
 #endif /* ASM_X86__IO_APIC_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 961e746..2daaffc 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -7,9 +7,13 @@
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
+extern int forbid_dac;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
+/* DMAR operation timeout: 10 seconds, expressed in TSC cycles */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
 extern int gart_iommu_aperture_allowed;
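The DMAR_OPERATION_TIMEOUT arithmetic, worked through in a standalone sketch (not part of the patch): tsc_khz is the TSC frequency in kHz, so tsc_khz * 1000 is cycles per second and the extra factor of 10 gives a ten second budget. The 2 GHz figure is invented for the example.

#include <stdio.h>

int main(void)
{
	unsigned long tsc_khz = 2000000UL;	/* hypothetical 2 GHz TSC */
	unsigned long long timeout = (unsigned long long)tsc_khz * 10 * 1000;

	printf("%llu cycles = %llu seconds\n",
	       timeout, timeout / (tsc_khz * 1000ULL));
	return 0;
}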
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index c5d2d76..a8d065d 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -19,19 +19,14 @@
 
 /*
  * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration on 64 bit.
+ * cleanup after irq migration.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR	FIRST_EXTERNAL_VECTOR
 
 /*
- * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
- * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
+ * Vectors 0x30-0x3f are used for ISA interrupts.
  */
-#ifdef CONFIG_X86_32
-#define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR)
-#else
 #define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10)
-#endif
 #define IRQ1_VECTOR		(IRQ0_VECTOR + 1)
 #define IRQ2_VECTOR		(IRQ0_VECTOR + 2)
 #define IRQ3_VECTOR		(IRQ0_VECTOR + 3)
@@ -96,11 +91,7 @@
  * start at 0x31(0x41) to spread out vectors evenly between priority
  * levels. (0x80 is the syscall vector)
  */
-#ifdef CONFIG_X86_32
-# define FIRST_DEVICE_VECTOR	0x31
-#else
-# define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
-#endif
+#define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
 
 #define NR_VECTORS		256
 
@@ -116,7 +107,6 @@
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
-# define NR_IRQ_VECTORS NR_IRQS
 
 #elif !defined(CONFIG_X86_VOYAGER)
 
@@ -124,23 +114,15 @@
 
 #  define NR_IRQS		224
 
-#  if (224 >= 32 * NR_CPUS)
-#   define NR_IRQ_VECTORS	NR_IRQS
-#  else
-#   define NR_IRQ_VECTORS	(32 * NR_CPUS)
-#  endif
-
 # else /* IO_APIC || PARAVIRT */
 
 #  define NR_IRQS		16
-#  define NR_IRQ_VECTORS	NR_IRQS
 
 # endif
 
 #else /* !VISWS && !VOYAGER */
 
 # define NR_IRQS		224
-# define NR_IRQ_VECTORS		NR_IRQS
 
 #endif /* VISWS */
 
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index 9283b60..6b1add8 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -14,6 +14,7 @@
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 #endif
 
 /*
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index 2a330a4..3c66f2c 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -85,6 +85,20 @@
 	return 0;
 #endif
 }
+
+static inline cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
 #endif
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -138,6 +152,5 @@
 static inline void enable_apic_mode(void)
 {
 }
-
 #endif /* CONFIG_X86_LOCAL_APIC */
 #endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-generic/irq_vectors_limits.h b/include/asm-x86/mach-generic/irq_vectors_limits.h
deleted file mode 100644
index f7870e1..0000000
--- a/include/asm-x86/mach-generic/irq_vectors_limits.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
-#define ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
-
-/*
- * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
- * even with uni-proc kernels, so use a big array.
- *
- * This value should be the same in both the generic and summit subarches.
- * Change one, change 'em both.
- */
-#define NR_IRQS	224
-#define NR_IRQ_VECTORS	1024
-
-#endif /* ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
index 5d010c6..5085b52 100644
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ b/include/asm-x86/mach-generic/mach_apic.h
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
 
diff --git a/include/asm-x86/numaq/apic.h b/include/asm-x86/numaq/apic.h
index a8344ba..0bf2a06 100644
--- a/include/asm-x86/numaq/apic.h
+++ b/include/asm-x86/numaq/apic.h
@@ -12,8 +12,6 @@
 	return CPU_MASK_ALL;
 }
 
-#define TARGET_CPUS (target_cpus())
-
 #define NO_BALANCE_IRQ (1)
 #define esr_disable (1)
 
diff --git a/include/asm-x86/summit/apic.h b/include/asm-x86/summit/apic.h
index 394b00b..9b3070f 100644
--- a/include/asm-x86/summit/apic.h
+++ b/include/asm-x86/summit/apic.h
@@ -22,7 +22,6 @@
 	 */
 	return cpumask_of_cpu(0);
 }
-#define TARGET_CPUS	(target_cpus())
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
 #define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
diff --git a/include/asm-x86/summit/irq_vectors_limits.h b/include/asm-x86/summit/irq_vectors_limits.h
deleted file mode 100644
index 890ce3f..0000000
--- a/include/asm-x86/summit/irq_vectors_limits.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _ASM_IRQ_VECTORS_LIMITS_H
-#define _ASM_IRQ_VECTORS_LIMITS_H
-
-/*
- * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
- * even with uni-proc kernels, so use a big array.
- *
- * This value should be the same in both the generic and summit subarches.
- * Change one, change 'em both.
- */
-#define NR_IRQS	224
-#define NR_IRQ_VECTORS	1024
-
-#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h
index 7cd6d7e..215f196 100644
--- a/include/asm-x86/uv/bios.h
+++ b/include/asm-x86/uv/bios.h
@@ -2,9 +2,7 @@
 #define ASM_X86__UV__BIOS_H
 
 /*
- * BIOS layer definitions.
- *
- *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ * UV BIOS layer definitions.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -19,11 +17,43 @@
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Russ Anderson
  */
 
 #include <linux/rtc.h>
 
-#define BIOS_FREQ_BASE			0x01000001
+/*
+ * Values for the BIOS calls.  The value is passed as the first argument in the
+ * BIOS call.  Passing any other value in the first argument will result
+ * in a BIOS_STATUS_UNIMPLEMENTED return status.
+ */
+enum uv_bios_cmd {
+	UV_BIOS_COMMON,
+	UV_BIOS_GET_SN_INFO,
+	UV_BIOS_FREQ_BASE
+};
+
+/*
+ * Status values returned from a BIOS call.
+ */
+enum {
+	BIOS_STATUS_SUCCESS		=  0,
+	BIOS_STATUS_UNIMPLEMENTED	= -ENOSYS,
+	BIOS_STATUS_EINVAL		= -EINVAL,
+	BIOS_STATUS_UNAVAIL		= -EBUSY
+};
+
+/*
+ * The UV system table describes specific firmware
+ * capabilities available to the Linux kernel at runtime.
+ */
+struct uv_systab {
+	char signature[4];	/* must be "UVST" */
+	u32 revision;		/* distinguish different firmware revs */
+	u64 function;		/* BIOS runtime callback function ptr */
+};
 
 enum {
 	BIOS_FREQ_BASE_PLATFORM = 0,
@@ -31,38 +61,34 @@
 	BIOS_FREQ_BASE_REALTIME_CLOCK = 2
 };
 
-# define BIOS_CALL(result, a0, a1, a2, a3, a4, a5, a6, a7)		\
-	do {								\
-		/* XXX - the real call goes here */			\
-		result.status = BIOS_STATUS_UNIMPLEMENTED;		\
-		isrv.v0 = 0;						\
-		isrv.v1 = 0;						\
-	} while (0)
-
-enum {
-	BIOS_STATUS_SUCCESS		=  0,
-	BIOS_STATUS_UNIMPLEMENTED	= -1,
-	BIOS_STATUS_EINVAL		= -2,
-	BIOS_STATUS_ERROR		= -3
+union partition_info_u {
+	u64	val;
+	struct {
+		u64	hub_version	:  8,
+			partition_id	: 16,
+			coherence_id	: 16,
+			region_size	: 24;
+	};
 };
 
-struct uv_bios_retval {
-	/*
-	 * A zero status value indicates call completed without error.
-	 * A negative status value indicates reason of call failure.
-	 * A positive status value indicates success but an
-	 * informational value should be printed (e.g., "reboot for
-	 * change to take effect").
-	 */
-	s64 status;
-	u64 v0;
-	u64 v1;
-	u64 v2;
-};
+/*
+ * BIOS calls take 6 parameters.
+ */
+extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
-extern long
-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
-		   unsigned long *drift_info);
-extern const char *x86_bios_strerror(long status);
+extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
+extern s64 uv_bios_freq_base(u64, u64 *);
+
+extern void uv_bios_init(void);
+
+extern int uv_type;
+extern long sn_partition_id;
+extern long uv_coherency_id;
+extern long uv_region_size;
+#define partition_coherence_id()	(uv_coherency_id)
+
+extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
 
 #endif /* ASM_X86__UV__BIOS_H */
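A standalone sketch (not part of the patch) of how the partition_info_u union above splits one u64 into its fields. The raw value is invented, and the decode relies on gcc-style bitfield allocation on a little-endian build, so treat the printed numbers as an example rather than a guarantee.

#include <stdio.h>

typedef unsigned long long u64;

union partition_info_u {
	u64	val;
	struct {
		u64	hub_version	:  8,
			partition_id	: 16,
			coherence_id	: 16,
			region_size	: 24;
	};
};

int main(void)
{
	union partition_info_u part;

	part.val = 0x0000400002000103ULL;	/* made-up firmware reply */
	printf("hub_version=%llu partition_id=%llu coherence_id=%llu region_size=%llu\n",
	       (u64)part.hub_version, (u64)part.partition_id,
	       (u64)part.coherence_id, (u64)part.region_size);
	return 0;
}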
diff --git a/include/asm-x86/uv/uv_irq.h b/include/asm-x86/uv/uv_irq.h
new file mode 100644
index 0000000..8bf5f32
--- /dev/null
+++ b/include/asm-x86/uv/uv_irq.h
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef ASM_X86__UV__UV_IRQ_H
+#define ASM_X86__UV__UV_IRQ_H
+
+/* If a generic version of this structure gets defined, eliminate this one. */
+struct uv_IO_APIC_route_entry {
+	__u64	vector		:  8,
+		delivery_mode	:  3,
+		dest_mode	:  1,
+		delivery_status	:  1,
+		polarity	:  1,
+		__reserved_1	:  1,
+		trigger		:  1,
+		mask		:  1,
+		__reserved_2	: 15,
+		dest		: 32;
+};
+
+extern struct irq_chip uv_irq_chip;
+
+extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long);
+extern void arch_disable_uv_irq(int, unsigned long);
+
+extern int uv_setup_irq(char *, int, int, unsigned long);
+extern void uv_teardown_irq(unsigned int, int, unsigned long);
+
+#endif /* ASM_X86__UV__UV_IRQ_H */
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 47c3616..07b7299 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -18,10 +18,12 @@
 
 #include <linux/types.h>
 
-#define XCHAL_KIO_CACHED_VADDR	0xf0000000
-#define XCHAL_KIO_BYPASS_VADDR	0xf8000000
+#define XCHAL_KIO_CACHED_VADDR	0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR	0xf0000000
 #define XCHAL_KIO_PADDR		0xf0000000
-#define XCHAL_KIO_SIZE		0x08000000
+#define XCHAL_KIO_SIZE		0x10000000
+
+#define IOADDR(x)		(XCHAL_KIO_BYPASS_VADDR + (x))
 
 /*
  * swap functions to change byte order from little-endian to big-endian and
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index 0aad3a5..e39edf5 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -13,6 +13,10 @@
 #ifndef _XTENSA_RWSEM_H
 #define _XTENSA_RWSEM_H
 
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index 7e4131d..0f4fe1f 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -134,6 +134,7 @@
 #define TIF_MEMDIE		5
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_FREEZE		17	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -142,6 +143,7 @@
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/include/asm-xtensa/variant-dc232b/core.h b/include/asm-xtensa/variant-dc232b/core.h
new file mode 100644
index 0000000..525bd3d
--- /dev/null
+++ b/include/asm-xtensa/variant-dc232b/core.h
@@ -0,0 +1,424 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+	    Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ *  Note:  Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ *  configured, and a value of 0 otherwise.  These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+				ISA
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE			0	/* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED		1	/* windowed registers option */
+#define XCHAL_NUM_AREGS			32	/* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2		5	/* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE	3	/* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG		1	/* debug option */
+#define XCHAL_HAVE_DENSITY		1	/* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS		1	/* zero-overhead loops */
+#define XCHAL_HAVE_NSA			1	/* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX		1	/* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT			1	/* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS		1	/* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16		1	/* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32		1	/* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH		0	/* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32		1	/* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R			1	/* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS	1	/* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16		0	/* CONST16 instruction */
+#define XCHAL_HAVE_ADDX			1	/* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES	0	/* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES	0	/* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12		1	/* (obsolete option) */
+#define XCHAL_HAVE_ABS			1	/* ABS instruction */
+/*#define XCHAL_HAVE_POPC		0*/	/* POPC instruction */
+/*#define XCHAL_HAVE_CRC		0*/	/* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC		1	/* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I		1	/* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION		0	/* speculation */
+#define XCHAL_HAVE_FULL_RESET		1	/* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS		1	/* */
+#define XCHAL_NUM_MISC_REGS		2	/* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER		0	/* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID			1	/* processor ID register */
+#define XCHAL_HAVE_THREADPTR		1	/* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS		0	/* boolean registers */
+#define XCHAL_HAVE_CP			1	/* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG			8	/* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16		1	/* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005	0	/* vector floating-point pkg */
+#define XCHAL_HAVE_FP			0	/* floating point pkg */
+#define XCHAL_HAVE_VECTRA1		0	/* Vectra I  pkg */
+#define XCHAL_HAVE_VECTRALX		0	/* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2		0	/* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+				MISC
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES	8	/* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH		4	/* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH		4	/* data width in bytes */
+/*  In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION	1	/* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION	1	/* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION		701001	/* sw version of this header */
+
+#define XCHAL_CORE_ID			"dc232b"	/* alphanum core name
+						   (CoreID) set in the Xtensa
+						   Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION		"Diamond 232L Standard Core Rev.B (LE)"
+#define XCHAL_BUILD_UNIQUE_ID		0x0000BEEF	/* 22-bit sw build ID */
+
+/*
+ *  These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0		0xC56307FE	/* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1		0x0D40BEEF	/* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME		"LX2.1.1"	/* full version name */
+#define XCHAL_HW_VERSION_MAJOR		2210	/* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR		1	/* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION		221001	/* major*100+minor */
+#define XCHAL_HW_REL_LX2		1
+#define XCHAL_HW_REL_LX2_1		1
+#define XCHAL_HW_REL_LX2_1_1		1
+#define XCHAL_HW_CONFIGID_RELIABLE	1
+/*  If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR	2210	/* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR	1	/* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION		221001	/* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR	2210	/* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR	1	/* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION		221001	/* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+				CACHE
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE		32	/* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE		32	/* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH		5	/* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH		5	/* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE		16384	/* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE		16384	/* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK	1	/* writeback feature */
+
+
+
+
+/****************************************************************************
+    Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+				CACHE
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF			1	/* any outbound PIF present */
+
+/*  If present, cache size in bytes == (ways * 2^(linewidth + setwidth)).  */
+
+/*  Number of cache sets in log2(lines per way):  */
+#define XCHAL_ICACHE_SETWIDTH		7
+#define XCHAL_DCACHE_SETWIDTH		7
+
+/*  Cache set associativity (number of ways):  */
+#define XCHAL_ICACHE_WAYS		4
+#define XCHAL_DCACHE_WAYS		4
+
+/*  Cache features:  */
+#define XCHAL_ICACHE_LINE_LOCKABLE	1
+#define XCHAL_DCACHE_LINE_LOCKABLE	1
+#define XCHAL_ICACHE_ECC_PARITY		0
+#define XCHAL_DCACHE_ECC_PARITY		0
+
+/*  Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits):  */
+#define XCHAL_CA_BITS			4
+
+
+/*----------------------------------------------------------------------
+			INTERNAL I/D RAM/ROMs and XLMI
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM		0	/* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM		0	/* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM		0	/* number of core data ROMs */
+#define XCHAL_NUM_DATARAM		0	/* number of core data RAMs */
+#define XCHAL_NUM_URAM			0	/* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI			0	/* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+			INTERRUPTS and TIMERS
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS		1	/* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS	1	/* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI			1	/* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT		1	/* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS		3	/* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS		22	/* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2	5	/* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS		17	/* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS		6	/* number of interrupt levels
+						   (not including level zero) */
+#define XCHAL_EXCM_LEVEL		3	/* level masked by PS.EXCM */
+	/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/*  Masks of interrupts at each interrupt level:  */
+#define XCHAL_INTLEVEL1_MASK		0x001F80FF
+#define XCHAL_INTLEVEL2_MASK		0x00000100
+#define XCHAL_INTLEVEL3_MASK		0x00200E00
+#define XCHAL_INTLEVEL4_MASK		0x00001000
+#define XCHAL_INTLEVEL5_MASK		0x00002000
+#define XCHAL_INTLEVEL6_MASK		0x00000000
+#define XCHAL_INTLEVEL7_MASK		0x00004000
+
+/*  Masks of interrupts at each range 1..n of interrupt levels:  */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK	0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK	0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK	0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK	0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK	0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK	0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK	0x003FFFFF
+
+/*  Level of each interrupt:  */
+#define XCHAL_INT0_LEVEL		1
+#define XCHAL_INT1_LEVEL		1
+#define XCHAL_INT2_LEVEL		1
+#define XCHAL_INT3_LEVEL		1
+#define XCHAL_INT4_LEVEL		1
+#define XCHAL_INT5_LEVEL		1
+#define XCHAL_INT6_LEVEL		1
+#define XCHAL_INT7_LEVEL		1
+#define XCHAL_INT8_LEVEL		2
+#define XCHAL_INT9_LEVEL		3
+#define XCHAL_INT10_LEVEL		3
+#define XCHAL_INT11_LEVEL		3
+#define XCHAL_INT12_LEVEL		4
+#define XCHAL_INT13_LEVEL		5
+#define XCHAL_INT14_LEVEL		7
+#define XCHAL_INT15_LEVEL		1
+#define XCHAL_INT16_LEVEL		1
+#define XCHAL_INT17_LEVEL		1
+#define XCHAL_INT18_LEVEL		1
+#define XCHAL_INT19_LEVEL		1
+#define XCHAL_INT20_LEVEL		1
+#define XCHAL_INT21_LEVEL		3
+#define XCHAL_DEBUGLEVEL		6	/* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT	1	/* OCD external db interrupt */
+#define XCHAL_NMILEVEL			7	/* NMI "level" (for use with
+						   EXCSAVE/EPS/EPC_n, RFI n) */
+
+/*  Type of each interrupt:  */
+#define XCHAL_INT0_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE 	XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE 	XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE 	XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE 	XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE 	XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE 	XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE 	XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE 	XTHAL_INTTYPE_EXTERN_EDGE
+
+/*  Masks of interrupts for each type of interrupt:  */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED	0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE	0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE	0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL	0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER	0x00002440
+#define XCHAL_INTTYPE_MASK_NMI		0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR	0x00000000
+
+/*  Interrupt numbers assigned to specific interrupt sources:  */
+#define XCHAL_TIMER0_INTERRUPT		6	/* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT		10	/* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT		13	/* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT		XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT		14	/* non-maskable interrupt */
+
+/*  Interrupt numbers for levels at which only one interrupt is configured:  */
+#define XCHAL_INTLEVEL2_NUM		8
+#define XCHAL_INTLEVEL4_NUM		12
+#define XCHAL_INTLEVEL5_NUM		13
+#define XCHAL_INTLEVEL7_NUM		14
+/*  (There are many interrupts each at level(s) 1, 3.)  */
+
+
+/*
+ *  External interrupt vectors/levels.
+ *  These macros describe how Xtensa processor interrupt numbers
+ *  (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ *  map to external BInterrupt<n> pins, for those interrupts
+ *  configured as external (level-triggered, edge-triggered, or NMI).
+ *  See the Xtensa processor databook for more details.
+ */
+
+/*  Core interrupt numbers mapped to each EXTERNAL interrupt number:  */
+#define XCHAL_EXTINT0_NUM		0	/* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM		1	/* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM		2	/* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM		3	/* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM		4	/* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM		5	/* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM		8	/* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM		9	/* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM		12	/* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM		14	/* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM		15	/* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM		16	/* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM		17	/* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM		18	/* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM		19	/* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM		20	/* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM		21	/* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+			EXCEPTIONS and VECTORS
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION		2	/* Xtensa Exception Architecture
+						   number: 1 == XEA1 (old)
+							   2 == XEA2 (new)
+							   0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1			0	/* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2			1	/* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX			0	/* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS		1	/* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY	0	/* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT	1	/* relocatable vectors */
+#define XCHAL_HAVE_VECBASE		1	/* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR	0xD0000000  /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR	0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP	0
+
+#define XCHAL_RESET_VECTOR0_VADDR	0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR	0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR	0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR	0x00000500
+#define XCHAL_RESET_VECTOR_VADDR	0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR	0xFE000000
+#define XCHAL_USER_VECOFS		0x00000340
+#define XCHAL_USER_VECTOR_VADDR		0xD0000340
+#define XCHAL_USER_VECTOR_PADDR		0x00000340
+#define XCHAL_KERNEL_VECOFS		0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR	0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR	0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS		0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR	0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR	0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS		0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS		0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS		0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS		0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS	0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS	0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR	0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR	0x00000000
+#define XCHAL_INTLEVEL2_VECOFS		0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR	0xD0000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR	0x00000180
+#define XCHAL_INTLEVEL3_VECOFS		0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR	0xD00001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR	0x000001C0
+#define XCHAL_INTLEVEL4_VECOFS		0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR	0xD0000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR	0x00000200
+#define XCHAL_INTLEVEL5_VECOFS		0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR	0xD0000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR	0x00000240
+#define XCHAL_INTLEVEL6_VECOFS		0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR	0xD0000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR	0x00000280
+#define XCHAL_DEBUG_VECOFS		XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR	XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR	XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS		0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR		0xD00002C0
+#define XCHAL_NMI_VECTOR_PADDR		0x000002C0
+#define XCHAL_INTLEVEL7_VECOFS		XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR	XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR	XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+				DEBUG
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD			1	/* OnChipDebug option */
+#define XCHAL_NUM_IBREAK		2	/* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK		2	/* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY	1	/* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+				MMU
+  ----------------------------------------------------------------------*/
+
+/*  See core-matmap.h header file for more details.  */
+
+#define XCHAL_HAVE_TLBS			1	/* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY		0	/* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP		0	/* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR		0	/* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR	0	/* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR	0	/* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU		1	/* full MMU (with page table
+						   [autorefill] and protection)
+						   usable for an MMU-based OS */
+/*  If none of the above last 4 are set, it's a custom TLB configuration.  */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2	2	/* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2	2	/* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS		8	/* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS			4	/* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS		2	/* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
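A quick consistency check (not part of the patch) of the cache geometry above: size == ways * 2^(linewidth + setwidth), i.e. 4 * 2^(5 + 7) = 16384 bytes for both the I- and D-cache of this core.

#include <stdio.h>

int main(void)
{
	unsigned ways = 4, linewidth = 5, setwidth = 7;	/* values from core.h above */
	unsigned size = ways * (1u << (linewidth + setwidth));

	printf("computed cache size = %u bytes\n", size);	/* 16384 */
	return 0;
}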
diff --git a/include/asm-xtensa/variant-dc232b/tie-asm.h b/include/asm-xtensa/variant-dc232b/tie-asm.h
new file mode 100644
index 0000000..ed4f53f
--- /dev/null
+++ b/include/asm-xtensa/variant-dc232b/tie-asm.h
@@ -0,0 +1,122 @@
+/*
+ * This header file contains assembly-language definitions (assembly
+ * macros, etc.) for this specific Xtensa processor's TIE extensions
+ * and options.  It is customized to this Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/*  Selection parameter values for save-area save/restore macros:  */
+/*  Option vs. TIE:  */
+#define XTHAL_SAS_TIE	0x0001	/* custom extension or coprocessor */
+#define XTHAL_SAS_OPT	0x0002	/* optional (and not a coprocessor) */
+/*  Whether used automatically by compiler:  */
+#define XTHAL_SAS_NOCC	0x0004	/* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC	0x0008	/* used by compiler without special opts/code */
+/*  ABI handling across function calls:  */
+#define XTHAL_SAS_CALR	0x0010	/* caller-saved */
+#define XTHAL_SAS_CALE	0x0020	/* callee-saved */
+#define XTHAL_SAS_GLOB	0x0040	/* global across function calls (in thread) */
+/*  Misc  */
+#define XTHAL_SAS_ALL	0xFFFF	/* include all default NCP contents */
+
+
+
+/* Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered):  ptr  (1 byte aligned)
+ * Scratch regs  (clobbered):  at1..at4  (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+	.macro xchal_ncp_store  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL
+	xchal_sa_start	\continue, \ofs
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-8, 4, 4
+	rsr	\at1, ACCLO		// MAC16 accumulator
+	rsr	\at2, ACCHI
+	s32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	s32i	\at2, \ptr, .Lxchal_ofs_ + 4
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 8
+	.endif
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-16, 4, 4
+	rsr	\at1, M0		// MAC16 registers
+	rsr	\at2, M1
+	s32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	s32i	\at2, \ptr, .Lxchal_ofs_ + 4
+	rsr	\at1, M2
+	rsr	\at2, M3
+	s32i	\at1, \ptr, .Lxchal_ofs_ + 8
+	s32i	\at2, \ptr, .Lxchal_ofs_ + 12
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 16
+	.endif
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
+	rsr	\at1, SCOMPARE1		// conditional store option
+	s32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
+	.endif
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
+	rur	\at1, THREADPTR		// threadptr option
+	s32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
+	.endif
+	.endm	// xchal_ncp_store
+
+/* Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered):  ptr  (1 byte aligned)
+ * Scratch regs  (clobbered):  at1..at4  (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+	.macro xchal_ncp_load  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL
+	xchal_sa_start	\continue, \ofs
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-8, 4, 4
+	l32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	l32i	\at2, \ptr, .Lxchal_ofs_ + 4
+	wsr	\at1, ACCLO		// MAC16 accumulator
+	wsr	\at2, ACCHI
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 8
+	.endif
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-16, 4, 4
+	l32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	l32i	\at2, \ptr, .Lxchal_ofs_ + 4
+	wsr	\at1, M0		// MAC16 registers
+	wsr	\at2, M1
+	l32i	\at1, \ptr, .Lxchal_ofs_ + 8
+	l32i	\at2, \ptr, .Lxchal_ofs_ + 12
+	wsr	\at1, M2
+	wsr	\at2, M3
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 16
+	.endif
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
+	l32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	wsr	\at1, SCOMPARE1		// conditional store option
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
+	.endif
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
+	l32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	wur	\at1, THREADPTR		// threadptr option
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
+	.endif
+	.endm	// xchal_ncp_load
+
+
+
+#define XCHAL_NCP_NUM_ATMPS	2
+
+
+#define XCHAL_SA_NUM_ATMPS	2
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/include/asm-xtensa/variant-dc232b/tie.h b/include/asm-xtensa/variant-dc232b/tie.h
new file mode 100644
index 0000000..018e81a
--- /dev/null
+++ b/include/asm-xtensa/variant-dc232b/tie.h
@@ -0,0 +1,131 @@
+/*
+ * This header file describes this specific Xtensa processor's TIE extensions
+ * that extend basic Xtensa core functionality.  It is customized to this
+ * Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM			1	/* number of coprocessors */
+#define XCHAL_CP_MAX			8	/* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK			0x80	/* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK		0x80	/* bitmask of only port CPs */
+
+/*  Basic parameters of each coprocessor:  */
+#define XCHAL_CP7_NAME			"XTIOP"
+#define XCHAL_CP7_IDENT			XTIOP
+#define XCHAL_CP7_SA_SIZE		0	/* size of state save area */
+#define XCHAL_CP7_SA_ALIGN		1	/* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP		7	/* coprocessor ID (0..7) */
+
+/*  Filler info for unassigned coprocessors, to simplify arrays etc:  */
+#define XCHAL_CP0_SA_SIZE		0
+#define XCHAL_CP0_SA_ALIGN		1
+#define XCHAL_CP1_SA_SIZE		0
+#define XCHAL_CP1_SA_ALIGN		1
+#define XCHAL_CP2_SA_SIZE		0
+#define XCHAL_CP2_SA_ALIGN		1
+#define XCHAL_CP3_SA_SIZE		0
+#define XCHAL_CP3_SA_ALIGN		1
+#define XCHAL_CP4_SA_SIZE		0
+#define XCHAL_CP4_SA_ALIGN		1
+#define XCHAL_CP5_SA_SIZE		0
+#define XCHAL_CP5_SA_ALIGN		1
+#define XCHAL_CP6_SA_SIZE		0
+#define XCHAL_CP6_SA_ALIGN		1
+
+/*  Save area for non-coprocessor optional and custom (TIE) state:  */
+#define XCHAL_NCP_SA_SIZE		32
+#define XCHAL_NCP_SA_ALIGN		4
+
+/*  Total save area for optional and custom state (NCP + CPn):  */
+#define XCHAL_TOTAL_SA_SIZE		32	/* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN		4	/* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE:  caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ *		dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ *	s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ *	ccused = set if used by compiler without special options or code
+ *	abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ *	kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ *	opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ *	name = lowercase reg name (no quotes)
+ *	galign = group byte alignment (power of 2) (galign >= align)
+ *	align = register byte alignment (power of 2)
+ *	asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ *	  (not including any pad bytes required to galign this or next reg)
+ *	dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ *	base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ *	regnum = reg index in regfile, or special/TIE-user reg number
+ *	bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ *	gapsz = intervening bits, if bitsz bits not stored contiguously
+ *	(padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ *	reset = register reset value (or 0 if undefined at reset)
+ *	x = reserved for future use (0 until then)
+ *
+ *  To filter out certain registers, e.g. to expand only the non-global
+ *  registers used by the compiler, you can do something like this:
+ *
+ *  #define XCHAL_SA_REG(s,ccused,p...)	SELCC##ccused(p)
+ *  #define SELCC0(p...)
+ *  #define SELCC1(abikind,p...)	SELAK##abikind(p)
+ *  #define SELAK0(p...)		REG(p)
+ *  #define SELAK1(p...)		REG(p)
+ *  #define SELAK2(p...)
+ *  #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ *		...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM	8
+#define XCHAL_NCP_SA_LIST(s)	\
+ XCHAL_SA_REG(s,1,0,0,1,          acclo, 4, 4, 4,0x0210,  sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1,          acchi, 4, 4, 4,0x0211,  sr,17 ,  8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m0, 4, 4, 4,0x0220,  sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m1, 4, 4, 4,0x0221,  sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m2, 4, 4, 4,0x0222,  sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m3, 4, 4, 4,0x0223,  sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,      scompare1, 4, 4, 4,0x020C,  sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,2,1,1,      threadptr, 4, 4, 4,0x03E7,  ur,231, 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM	0
+#define XCHAL_CP0_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP1_SA_NUM	0
+#define XCHAL_CP1_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP2_SA_NUM	0
+#define XCHAL_CP2_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP3_SA_NUM	0
+#define XCHAL_CP3_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP4_SA_NUM	0
+#define XCHAL_CP4_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP5_SA_NUM	0
+#define XCHAL_CP5_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP6_SA_NUM	0
+#define XCHAL_CP6_SA_LIST(s)	/* empty */
+
+#define XCHAL_CP7_SA_NUM	0
+#define XCHAL_CP7_SA_LIST(s)	/* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX.  */
+#define XCHAL_OP0_FORMAT_LENGTHS	3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index bf9aca5..e531783e 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -183,6 +183,7 @@
 unifdef-y += auxvec.h
 unifdef-y += binfmts.h
 unifdef-y += blktrace_api.h
+unifdef-y += byteorder.h
 unifdef-y += capability.h
 unifdef-y += capi.h
 unifdef-y += cciss_ioctl.h
@@ -340,6 +341,7 @@
 unifdef-y += stat.h
 unifdef-y += stddef.h
 unifdef-y += string.h
+unifdef-y += swab.h
 unifdef-y += synclink.h
 unifdef-y += sysctl.h
 unifdef-y += tcp.h
diff --git a/include/linux/aer.h b/include/linux/aer.h
index f251814..f7df1ee 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -10,7 +10,6 @@
 #if defined(CONFIG_PCIEAER)
 /* pci-e port driver needs this function to enable aer */
 extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-extern int pci_find_aer_capability(struct pci_dev *dev);
 extern int pci_disable_pcie_error_reporting(struct pci_dev *dev);
 extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
 #else
@@ -18,10 +17,6 @@
 {
 	return -EINVAL;
 }
-static inline int pci_find_aer_capability(struct pci_dev *dev)
-{
-	return 0;
-}
 static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
 {
 	return -EINVAL;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0a24d55..bee52ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -175,6 +175,8 @@
  * BDI_CAP_READ_MAP:       Can be mapped for reading
  * BDI_CAP_WRITE_MAP:      Can be mapped for writing
  * BDI_CAP_EXEC_MAP:       Can be mapped for execution
+ *
+ * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
  */
 #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
 #define BDI_CAP_NO_WRITEBACK	0x00000002
@@ -184,6 +186,7 @@
 #define BDI_CAP_WRITE_MAP	0x00000020
 #define BDI_CAP_EXEC_MAP	0x00000040
 #define BDI_CAP_NO_ACCT_WB	0x00000080
+#define BDI_CAP_SWAP_BACKED	0x00000100
 
 #define BDI_CAP_VMFLAGS \
 	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
@@ -248,6 +251,11 @@
 				      BDI_CAP_NO_WRITEBACK));
 }
 
+static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
+{
+	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
+}
+
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
 	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -258,4 +266,9 @@
 	return bdi_cap_account_dirty(mapping->backing_dev_info);
 }
 
+static inline bool mapping_cap_swap_backed(struct address_space *mapping)
+{
+	return bdi_cap_swap_backed(mapping->backing_dev_info);
+}
+
 #endif		/* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 7ac518e..22ea563 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -1,12 +1,3 @@
-/* Permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
- */
-
-/* macros to translate to/from binary and binary-coded decimal (frequently
- * found in RTC chips).
- */
-
 #ifndef _BCD_H
 #define _BCD_H
 
@@ -15,11 +6,4 @@
 unsigned bcd2bin(unsigned char val) __attribute_const__;
 unsigned char bin2bcd(unsigned val) __attribute_const__;
 
-#define BCD2BIN(val)	bcd2bin(val)
-#define BIN2BCD(val)	bin2bcd(val)
-
-/* backwards compat */
-#define BCD_TO_BIN(val) ((val)=BCD2BIN(val))
-#define BIN_TO_BCD(val) ((val)=BIN2BCD(val))
-
 #endif /* _BCD_H */
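For reference, a standalone sketch (not part of the patch) with the usual definitions behind the bcd2bin()/bin2bcd() declarations that replace the old BCD2BIN/BIN2BCD macros, just to make the conversion concrete.

#include <stdio.h>

static unsigned bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned val)
{
	return ((val / 10) << 4) + val % 10;
}

int main(void)
{
	printf("bcd2bin(0x42) = %u\n", bcd2bin(0x42));	/* 42 */
	printf("bin2bcd(59) = 0x%02x\n", bin2bcd(59));	/* 0x59 */
	return 0;
}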
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 89781fd..a08c33a 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -110,7 +110,6 @@
 
 extern int bitmap_scnprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
-extern int bitmap_scnprintf_len(unsigned int nr_bits);
 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
 			unsigned long *dst, int nbits);
 extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
@@ -130,6 +129,7 @@
 extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
 extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
 extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
 
 #define BITMAP_LAST_WORD_MASK(nbits)					\
 (									\
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index eadaab4..3ce64b9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -322,7 +322,7 @@
 
 static inline int trylock_buffer(struct buffer_head *bh)
 {
-	return likely(!test_and_set_bit(BH_Lock, &bh->b_state));
+	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
 }
 
 static inline void lock_buffer(struct buffer_head *bh)
diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild
index 1133d5f..fbaa7f9 100644
--- a/include/linux/byteorder/Kbuild
+++ b/include/linux/byteorder/Kbuild
@@ -1,3 +1,4 @@
 unifdef-y += big_endian.h
 unifdef-y += little_endian.h
 unifdef-y += swab.h
+unifdef-y += swabb.h
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 44f95b9..1cba3f3 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/byteorder/swab.h>
+#include <linux/byteorder/swabb.h>
 
 #define __constant_htonl(x) ((__force __be32)(__u32)(x))
 #define __constant_ntohl(x) ((__force __u32)(__be32)(x))
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 4cc170a..cedc1b5 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/byteorder/swab.h>
+#include <linux/byteorder/swabb.h>
 
 #define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
 #define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 30934e4..8b00f66 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -9,12 +9,12 @@
  */
 
 #include <linux/sched.h>
-#include <linux/kref.h>
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/rcupdate.h>
 #include <linux/cgroupstats.h>
 #include <linux/prio_heap.h>
+#include <linux/rwsem.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -137,6 +137,15 @@
 	 * release_list_lock
 	 */
 	struct list_head release_list;
+
+	/* pids_mutex protects the fields below */
+	struct rw_semaphore pids_mutex;
+	/* Array of process ids in the cgroup */
+	pid_t *tasks_pids;
+	/* How many files are using the current tasks_pids array */
+	int pids_use_count;
+	/* Length of the current tasks_pids array */
+	int pids_length;
 };
 
 /* A css_set is a structure holding pointers to a set of
@@ -149,7 +158,7 @@
 struct css_set {
 
 	/* Reference count */
-	struct kref ref;
+	atomic_t refcount;
 
 	/*
 	 * List running through all cgroup groups in the same hash
@@ -394,6 +403,9 @@
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
 
+void cgroup_mm_owner_callbacks(struct task_struct *old,
+			       struct task_struct *new);
+
 #else /* !CONFIG_CGROUPS */
 
 static inline int cgroup_init_early(void) { return 0; }
@@ -412,15 +424,9 @@
 	return -EINVAL;
 }
 
+static inline void cgroup_mm_owner_callbacks(struct task_struct *old,
+					     struct task_struct *new) {}
+
 #endif /* !CONFIG_CGROUPS */
 
-#ifdef CONFIG_MM_OWNER
-extern void
-cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
-#else /* !CONFIG_MM_OWNER */
-static inline void
-cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
-{
-}
-#endif /* CONFIG_MM_OWNER */
 #endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e287745..9c22396 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -48,3 +48,9 @@
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_FREEZER
+SUBSYS(freezer)
+#endif
+
+/* */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 55e434f..f88d32f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -45,7 +45,8 @@
  * @read:		returns a cycle value
  * @mask:		bitmask for two's complement
  *			subtraction of non 64 bit counters
- * @mult:		cycle to nanosecond multiplier
+ * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
+ * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
@@ -63,6 +64,7 @@
 	cycle_t (*read)(void);
 	cycle_t mask;
 	u32 mult;
+	u32 mult_orig;
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
@@ -77,6 +79,7 @@
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
 	u64	xtime_interval;
+	u32	raw_interval;
 	/*
 	 * Second part is written at each timer interrupt
 	 * Keep it in a different cache line to dirty no
@@ -85,6 +88,7 @@
 	cycle_t cycle_last ____cacheline_aligned_in_smp;
 	u64 xtime_nsec;
 	s64 error;
+	struct timespec raw_time;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
@@ -201,17 +205,19 @@
 {
 	u64 tmp;
 
-	/* XXX - All of this could use a whole lot of optimization */
+	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = length_nsec;
 	tmp <<= c->shift;
-	tmp += c->mult/2;
-	do_div(tmp, c->mult);
+	tmp += c->mult_orig/2;
+	do_div(tmp, c->mult_orig);
 
 	c->cycle_interval = (cycle_t)tmp;
 	if (c->cycle_interval == 0)
 		c->cycle_interval = 1;
 
+	/* Go back from cycles -> shifted ns, this time using the NTP-adjusted mult */
 	c->xtime_interval = (u64)c->cycle_interval * c->mult;
+	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
 }
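The interval calculation above, pulled out into a standalone sketch (not part of the patch). mult is the NTP-adjusted cycle-to-nanosecond multiplier, mult_orig the unadjusted one; the clock parameters below (a 1 GHz clock with shift 22 and a slightly NTP-skewed mult) are invented for the example.

#include <stdio.h>

int main(void)
{
	unsigned long long length_nsec = 10000000ULL;	/* 10 ms tick */
	unsigned int mult_orig = 4194304, mult = 4194311, shift = 22;
	unsigned long long tmp, cycle_interval, xtime_interval, raw_interval;

	/* ns -> cycles, using the unadjusted multiplier */
	tmp = (length_nsec << shift) + mult_orig / 2;
	cycle_interval = tmp / mult_orig;
	if (cycle_interval == 0)
		cycle_interval = 1;

	/* cycles -> shifted ns with the NTP-adjusted mult ... */
	xtime_interval = cycle_interval * mult;
	/* ... and cycles -> plain ns with the original mult for the raw clock */
	raw_interval = (cycle_interval * mult_orig) >> shift;

	printf("cycles=%llu xtime_interval=%llu raw_interval=%llu ns\n",
	       cycle_interval, xtime_interval, raw_interval);
	return 0;
}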
 
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8322141..98115d9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -44,6 +44,8 @@
 # error Sorry, your compiler is too old/not recognized.
 #endif
 
+#define notrace __attribute__((no_instrument_function))
+
 /* Intel compiler defines __GNUC__. So we will overwrite implementations
  * coming from above header files here
  */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 025e4f5..0acf3b7 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,12 +8,9 @@
 #include <linux/proc_fs.h>
 
 #define ELFCORE_ADDR_MAX	(-1ULL)
+#define ELFCORE_ADDR_ERR	(-2ULL)
 
-#ifdef CONFIG_PROC_VMCORE
 extern unsigned long long elfcorehdr_addr;
-#else
-static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-#endif
 
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
 						unsigned long, int);
@@ -28,10 +25,43 @@
 
 #define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
 
+/*
+ * is_kdump_kernel() checks whether this kernel is booting after a panic of
+ * the previous kernel. This is determined by checking whether the previous
+ * kernel has passed the ELF core header address on the command line.
+ *
+ * This is not just a test of whether CONFIG_CRASH_DUMP is enabled. It
+ * returns 1 only if CONFIG_CRASH_DUMP=y and the kernel is booting after a
+ * panic of the previous kernel.
+ */
+
 static inline int is_kdump_kernel(void)
 {
 	return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
 }
+
+/* is_vmcore_usable() checks whether the kernel is booting after a panic and
+ * the vmcore region is usable.
+ *
+ * This makes use of the fact that, due to alignment, -2ULL is not
+ * a valid pointer, much in the vein of IS_ERR(), except that it deals
+ * directly with an unsigned long long rather than a pointer.
+ */
+
+static inline int is_vmcore_usable(void)
+{
+	return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+}
+
+/* vmcore_unusable() marks the vmcore as unusable,
+ * without disturbing the logic of is_kdump_kernel()
+ */
+
+static inline void vmcore_unusable(void)
+{
+	if (is_kdump_kernel())
+		elfcorehdr_addr = ELFCORE_ADDR_ERR;
+}
 #else /* !CONFIG_CRASH_DUMP */
 static inline int is_kdump_kernel(void) { return 0; }
 #endif /* CONFIG_CRASH_DUMP */
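A standalone sketch (not part of the patch) of the elfcorehdr_addr state machine the helpers above implement: ELFCORE_ADDR_MAX means no crash kernel, ELFCORE_ADDR_ERR means a crash kernel whose vmcore turned out to be unusable, and anything else is the header address passed by the previous kernel. The address used below is invented.

#include <stdio.h>

#define ELFCORE_ADDR_MAX	(-1ULL)
#define ELFCORE_ADDR_ERR	(-2ULL)

static unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

static int is_kdump_kernel(void)
{
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}

static int is_vmcore_usable(void)
{
	return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR;
}

static void vmcore_unusable(void)
{
	if (is_kdump_kernel())
		elfcorehdr_addr = ELFCORE_ADDR_ERR;
}

int main(void)
{
	elfcorehdr_addr = 0x1000000ULL;		/* as if elfcorehdr= had been passed */
	printf("kdump=%d usable=%d\n", is_kdump_kernel(), is_vmcore_usable());

	vmcore_unusable();			/* e.g. the header failed validation */
	printf("kdump=%d usable=%d\n", is_kdump_kernel(), is_vmcore_usable());
	return 0;
}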
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 08d7835..dfb30db 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -354,6 +354,9 @@
  */
 #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
 
+#define dm_array_too_big(fixed, obj, num) \
+	((num) > (UINT_MAX - (fixed)) / (obj))
+
 static inline sector_t to_sector(unsigned long n)
 {
 	return (n >> SECTOR_SHIFT);
diff --git a/include/linux/device.h b/include/linux/device.h
index 987f591..1a3686d1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -450,7 +450,7 @@
 }
 #endif
 
-static inline void *dev_get_drvdata(struct device *dev)
+static inline void *dev_get_drvdata(const struct device *dev)
 {
 	return dev->driver_data;
 }
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
new file mode 100644
index 0000000..a9e652a
--- /dev/null
+++ b/include/linux/dm-region-hash.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper dirty region hash interface.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_REGION_HASH_H
+#define DM_REGION_HASH_H
+
+#include <linux/dm-dirty-log.h>
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *----------------------------------------------------------------*/
+struct dm_region_hash;
+struct dm_region;
+
+/*
+ * States a region can have.
+ */
+enum dm_rh_region_states {
+	DM_RH_CLEAN	 = 0x01,	/* No writes in flight. */
+	DM_RH_DIRTY	 = 0x02,	/* Writes in flight. */
+	DM_RH_NOSYNC	 = 0x04,	/* Out of sync. */
+	DM_RH_RECOVERING = 0x08,	/* Under resynchronization. */
+};
+
+/*
+ * Region hash create/destroy.
+ */
+struct bio_list;
+struct dm_region_hash *dm_region_hash_create(
+		void *context, void (*dispatch_bios)(void *context,
+						     struct bio_list *bios),
+		void (*wakeup_workers)(void *context),
+		void (*wakeup_all_recovery_waiters)(void *context),
+		sector_t target_begin, unsigned max_recovery,
+		struct dm_dirty_log *log, uint32_t region_size,
+		region_t nr_regions);
+void dm_region_hash_destroy(struct dm_region_hash *rh);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
+
+/*
+ * Conversion functions.
+ */
+region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
+sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
+void *dm_rh_region_context(struct dm_region *reg);
+
+/*
+ * Get region size and key (ie. number of the region).
+ */
+sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
+region_t dm_rh_get_region_key(struct dm_region *reg);
+
+/*
+ * Get/set/update region state (and dirty log).
+ *
+ */
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
+void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
+		     enum dm_rh_region_states state, int may_block);
+
+/* Non-zero errors_handled leaves the state of the region NOSYNC */
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
+
+/* Flush the region hash and dirty log. */
+int dm_rh_flush(struct dm_region_hash *rh);
+
+/* Inc/dec pending count on regions. */
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
+void dm_rh_dec(struct dm_region_hash *rh, region_t region);
+
+/* Delay bios on regions. */
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
+
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+		       struct bio *bio, unsigned done, int error);
+
+/*
+ * Region recovery control.
+ */
+
+/* Prepare some regions for recovery by starting to quiesce them. */
+void dm_rh_recovery_prepare(struct dm_region_hash *rh);
+
+/* Try fetching a quiesced region for recovery. */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh);
+
+/* Report recovery end on a region. */
+void dm_rh_recovery_end(struct dm_region *reg, int error);
+
+/* Returns number of regions with recovery work outstanding. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh);
+
+/* Start/stop recovery. */
+void dm_rh_start_recovery(struct dm_region_hash *rh);
+void dm_rh_stop_recovery(struct dm_region_hash *rh);
+
+#endif /* DM_REGION_HASH_H */
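A hedged sketch of how a mirror-style target might wire up the create call declared above; the my_* callbacks are hypothetical placeholders for the target's own bio queueing and work-queue kicks.

#include <linux/dm-region-hash.h>

static void my_dispatch_bios(void *context, struct bio_list *bios)
{
	/* queue the delayed writes for submission */
}

static void my_wakeup_workers(void *context)
{
	/* kick the target's workqueue */
}

static void my_wakeup_recovery_waiters(void *context)
{
	/* wake anyone waiting for recovery to drain */
}

static struct dm_region_hash *my_rh_create(void *ms, struct dm_dirty_log *log,
					   sector_t target_begin,
					   uint32_t region_size,
					   region_t nr_regions)
{
	return dm_region_hash_create(ms, my_dispatch_bios, my_wakeup_workers,
				     my_wakeup_recovery_waiters, target_begin,
				     8 /* max_recovery */, log,
				     region_size, nr_regions);
}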
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65..952df39 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
 #define _DMA_REMAPPING_H
 
 /*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
 
@@ -25,7 +24,7 @@
 	u64	val;
 	u64	rsvd1;
 };
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 static inline bool root_present(struct root_entry *root)
 {
 	return (root->val & 1);
@@ -36,7 +35,7 @@
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
-	root->val |= value & PAGE_MASK_4K;
+	root->val |= value & VTD_PAGE_MASK;
 }
 
 struct context_entry;
@@ -45,7 +44,7 @@
 {
 	return (struct context_entry *)
 		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
+		root->val & VTD_PAGE_MASK) :
 		NULL);
 }
 
@@ -67,7 +66,7 @@
 #define context_present(c) ((c).lo & 1)
 #define context_fault_disable(c) (((c).lo >> 1) & 1)
 #define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
 #define context_address_width(c) ((c).hi &  7)
 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
 
@@ -81,7 +80,7 @@
 	} while (0)
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
 #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
 #define context_set_domain_id(c, val) \
 	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@
 #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
 #define dma_set_pte_prot(p, prot) \
 		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
 #define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
 
 struct intel_iommu;
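The renamed macros keep the same 4KiB arithmetic; a small user-space check of the rounding behaviour:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT		12
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((uint64_t)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

int main(void)
{
	uint64_t addr = 0x12345;

	/* rounds up to the next 4KiB boundary: 0x12345 -> 0x13000 */
	printf("%#llx -> %#llx\n", (unsigned long long)addr,
	       (unsigned long long)VTD_PAGE_ALIGN(addr));
	return 0;
}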
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index c360c55..f1984fc 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -45,7 +45,6 @@
 	list_for_each_entry(drhd, &dmar_drhd_units, list)
 
 extern int dmar_table_init(void);
-extern int early_dmar_detect(void);
 extern int dmar_dev_scope_init(void);
 
 /* Intel IOMMU detection */
diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h
index 6e4ace2..79a8ed8 100644
--- a/include/linux/dvb/frontend.h
+++ b/include/linux/dvb/frontend.h
@@ -166,6 +166,7 @@
 	VSB_16,
 	PSK_8,
 	APSK_16,
+	APSK_32,
 	DQPSK,
 } fe_modulation_t;
 
@@ -295,6 +296,7 @@
 	SYS_DVBC_ANNEX_AC,
 	SYS_DVBC_ANNEX_B,
 	SYS_DVBT,
+	SYS_DSS,
 	SYS_DVBS,
 	SYS_DVBS2,
 	SYS_DVBH,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 807373d..bb66feb 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -208,6 +208,9 @@
 #define EFI_GLOBAL_VARIABLE_GUID \
     EFI_GUID(  0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
 
+#define UV_SYSTEM_TABLE_GUID \
+    EFI_GUID(  0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+
 typedef struct {
 	efi_guid_t guid;
 	unsigned long table;
@@ -255,6 +258,7 @@
 	unsigned long boot_info;	/* boot info table */
 	unsigned long hcdp;		/* HCDP table */
 	unsigned long uga;		/* UGA table */
+	unsigned long uv_systab;	/* UV system table */
 	efi_get_time_t *get_time;
 	efi_set_time_t *set_time;
 	efi_get_wakeup_time_t *get_wakeup_time;
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 159d9b4..d14f029 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -380,6 +380,8 @@
 #define EXT3_MOUNT_QUOTA		0x80000 /* Some quota option set */
 #define EXT3_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
 #define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
+#define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
+						  * error in ordered mode */
 
 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
 #ifndef _LINUX_EXT2_FS_H
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 531ccd5..75a81ea 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -808,6 +808,7 @@
 struct fb_info {
 	int node;
 	int flags;
+	struct mutex lock;		/* Lock for open/release/ioctl funcs */
 	struct fb_var_screeninfo var;	/* Current var */
 	struct fb_fix_screeninfo fix;	/* Current fix */
 	struct fb_monspecs monspecs;	/* Current Monitor specs */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index deddeed..8f22533 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FREEZER
 /*
  * Check if a process has been frozen
  */
@@ -39,28 +39,18 @@
 	clear_tsk_thread_flag(p, TIF_FREEZE);
 }
 
+static inline bool should_send_signal(struct task_struct *p)
+{
+	return !(p->flags & PF_FREEZER_NOSIG);
+}
+
 /*
  * Wake up a frozen process
- *
- * task_lock() is taken to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails.  Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
  */
-static inline int thaw_process(struct task_struct *p)
-{
-	task_lock(p);
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		task_unlock(p);
-		wake_up_process(p);
-		return 1;
-	}
-	clear_freeze_flag(p);
-	task_unlock(p);
-	return 0;
-}
+extern int __thaw_process(struct task_struct *p);
+
+/* Takes and releases task alloc lock using task_lock() */
+extern int thaw_process(struct task_struct *p);
 
 extern void refrigerator(void);
 extern int freeze_processes(void);
@@ -75,6 +65,15 @@
 		return 0;
 }
 
+extern bool freeze_task(struct task_struct *p, bool sig_only);
+extern void cancel_freezing(struct task_struct *p);
+
+#ifdef CONFIG_CGROUP_FREEZER
+extern int cgroup_frozen(struct task_struct *task);
+#else /* !CONFIG_CGROUP_FREEZER */
+static inline int cgroup_frozen(struct task_struct *task) { return 0; }
+#endif /* !CONFIG_CGROUP_FREEZER */
+
 /*
  * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
  * calls wait_for_completion(&vfork) and reset right after it returns from this
@@ -166,7 +165,7 @@
 	} while (try_to_freeze());					\
 	__retval;							\
 })
-#else /* !CONFIG_PM_SLEEP */
+#else /* !CONFIG_FREEZER */
 static inline int frozen(struct task_struct *p) { return 0; }
 static inline int freezing(struct task_struct *p) { return 0; }
 static inline void set_freeze_flag(struct task_struct *p) {}
@@ -191,6 +190,6 @@
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 		wait_event_interruptible_timeout(wq, condition, timeout)
 
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* !CONFIG_FREEZER */
 
 #endif	/* FREEZER_H_INCLUDED */
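A hedged sketch of a freezer-aware kernel thread using wait_event_freezable(), which after this hunk depends on CONFIG_FREEZER rather than CONFIG_PM_SLEEP; the my_* names are hypothetical.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int my_pending;

static int my_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* sleeps interruptibly and calls try_to_freeze() for us */
		wait_event_freezable(my_waitq,
				     my_pending || kthread_should_stop());
		my_pending = 0;
		/* ... handle one unit of work ... */
	}
	return 0;
}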
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index bb38406..a3d4615 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,10 +1,14 @@
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
 
-#ifdef CONFIG_FTRACE
-
 #include <linux/linkage.h>
 #include <linux/fs.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kallsyms.h>
+
+#ifdef CONFIG_FTRACE
 
 extern int ftrace_enabled;
 extern int
@@ -36,6 +40,7 @@
 # define register_ftrace_function(ops) do { } while (0)
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
+static inline void ftrace_kill_atomic(void) { }
 #endif /* CONFIG_FTRACE */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -76,8 +81,10 @@
 
 extern int skip_trace(unsigned long ip);
 
-void ftrace_disable_daemon(void);
-void ftrace_enable_daemon(void);
+extern void ftrace_release(void *start, unsigned long size);
+
+extern void ftrace_disable_daemon(void);
+extern void ftrace_enable_daemon(void);
 
 #else
 # define skip_trace(ip)				({ 0; })
@@ -85,6 +92,7 @@
 # define ftrace_set_filter(buf, len, reset)	do { } while (0)
 # define ftrace_disable_daemon()		do { } while (0)
 # define ftrace_enable_daemon()			do { } while (0)
+static inline void ftrace_release(void *start, unsigned long size) { }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* totally disable ftrace - can not re-enable after this */
@@ -98,9 +106,11 @@
 #endif
 }
 
-/* Ftrace disable/restore without lock. Some synchronization mechanism
+/*
+ * Ftrace disable/restore without lock. Some synchronization mechanism
  * must be used to prevent ftrace_enabled to be changed between
- * disable/restore. */
+ * disable/restore.
+ */
 static inline int __ftrace_enabled_save(void)
 {
 #ifdef CONFIG_FTRACE
@@ -157,9 +167,71 @@
 #ifdef CONFIG_TRACING
 extern void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+/**
+ * ftrace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __ftrace_printk is an internal function for ftrace_printk and
+ *       the @ip is passed in via the ftrace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering printk-like trace
+ * points through the code, a developer can quickly see where problems
+ * are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving ftrace_printks scattered around in
+ * your code.
+ */
+# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
+extern int
+__ftrace_printk(unsigned long ip, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern void ftrace_dump(void);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
+
+static inline int
+ftrace_printk(const char *fmt, ...)
+{
+	return 0;
+}
+static inline void ftrace_dump(void) { }
 #endif
 
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+extern void ftrace_init(void);
+extern void ftrace_init_module(unsigned long *start, unsigned long *end);
+#else
+static inline void ftrace_init(void) { }
+static inline void
+ftrace_init_module(unsigned long *start, unsigned long *end) { }
+#endif
+
+
+struct boot_trace {
+	pid_t			caller;
+	char			func[KSYM_NAME_LEN];
+	int			result;
+	unsigned long long	duration;		/* usecs */
+	ktime_t			calltime;
+	ktime_t			rettime;
+};
+
+#ifdef CONFIG_BOOT_TRACER
+extern void trace_boot(struct boot_trace *it, initcall_t fn);
+extern void start_boot_trace(void);
+extern void stop_boot_trace(void);
+#else
+static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
+static inline void start_boot_trace(void) { }
+static inline void stop_boot_trace(void) { }
+#endif
+
+
+
 #endif /* _LINUX_FTRACE_H */
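A hedged sketch of the usage the ftrace_printk() kernel-doc above has in mind: a temporary trace marker in a hot path, with a one-shot dump on failure. my_hot_path() is hypothetical.

#include <linux/ftrace.h>

static void my_hot_path(unsigned long nr, int ret)
{
	/* goes to the ftrace ring buffer, not the console */
	ftrace_printk("request %lu completed, ret=%d\n", nr, ret);

	if (ret < 0)
		ftrace_dump();	/* one-shot dump of the trace buffer */
}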
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 265635d..350fe97 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -17,8 +17,14 @@
  *  - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
  *  - add blksize field to fuse_attr
  *  - add file flags field to fuse_read_in and fuse_write_in
+ *
+ * 7.10
+ *  - add nonseekable open flag
  */
 
+#ifndef _LINUX_FUSE_H
+#define _LINUX_FUSE_H
+
 #include <asm/types.h>
 #include <linux/major.h>
 
@@ -26,7 +32,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 9
+#define FUSE_KERNEL_MINOR_VERSION 10
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -98,9 +104,11 @@
  *
  * FOPEN_DIRECT_IO: bypass page cache for this open file
  * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
+ * FOPEN_NONSEEKABLE: the file is not seekable
  */
 #define FOPEN_DIRECT_IO		(1 << 0)
 #define FOPEN_KEEP_CACHE	(1 << 1)
+#define FOPEN_NONSEEKABLE	(1 << 2)
 
 /**
  * INIT request/reply flags
@@ -409,3 +417,5 @@
 #define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1))
 #define FUSE_DIRENT_SIZE(d) \
 	FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
+
+#endif /* _LINUX_FUSE_H */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2f245fe..9a4e35c 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -125,12 +125,12 @@
 	enum hrtimer_restart		(*function)(struct hrtimer *);
 	struct hrtimer_clock_base	*base;
 	unsigned long			state;
-	enum hrtimer_cb_mode		cb_mode;
 	struct list_head		cb_entry;
+	enum hrtimer_cb_mode		cb_mode;
 #ifdef CONFIG_TIMER_STATS
+	int				start_pid;
 	void				*start_site;
 	char				start_comm[16];
-	int				start_pid;
 #endif
 };
 
@@ -155,10 +155,8 @@
  * @first:		pointer to the timer node which expires first
  * @resolution:		the resolution of the clock, in nanoseconds
  * @get_time:		function to retrieve the current time of the clock
- * @get_softirq_time:	function to retrieve the current time from the softirq
  * @softirq_time:	the time when running the hrtimer queue in the softirq
  * @offset:		offset of this clock to the monotonic base
- * @reprogram:		function to reprogram the timer event
  */
 struct hrtimer_clock_base {
 	struct hrtimer_cpu_base	*cpu_base;
@@ -167,13 +165,9 @@
 	struct rb_node		*first;
 	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
-	ktime_t			(*get_softirq_time)(void);
 	ktime_t			softirq_time;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t			offset;
-	int			(*reprogram)(struct hrtimer *t,
-					     struct hrtimer_clock_base *b,
-					     ktime_t n);
 #endif
 };
 
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 0177d28..0f91a95 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -31,7 +31,10 @@
 	int  (*getpcf) (void *data, int ctl);
 	int  (*getown) (void *data);
 	int  (*getclock) (void *data);
-	void (*waitforpin) (void);
+	void (*waitforpin) (void *data);
+
+	void (*xfer_begin) (void *data);
+	void (*xfer_end) (void *data);
 
 	/* Multi-master lost arbitration back-off delay (msecs)
 	 * This should be set by the bus adapter or knowledgable client
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 493435b..01d67ba 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -60,7 +60,7 @@
 #define I2C_DRIVERID_WM8775	69	/* wm8775 audio processor	*/
 #define I2C_DRIVERID_CS53L32A	70	/* cs53l32a audio processor	*/
 #define I2C_DRIVERID_CX25840	71	/* cx2584x video encoder	*/
-#define I2C_DRIVERID_SAA7127	72	/* saa7124 video encoder	*/
+#define I2C_DRIVERID_SAA7127	72	/* saa7127 video encoder	*/
 #define I2C_DRIVERID_SAA711X	73	/* saa711x video encoders	*/
 #define I2C_DRIVERID_AKITAIOEXP	74	/* IO Expander on Sharp SL-C1000 */
 #define I2C_DRIVERID_INFRARED	75	/* I2C InfraRed on Video boards */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 0611512..33a5992 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,45 +53,44 @@
  * transmit one message at a time, a more complex version can be used to
  * transmit an arbitrary number of messages without interruption.
  */
-extern int i2c_master_send(struct i2c_client *,const char* ,int);
-extern int i2c_master_recv(struct i2c_client *,char* ,int);
+extern int i2c_master_send(struct i2c_client *client, const char *buf,
+			   int count);
+extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
 
 /* Transfer num messages.
  */
-extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
-
+extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+			int num);
 
 /* This is the very generalized SMBus access routine. You probably do not
    want to use this, though; one of the functions below may be much easier,
    and probably just as fast.
    Note that we use i2c_adapter here, because you do not need a specific
    smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
-                           unsigned short flags,
-                           char read_write, u8 command, int size,
-                           union i2c_smbus_data * data);
+extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+			  unsigned short flags, char read_write, u8 command,
+			  int size, union i2c_smbus_data *data);
 
 /* Now follow the 'nice' access routines. These also document the calling
    conventions of i2c_smbus_xfer. */
 
-extern s32 i2c_smbus_read_byte(struct i2c_client * client);
-extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
-extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
-extern s32 i2c_smbus_write_byte_data(struct i2c_client * client,
-                                     u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command);
-extern s32 i2c_smbus_write_word_data(struct i2c_client * client,
-                                     u8 command, u16 value);
+extern s32 i2c_smbus_read_byte(struct i2c_client *client);
+extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
+extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
+extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
+				     u8 command, u8 value);
+extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
+extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
+				     u8 command, u16 value);
 /* Returns the number of read bytes */
 extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
 				     u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(struct i2c_client * client,
-				      u8 command, u8 length,
-				      const u8 *values);
+extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
+				      u8 command, u8 length, const u8 *values);
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client,
+extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
 					 u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
+extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
 					  u8 command, u8 length,
 					  const u8 *values);
 
@@ -169,7 +168,7 @@
 	/* a ioctl like command that can be used to perform specific functions
 	 * with the device.
 	 */
-	int (*command)(struct i2c_client *client,unsigned int cmd, void *arg);
+	int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
 
 	struct device_driver driver;
 	const struct i2c_device_id *id_table;
@@ -224,14 +223,14 @@
 	return to_i2c_client(dev);
 }
 
-static inline void *i2c_get_clientdata (struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *dev)
 {
-	return dev_get_drvdata (&dev->dev);
+	return dev_get_drvdata(&dev->dev);
 }
 
-static inline void i2c_set_clientdata (struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
 {
-	dev_set_drvdata (&dev->dev, data);
+	dev_set_drvdata(&dev->dev, data);
 }
 
 /**
@@ -240,6 +239,7 @@
  * @flags: to initialize i2c_client.flags
  * @addr: stored in i2c_client.addr
  * @platform_data: stored in i2c_client.dev.platform_data
+ * @archdata: copied into i2c_client.dev.archdata
  * @irq: stored in i2c_client.irq
  *
  * I2C doesn't actually support hardware probing, although controllers and
@@ -259,6 +259,7 @@
 	unsigned short	flags;
 	unsigned short	addr;
 	void		*platform_data;
+	struct dev_archdata	*archdata;
 	int		irq;
 };
 
@@ -272,7 +273,7 @@
  * fields (such as associated irq, or device-specific platform_data)
  * are provided using conventional syntax.
  */
-#define I2C_BOARD_INFO(dev_type,dev_addr) \
+#define I2C_BOARD_INFO(dev_type, dev_addr) \
 	.type = (dev_type), .addr = (dev_addr)
 
 
@@ -306,10 +307,12 @@
  */
 #ifdef CONFIG_I2C_BOARDINFO
 extern int
-i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n);
+i2c_register_board_info(int busnum, struct i2c_board_info const *info,
+			unsigned n);
 #else
 static inline int
-i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n)
+i2c_register_board_info(int busnum, struct i2c_board_info const *info,
+			unsigned n)
 {
 	return 0;
 }
@@ -328,11 +331,11 @@
 	   using common I2C messages */
 	/* master_xfer should return the number of messages successfully
 	   processed, or a negative value on error */
-	int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs,
-	                   int num);
+	int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+			   int num);
 	int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
-	                   unsigned short flags, char read_write,
-	                   u8 command, int size, union i2c_smbus_data * data);
+			   unsigned short flags, char read_write,
+			   u8 command, int size, union i2c_smbus_data *data);
 
 	/* To determine what the adapter supports */
 	u32 (*functionality) (struct i2c_adapter *);
@@ -345,7 +348,7 @@
 struct i2c_adapter {
 	struct module *owner;
 	unsigned int id;
-	unsigned int class;
+	unsigned int class;		  /* classes to allow probing for */
 	const struct i2c_algorithm *algo; /* the algorithm to access the bus */
 	void *algo_data;
 
@@ -369,14 +372,14 @@
 };
 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
 
-static inline void *i2c_get_adapdata (struct i2c_adapter *dev)
+static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
 {
-	return dev_get_drvdata (&dev->dev);
+	return dev_get_drvdata(&dev->dev);
 }
 
-static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
+static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
 {
-	dev_set_drvdata (&dev->dev, data);
+	dev_set_drvdata(&dev->dev, data);
 }
 
 /*flags for the client struct: */
@@ -449,7 +452,7 @@
 		const struct i2c_client_address_data *address_data,
 		int (*found_proc) (struct i2c_adapter *, int, int));
 
-extern struct i2c_adapter* i2c_get_adapter(int id);
+extern struct i2c_adapter *i2c_get_adapter(int id);
 extern void i2c_put_adapter(struct i2c_adapter *adap);
 
 
@@ -465,7 +468,7 @@
 	return (func & i2c_get_functionality(adap)) == func;
 }
 
-/* Return id number for a specific adapter */
+/* Return the adapter number for a specific adapter */
 static inline int i2c_adapter_id(struct i2c_adapter *adap)
 {
 	return adap->nr;
@@ -526,7 +529,7 @@
 
 #define I2C_FUNC_I2C			0x00000001
 #define I2C_FUNC_10BIT_ADDR		0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING	0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */
+#define I2C_FUNC_PROTOCOL_MANGLING	0x00000004 /* I2C_M_NOSTART etc. */
 #define I2C_FUNC_SMBUS_PEC		0x00000008
 #define I2C_FUNC_SMBUS_BLOCK_PROC_CALL	0x00008000 /* SMBus 2.0 */
 #define I2C_FUNC_SMBUS_QUICK		0x00010000
@@ -541,30 +544,26 @@
 #define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
 #define I2C_FUNC_SMBUS_READ_I2C_BLOCK	0x04000000 /* I2C-like block xfer  */
 #define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK	0x08000000 /* w/ 1-byte reg. addr. */
-#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2	 0x10000000 /* I2C-like block xfer  */
-#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2 0x20000000 /* w/ 2-byte reg. addr. */
 
-#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
-                             I2C_FUNC_SMBUS_WRITE_BYTE)
-#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \
-                                  I2C_FUNC_SMBUS_WRITE_BYTE_DATA)
-#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \
-                                  I2C_FUNC_SMBUS_WRITE_WORD_DATA)
-#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
-                                   I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)
-#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
-                                  I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)
-#define I2C_FUNC_SMBUS_I2C_BLOCK_2 (I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 | \
-                                    I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2)
+#define I2C_FUNC_SMBUS_BYTE		(I2C_FUNC_SMBUS_READ_BYTE | \
+					 I2C_FUNC_SMBUS_WRITE_BYTE)
+#define I2C_FUNC_SMBUS_BYTE_DATA	(I2C_FUNC_SMBUS_READ_BYTE_DATA | \
+					 I2C_FUNC_SMBUS_WRITE_BYTE_DATA)
+#define I2C_FUNC_SMBUS_WORD_DATA	(I2C_FUNC_SMBUS_READ_WORD_DATA | \
+					 I2C_FUNC_SMBUS_WRITE_WORD_DATA)
+#define I2C_FUNC_SMBUS_BLOCK_DATA	(I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
+					 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)
+#define I2C_FUNC_SMBUS_I2C_BLOCK	(I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
+					 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)
 
-#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \
-                             I2C_FUNC_SMBUS_BYTE | \
-                             I2C_FUNC_SMBUS_BYTE_DATA | \
-                             I2C_FUNC_SMBUS_WORD_DATA | \
-                             I2C_FUNC_SMBUS_PROC_CALL | \
-                             I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
-			     I2C_FUNC_SMBUS_I2C_BLOCK | \
-			     I2C_FUNC_SMBUS_PEC)
+#define I2C_FUNC_SMBUS_EMUL		(I2C_FUNC_SMBUS_QUICK | \
+					 I2C_FUNC_SMBUS_BYTE | \
+					 I2C_FUNC_SMBUS_BYTE_DATA | \
+					 I2C_FUNC_SMBUS_WORD_DATA | \
+					 I2C_FUNC_SMBUS_PROC_CALL | \
+					 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
+					 I2C_FUNC_SMBUS_I2C_BLOCK | \
+					 I2C_FUNC_SMBUS_PEC)
 
 /*
  * Data for SMBus Messages
@@ -574,7 +573,7 @@
 	__u8 byte;
 	__u16 word;
 	__u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */
-	                       /* and one more for user-space compatibility */
+			       /* and one more for user-space compatibility */
 };
 
 /* i2c_smbus_xfer read or write markers */
@@ -602,21 +601,21 @@
 
 /* Default fill of many variables */
 #define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
-                          I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+			     I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
 
 /* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
    module header */
@@ -625,7 +624,7 @@
   static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
   static unsigned int var##_num; \
   module_param_array(var, short, &var##_num, 0); \
-  MODULE_PARM_DESC(var,desc)
+  MODULE_PARM_DESC(var, desc)
 
 #define I2C_CLIENT_MODULE_PARM_FORCE(name)				\
 I2C_CLIENT_MODULE_PARM(force_##name,					\
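A hedged sketch of a client driver calling the reformatted SMBus helpers; the MY_REG_* offsets and my_chip_init() are hypothetical.

#include <linux/i2c.h>

#define MY_REG_ID	0x00	/* hypothetical chip ID register */
#define MY_REG_CTRL	0x01	/* hypothetical control register */

static int my_chip_init(struct i2c_client *client)
{
	s32 id = i2c_smbus_read_byte_data(client, MY_REG_ID);

	if (id < 0)
		return id;	/* negative errno from the adapter */

	/* enable the device (0x80 is a made-up control value) */
	return i2c_smbus_write_byte_data(client, MY_REG_CTRL, 0x80);
}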
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
new file mode 100644
index 0000000..fb604dc
--- /dev/null
+++ b/include/linux/i2c/twl4030.h
@@ -0,0 +1,343 @@
+/*
+ * twl4030.h - header for TWL4030 PM and audio CODEC device
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ */
+
+#ifndef __TWL4030_H_
+#define __TWL4030_H_
+
+/*
+ * Using the twl4030 core we address registers using a pair
+ *	{ module id, relative register offset }
+ * which that core then maps to the relevant
+ *	{ i2c slave, absolute register address }
+ *
+ * The module IDs are meaningful only to the twl4030 core code,
+ * which uses them as array indices to look up the first register
+ * address each module uses within a given i2c slave.
+ */
+
+/* Slave 0 (i2c address 0x48) */
+#define TWL4030_MODULE_USB		0x00
+
+/* Slave 1 (i2c address 0x49) */
+#define TWL4030_MODULE_AUDIO_VOICE	0x01
+#define TWL4030_MODULE_GPIO		0x02
+#define TWL4030_MODULE_INTBR		0x03
+#define TWL4030_MODULE_PIH		0x04
+#define TWL4030_MODULE_TEST		0x05
+
+/* Slave 2 (i2c address 0x4a) */
+#define TWL4030_MODULE_KEYPAD		0x06
+#define TWL4030_MODULE_MADC		0x07
+#define TWL4030_MODULE_INTERRUPTS	0x08
+#define TWL4030_MODULE_LED		0x09
+#define TWL4030_MODULE_MAIN_CHARGE	0x0A
+#define TWL4030_MODULE_PRECHARGE	0x0B
+#define TWL4030_MODULE_PWM0		0x0C
+#define TWL4030_MODULE_PWM1		0x0D
+#define TWL4030_MODULE_PWMA		0x0E
+#define TWL4030_MODULE_PWMB		0x0F
+
+/* Slave 3 (i2c address 0x4b) */
+#define TWL4030_MODULE_BACKUP		0x10
+#define TWL4030_MODULE_INT		0x11
+#define TWL4030_MODULE_PM_MASTER	0x12
+#define TWL4030_MODULE_PM_RECEIVER	0x13
+#define TWL4030_MODULE_RTC		0x14
+#define TWL4030_MODULE_SECURED_REG	0x15
+
+/*
+ * Read and write single 8-bit registers
+ */
+int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+
+/*
+ * Read and write several 8-bit registers at once.
+ *
+ * IMPORTANT:  For twl4030_i2c_write(), allocate num_bytes + 1
+ * for the value, and populate your data starting at offset 1.
+ */
+int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, u8 num_bytes);
+int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, u8 num_bytes);
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE:  at up to 1024 registers, this is a big chip.
+ *
+ * Avoid putting register declarations in this file; put them in a
+ * driver-private file instead, unless some of the registers in a block
+ * need to be shared with other drivers.  One example is blocks that
+ * have Secondary IRQ Handler (SIH) registers.
+ */
+
+#define TWL4030_SIH_CTRL_EXCLEN_MASK	BIT(0)
+#define TWL4030_SIH_CTRL_PENDDIS_MASK	BIT(1)
+#define TWL4030_SIH_CTRL_COR_MASK	BIT(2)
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * GPIO Block Register offsets (use TWL4030_MODULE_GPIO)
+ */
+
+#define REG_GPIODATAIN1			0x0
+#define REG_GPIODATAIN2			0x1
+#define REG_GPIODATAIN3			0x2
+#define REG_GPIODATADIR1		0x3
+#define REG_GPIODATADIR2		0x4
+#define REG_GPIODATADIR3		0x5
+#define REG_GPIODATAOUT1		0x6
+#define REG_GPIODATAOUT2		0x7
+#define REG_GPIODATAOUT3		0x8
+#define REG_CLEARGPIODATAOUT1		0x9
+#define REG_CLEARGPIODATAOUT2		0xA
+#define REG_CLEARGPIODATAOUT3		0xB
+#define REG_SETGPIODATAOUT1		0xC
+#define REG_SETGPIODATAOUT2		0xD
+#define REG_SETGPIODATAOUT3		0xE
+#define REG_GPIO_DEBEN1			0xF
+#define REG_GPIO_DEBEN2			0x10
+#define REG_GPIO_DEBEN3			0x11
+#define REG_GPIO_CTRL			0x12
+#define REG_GPIOPUPDCTR1		0x13
+#define REG_GPIOPUPDCTR2		0x14
+#define REG_GPIOPUPDCTR3		0x15
+#define REG_GPIOPUPDCTR4		0x16
+#define REG_GPIOPUPDCTR5		0x17
+#define REG_GPIO_ISR1A			0x19
+#define REG_GPIO_ISR2A			0x1A
+#define REG_GPIO_ISR3A			0x1B
+#define REG_GPIO_IMR1A			0x1C
+#define REG_GPIO_IMR2A			0x1D
+#define REG_GPIO_IMR3A			0x1E
+#define REG_GPIO_ISR1B			0x1F
+#define REG_GPIO_ISR2B			0x20
+#define REG_GPIO_ISR3B			0x21
+#define REG_GPIO_IMR1B			0x22
+#define REG_GPIO_IMR2B			0x23
+#define REG_GPIO_IMR3B			0x24
+#define REG_GPIO_EDR1			0x28
+#define REG_GPIO_EDR2			0x29
+#define REG_GPIO_EDR3			0x2A
+#define REG_GPIO_EDR4			0x2B
+#define REG_GPIO_EDR5			0x2C
+#define REG_GPIO_SIH_CTRL		0x2D
+
+/* Up to 18 signals are available as GPIOs, when their
+ * pins are not assigned to another use (such as ULPI/USB).
+ */
+#define TWL4030_GPIO_MAX		18
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Keypad register offsets (use TWL4030_MODULE_KEYPAD)
+ * ... SIH/interrupt only
+ */
+
+#define TWL4030_KEYPAD_KEYP_ISR1	0x11
+#define TWL4030_KEYPAD_KEYP_IMR1	0x12
+#define TWL4030_KEYPAD_KEYP_ISR2	0x13
+#define TWL4030_KEYPAD_KEYP_IMR2	0x14
+#define TWL4030_KEYPAD_KEYP_SIR		0x15	/* test register */
+#define TWL4030_KEYPAD_KEYP_EDR		0x16
+#define TWL4030_KEYPAD_KEYP_SIH_CTRL	0x17
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Multichannel ADC register offsets (use TWL4030_MODULE_MADC)
+ * ... SIH/interrupt only
+ */
+
+#define TWL4030_MADC_ISR1		0x61
+#define TWL4030_MADC_IMR1		0x62
+#define TWL4030_MADC_ISR2		0x63
+#define TWL4030_MADC_IMR2		0x64
+#define TWL4030_MADC_SIR		0x65	/* test register */
+#define TWL4030_MADC_EDR		0x66
+#define TWL4030_MADC_SIH_CTRL		0x67
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS)
+ */
+
+#define TWL4030_INTERRUPTS_BCIISR1A	0x0
+#define TWL4030_INTERRUPTS_BCIISR2A	0x1
+#define TWL4030_INTERRUPTS_BCIIMR1A	0x2
+#define TWL4030_INTERRUPTS_BCIIMR2A	0x3
+#define TWL4030_INTERRUPTS_BCIISR1B	0x4
+#define TWL4030_INTERRUPTS_BCIISR2B	0x5
+#define TWL4030_INTERRUPTS_BCIIMR1B	0x6
+#define TWL4030_INTERRUPTS_BCIIMR2B	0x7
+#define TWL4030_INTERRUPTS_BCISIR1	0x8	/* test register */
+#define TWL4030_INTERRUPTS_BCISIR2	0x9	/* test register */
+#define TWL4030_INTERRUPTS_BCIEDR1	0xa
+#define TWL4030_INTERRUPTS_BCIEDR2	0xb
+#define TWL4030_INTERRUPTS_BCIEDR3	0xc
+#define TWL4030_INTERRUPTS_BCISIHCTRL	0xd
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Power Interrupt block register offsets (use TWL4030_MODULE_INT)
+ */
+
+#define TWL4030_INT_PWR_ISR1		0x0
+#define TWL4030_INT_PWR_IMR1		0x1
+#define TWL4030_INT_PWR_ISR2		0x2
+#define TWL4030_INT_PWR_IMR2		0x3
+#define TWL4030_INT_PWR_SIR		0x4	/* test register */
+#define TWL4030_INT_PWR_EDR1		0x5
+#define TWL4030_INT_PWR_EDR2		0x6
+#define TWL4030_INT_PWR_SIH_CTRL	0x7
+
+/*----------------------------------------------------------------------*/
+
+struct twl4030_bci_platform_data {
+	int *battery_tmp_tbl;
+	unsigned int tblsize;
+};
+
+/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
+struct twl4030_gpio_platform_data {
+	int		gpio_base;
+	unsigned	irq_base, irq_end;
+
+	/* package the two LED signals as output-only GPIOs? */
+	bool		use_leds;
+
+	/* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */
+	u8		mmc_cd;
+
+	/* For gpio-N, bit (1 << N) in "pullups" is set if that pullup
+	 * should be enabled.  Else, if that bit is set in "pulldowns",
+	 * that pulldown is enabled.  Don't waste power by letting any
+	 * digital inputs float...
+	 */
+	u32		pullups;
+	u32		pulldowns;
+
+	int		(*setup)(struct device *dev,
+				unsigned gpio, unsigned ngpio);
+	int		(*teardown)(struct device *dev,
+				unsigned gpio, unsigned ngpio);
+};
+
+struct twl4030_madc_platform_data {
+	int		irq_line;
+};
+
+struct twl4030_keypad_data {
+	int rows;
+	int cols;
+	int *keymap;
+	int irq;
+	unsigned int keymapsize;
+	unsigned int rep:1;
+};
+
+enum twl4030_usb_mode {
+	T2_USB_MODE_ULPI = 1,
+	T2_USB_MODE_CEA2011_3PIN = 2,
+};
+
+struct twl4030_usb_data {
+	enum twl4030_usb_mode	usb_mode;
+};
+
+struct twl4030_platform_data {
+	unsigned				irq_base, irq_end;
+	struct twl4030_bci_platform_data	*bci;
+	struct twl4030_gpio_platform_data	*gpio;
+	struct twl4030_madc_platform_data	*madc;
+	struct twl4030_keypad_data		*keypad;
+	struct twl4030_usb_data			*usb;
+
+	/* REVISIT more to come ... _nothing_ should be hard-wired */
+};
+
+/*----------------------------------------------------------------------*/
+
+int twl4030_sih_setup(int module);
+
+/*
+ * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the
+ * IRQ data to subsidiary devices using platform device resources.
+ */
+
+/* IRQ information: needs the IRQ base from <mach/irqs.h> */
+#include <mach/irqs.h>
+/* TWL4030 interrupts */
+
+/* #define TWL4030_MODIRQ_GPIO		(TWL4030_IRQ_BASE + 0) */
+#define TWL4030_MODIRQ_KEYPAD		(TWL4030_IRQ_BASE + 1)
+#define TWL4030_MODIRQ_BCI		(TWL4030_IRQ_BASE + 2)
+#define TWL4030_MODIRQ_MADC		(TWL4030_IRQ_BASE + 3)
+/* #define TWL4030_MODIRQ_USB		(TWL4030_IRQ_BASE + 4) */
+/* #define TWL4030_MODIRQ_PWR		(TWL4030_IRQ_BASE + 5) */
+
+#define TWL4030_PWRIRQ_PWRBTN		(TWL4030_PWR_IRQ_BASE + 0)
+/* #define TWL4030_PWRIRQ_CHG_PRES		(TWL4030_PWR_IRQ_BASE + 1) */
+/* #define TWL4030_PWRIRQ_USB_PRES		(TWL4030_PWR_IRQ_BASE + 2) */
+/* #define TWL4030_PWRIRQ_RTC		(TWL4030_PWR_IRQ_BASE + 3) */
+/* #define TWL4030_PWRIRQ_HOT_DIE		(TWL4030_PWR_IRQ_BASE + 4) */
+/* #define TWL4030_PWRIRQ_PWROK_TIMEOUT	(TWL4030_PWR_IRQ_BASE + 5) */
+/* #define TWL4030_PWRIRQ_MBCHG		(TWL4030_PWR_IRQ_BASE + 6) */
+/* #define TWL4030_PWRIRQ_SC_DETECT	(TWL4030_PWR_IRQ_BASE + 7) */
+
+/* Rest are unused currently */
+
+/* Offsets to Power Registers */
+#define TWL4030_VDAC_DEV_GRP		0x3B
+#define TWL4030_VDAC_DEDICATED		0x3E
+#define TWL4030_VAUX1_DEV_GRP		0x17
+#define TWL4030_VAUX1_DEDICATED		0x1A
+#define TWL4030_VAUX2_DEV_GRP		0x1B
+#define TWL4030_VAUX2_DEDICATED		0x1E
+#define TWL4030_VAUX3_DEV_GRP		0x1F
+#define TWL4030_VAUX3_DEDICATED		0x22
+
+/* TWL4030 GPIO interrupt definitions */
+
+#define TWL4030_GPIO_IRQ_NO(n)		(TWL4030_GPIO_IRQ_BASE + (n))
+
+/*
+ * Exported TWL4030 GPIO APIs
+ *
+ * WARNING -- use standard GPIO and IRQ calls instead; these will vanish.
+ */
+int twl4030_set_gpio_debounce(int gpio, int enable);
+
+#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
+	defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
+	extern int twl4030charger_usb_en(int enable);
+#else
+	static inline int twl4030charger_usb_en(int enable) { return 0; }
+#endif
+
+#endif /* End of __TWL4030_H */
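A hedged sketch of the "num_bytes + 1" convention the header calls out for twl4030_i2c_write(): byte 0 of the buffer is reserved by the core, so the caller's data starts at offset 1. twl_write_two() is hypothetical.

#include <linux/types.h>
#include <linux/i2c/twl4030.h>

static int twl_write_two(u8 reg, u8 first, u8 second)
{
	u8 buf[3];		/* num_bytes (2) + 1 reserved byte at offset 0 */

	buf[1] = first;		/* data starts at offset 1 */
	buf[2] = second;
	return twl4030_i2c_write(TWL4030_MODULE_PM_RECEIVER, buf, reg, 2);
}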
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c47e371..89e53cf 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -461,12 +461,26 @@
 struct ide_acpi_hwif_link;
 #endif
 
+struct ide_drive_s;
+
+struct ide_disk_ops {
+	int		(*check)(struct ide_drive_s *, const char *);
+	int		(*get_capacity)(struct ide_drive_s *);
+	void		(*setup)(struct ide_drive_s *);
+	void		(*flush)(struct ide_drive_s *);
+	int		(*init_media)(struct ide_drive_s *, struct gendisk *);
+	int		(*set_doorlock)(struct ide_drive_s *, struct gendisk *,
+					int);
+	ide_startstop_t	(*do_request)(struct ide_drive_s *, struct request *,
+				      sector_t);
+	int		(*end_request)(struct ide_drive_s *, int, int);
+	int		(*ioctl)(struct ide_drive_s *, struct inode *,
+				 struct file *, unsigned int, unsigned long);
+};
+
 /* ATAPI device flags */
 enum {
 	IDE_AFLAG_DRQ_INTERRUPT		= (1 << 0),
-	IDE_AFLAG_MEDIA_CHANGED		= (1 << 1),
-	/* Drive cannot lock the door. */
-	IDE_AFLAG_NO_DOORLOCK		= (1 << 2),
 
 	/* ide-cd */
 	/* Drive cannot eject the disc. */
@@ -498,14 +512,10 @@
 	IDE_AFLAG_LE_SPEED_FIELDS	= (1 << 17),
 
 	/* ide-floppy */
-	/* Format in progress */
-	IDE_AFLAG_FORMAT_IN_PROGRESS	= (1 << 18),
 	/* Avoid commands not supported in Clik drive */
 	IDE_AFLAG_CLIK_DRIVE		= (1 << 19),
 	/* Requires BH algorithm for packets */
 	IDE_AFLAG_ZIP_DRIVE		= (1 << 20),
-	/* Write protect */
-	IDE_AFLAG_WP			= (1 << 21),
 	/* Supports format progress report */
 	IDE_AFLAG_SRFP			= (1 << 22),
 
@@ -578,7 +588,11 @@
 	/* don't unload heads */
 	IDE_DFLAG_NO_UNLOAD		= (1 << 27),
 	/* heads unloaded, please don't reset port */
-	IDE_DFLAG_PARKED		= (1 << 28)
+	IDE_DFLAG_PARKED		= (1 << 28),
+	IDE_DFLAG_MEDIA_CHANGED		= (1 << 29),
+	/* write protect */
+	IDE_DFLAG_WP			= (1 << 30),
+	IDE_DFLAG_FORMAT_IN_PROGRESS	= (1 << 31),
 };
 
 struct ide_drive_s {
@@ -597,6 +611,8 @@
 #endif
 	struct hwif_s		*hwif;	/* actually (ide_hwif_t *) */
 
+	const struct ide_disk_ops *disk_ops;
+
 	unsigned long dev_flags;
 
 	unsigned long sleep;		/* sleep until this time */
@@ -1123,8 +1139,8 @@
 	void		(*resume)(ide_drive_t *);
 	void		(*shutdown)(ide_drive_t *);
 #ifdef CONFIG_IDE_PROC_FS
-	ide_proc_entry_t		*proc;
-	const struct ide_proc_devset	*settings;
+	ide_proc_entry_t *		(*proc_entries)(ide_drive_t *);
+	const struct ide_proc_devset *	(*proc_devsets)(ide_drive_t *);
 #endif
 };
 
diff --git a/include/linux/init.h b/include/linux/init.h
index ad63824..0c12646 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,7 +40,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold
+#define __init		__section(.init.text) __cold notrace
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2e117f3..3d017cf 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
 #include <asm/cacheflush.h>
+#include <asm/iommu.h>
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -127,6 +128,7 @@
 
 
 /* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET  60
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
@@ -140,6 +142,7 @@
 #define DMA_TLB_MAX_SIZE (0x3f)
 
 /* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET  61
 #define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 3)
 #define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 3)
 #define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 3)
@@ -200,22 +203,21 @@
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
 
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-	cycles_t start_time = get_cycles();\
-	while (1) {\
-		sts = op (iommu->reg + offset);\
-		if (cond)\
-			break;\
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
+do {									\
+	cycles_t start_time = get_cycles();				\
+	while (1) {							\
+		sts = op(iommu->reg + offset);				\
+		if (cond)						\
+			break;						\
 		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-			panic("DMAR hardware is malfunctioning\n");\
-		cpu_relax();\
-	}\
-}
+			panic("DMAR hardware is malfunctioning\n");	\
+		cpu_relax();						\
+	}								\
+} while (0)
 
 #define QI_LENGTH	256	/* queue length */
 
@@ -238,6 +240,19 @@
 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
 
+#define QI_IOTLB_DID(did) 	(((u64)did) << 16)
+#define QI_IOTLB_DR(dr) 	(((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) 	(((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) 	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
+#define QI_IOTLB_AM(am)		(((u8)am))
+
+#define QI_CC_FM(fm)		(((u64)fm) << 48)
+#define QI_CC_SID(sid)		(((u64)sid) << 32)
+#define QI_CC_DID(did)		(((u64)did) << 16)
+#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
 struct qi_desc {
 	u64 low, high;
 };
@@ -263,6 +278,13 @@
 };
 #endif
 
+struct iommu_flush {
+	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		u64 type, int non_present_entry_flush);
+	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+		unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
 struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
@@ -282,6 +304,7 @@
 	unsigned char name[7];    /* Device Name */
 	struct msi_msg saved_msg;
 	struct sys_device sysdev;
+	struct iommu_flush flush;
 #endif
 	struct q_inval  *qi;            /* Queued invalidation info */
 #ifdef CONFIG_INTR_REMAP
@@ -303,6 +326,12 @@
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
+extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+			        u8 fm, u64 type, int non_present_entry_flush);
+extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+			  unsigned int size_order, u64 type,
+			  int non_present_entry_flush);
+
 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 void intel_iommu_domain_exit(struct dmar_domain *domain);
@@ -324,4 +353,11 @@
 }
 #endif /* CONFIG_DMAR */
 
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
 #endif
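The IOMMU_WAIT_OP() rewrite wraps the macro body in do { ... } while (0); without that, a multi-statement macro breaks when used as the single statement of an if/else. A minimal illustration (not kernel code; foo and bar are placeholders):

void foo(int v);
void bar(int v);

#define GOOD(x)	do { foo(x); bar(x); } while (0)
/* #define BAD(x)	{ foo(x); bar(x); } */

void demo(int cond, int v)
{
	if (cond)
		GOOD(v);	/* the trailing ';' completes the statement */
	else
		bar(v);
	/*
	 * With BAD(v); above, the ';' after the closing brace would end the
	 * if statement and orphan the else, which fails to compile.
	 */
}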
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35a61dc..f58a0cf 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -8,6 +8,7 @@
 #include <linux/preempt.h>
 #include <linux/cpumask.h>
 #include <linux/irqreturn.h>
+#include <linux/irqnr.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8d9411b..d058c57 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/cpumask.h>
 #include <linux/irqreturn.h>
+#include <linux/irqnr.h>
 #include <linux/errno.h>
 
 #include <asm/irq.h>
@@ -152,6 +153,7 @@
  * @name:		flow handler name for /proc/interrupts output
  */
 struct irq_desc {
+	unsigned int		irq;
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;
 	struct msi_desc		*msi_desc;
@@ -170,7 +172,7 @@
 	cpumask_t		affinity;
 	unsigned int		cpu;
 #endif
-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+#ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_t		pending_mask;
 #endif
 #ifdef CONFIG_PROC_FS
@@ -179,8 +181,14 @@
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
+
 extern struct irq_desc irq_desc[NR_IRQS];
 
+static inline struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < nr_irqs) ? irq_desc + irq : NULL;
+}
+
 /*
  * Migration helpers for obsolete names, they will go away:
  */
@@ -198,19 +206,15 @@
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#ifndef handle_dynamic_tick
-# define handle_dynamic_tick(a)		do { } while (0)
-#endif
-
 #ifdef CONFIG_SMP
 
-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+#ifdef CONFIG_GENERIC_PENDING_IRQ
 
 void set_pending_irq(unsigned int irq, cpumask_t mask);
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
 
-#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */
+#else /* CONFIG_GENERIC_PENDING_IRQ */
 
 static inline void move_irq(int irq)
 {
@@ -237,19 +241,14 @@
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_IRQBALANCE
-extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
-#else
-static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
-{
-}
-#endif
-
 extern int no_irq_affinity;
 
 static inline int irq_balancing_disabled(unsigned int irq)
 {
-	return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	return desc->status & IRQ_NO_BALANCING_MASK;
 }
 
 /* Handle irq action chains: */
@@ -279,10 +278,8 @@
  * irqchip-style controller then we call the ->handle_irq() handler,
  * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
  */
-static inline void generic_handle_irq(unsigned int irq)
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_desc *desc = irq_desc + irq;
-
 #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
 	desc->handle_irq(irq, desc);
 #else
@@ -293,6 +290,11 @@
 #endif
 }
 
+static inline void generic_handle_irq(unsigned int irq)
+{
+	generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 			   int action_ret);
@@ -325,7 +327,10 @@
 static inline void __set_irq_handler_unlocked(int irq,
 					      irq_flow_handler_t handler)
 {
-	irq_desc[irq].handle_irq = handler;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->handle_irq = handler;
 }
 
 /*
@@ -353,13 +358,14 @@
 extern void set_irq_probe(unsigned int irq);
 
 /* Handle dynamic irq creation and destruction */
+extern unsigned int create_irq_nr(unsigned int irq_want);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
 /* Test to see if a driver has successfully requested an irq */
 static inline int irq_has_action(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	return desc->action != NULL;
 }
 
@@ -374,10 +380,10 @@
 extern int set_irq_type(unsigned int irq, unsigned int type);
 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
-#define get_irq_chip(irq)	(irq_desc[irq].chip)
-#define get_irq_chip_data(irq)	(irq_desc[irq].chip_data)
-#define get_irq_data(irq)	(irq_desc[irq].handler_data)
-#define get_irq_msi(irq)	(irq_desc[irq].msi_desc)
+#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
+#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
+#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
+#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
 
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
new file mode 100644
index 0000000..452c280
--- /dev/null
+++ b/include/linux/irqnr.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_IRQNR_H
+#define _LINUX_IRQNR_H
+
+#ifndef CONFIG_GENERIC_HARDIRQS
+#include <asm/irq.h>
+# define nr_irqs		NR_IRQS
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0; irq < nr_irqs; irq++)
+#else
+extern int nr_irqs;
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
+
+# define for_each_irq_desc_reverse(irq, desc)				\
+	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
+	     irq >= 0; irq--, desc--)
+#endif
+
+#define for_each_irq_nr(irq)			\
+	for (irq = 0; irq < nr_irqs; irq++)
+
+#endif
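A hedged sketch of iterating interrupt numbers with the new helper, e.g. for a /proc/interrupts-style dump; dump_irq_counts() is hypothetical.

#include <linux/irqnr.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>

static void dump_irq_counts(int cpu)
{
	unsigned int irq;

	for_each_irq_nr(irq) {
		unsigned int count = kstat_irqs_cpu(irq, cpu);

		if (count)
			printk(KERN_DEBUG "irq %u: %u\n", irq, count);
	}
}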
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 7ebbcb1..346e2b8 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -816,6 +816,9 @@
 #define JFS_FLUSHED	0x008	/* The journal superblock has been flushed */
 #define JFS_LOADED	0x010	/* The journal superblock has been loaded */
 #define JFS_BARRIER	0x020	/* Use IDE barriers */
+#define JFS_ABORT_ON_SYNCDATA_ERR	0x040  /* Abort the journal on file
+						* data write error in ordered
+						* mode */
 
 /*
  * Function declarations for the journaling transaction and buffer
@@ -908,7 +911,7 @@
 		   (journal_t *, unsigned long, unsigned long, unsigned long);
 extern int	   journal_create     (journal_t *);
 extern int	   journal_load       (journal_t *journal);
-extern void	   journal_destroy    (journal_t *);
+extern int	   journal_destroy    (journal_t *);
 extern int	   journal_recover    (journal_t *journal);
 extern int	   journal_wipe       (journal_t *, int);
 extern int	   journal_skip_recovery	(journal_t *);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 463d6f1..c7d106e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -641,6 +641,11 @@
 	 */
 	int t_handle_count;
 
+	/*
+	 * For use by the filesystem to store fs-specific data
+	 * structures associated with the transaction
+	 */
+	struct list_head	t_private_list;
 };
 
 struct transaction_run_stats_s {
@@ -935,6 +940,10 @@
 
 	pid_t			j_last_sync_writer;
 
+	/* This function is called when a transaction is closed */
+	void			(*j_commit_callback)(journal_t *,
+						     transaction_t *);
+
 	/*
 	 * Journal statistics
 	 */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5a566b7..94d17ff6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -496,4 +496,9 @@
 #define NUMA_BUILD 0
 #endif
 
+/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+#endif
+
 #endif
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index cf9f40a..4a145ca 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -39,19 +39,34 @@
 
 extern unsigned long long nr_context_switches(void);
 
+struct irq_desc;
+
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
+					    struct irq_desc *desc)
+{
+	kstat_this_cpu.irqs[irq]++;
+}
+
+static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	return kstat_cpu(cpu).irqs[irq];
+}
+
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
-static inline int kstat_irqs(int irq)
+static inline unsigned int kstat_irqs(unsigned int irq)
 {
-	int cpu, sum = 0;
+	unsigned int sum = 0;
+	int cpu;
 
 	for_each_possible_cpu(cpu)
-		sum += kstat_cpu(cpu).irqs[irq];
+		sum += kstat_irqs_cpu(irq, cpu);
 
 	return sum;
 }
 
+extern unsigned long long task_delta_exec(struct task_struct *);
 extern void account_user_time(struct task_struct *, cputime_t);
 extern void account_user_time_scaled(struct task_struct *, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t);
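
The kstat helpers above wrap the per-CPU interrupt counters so callers pass the descriptor along with the number. A rough sketch of the intended call site, loosely modelled on the generic flow handlers (the handler body is elided, and the function is illustrative only):

#include <linux/irq.h>
#include <linux/kernel_stat.h>

/* Illustrative flow handler: account the interrupt on this CPU through the
 * new helper instead of touching kstat_this_cpu.irqs[] directly. */
static void example_flow_handler(unsigned int irq, struct irq_desc *desc)
{
	kstat_incr_irqs_this_cpu(irq, desc);

	/* ... ack/mask the line and run desc->action, as handle_level_irq()
	 * and friends do ... */
}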
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0be7795..497b1d1 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -29,6 +29,7 @@
  *		<jkenisto@us.ibm.com>  and Prasanna S Panchamukhi
  *		<prasanna@in.ibm.com> added function-return probes.
  */
+#include <linux/linkage.h>
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
@@ -47,7 +48,7 @@
 #define KPROBE_HIT_SSDONE	0x00000008
 
 /* Attach to insert probes on any functions which should be ignored*/
-#define __kprobes	__attribute__((__section__(".kprobes.text")))
+#define __kprobes	__attribute__((__section__(".kprobes.text"))) notrace
 
 struct kprobe;
 struct pt_regs;
@@ -256,7 +257,7 @@
 
 #else /* CONFIG_KPROBES */
 
-#define __kprobes	/**/
+#define __kprobes	notrace
 struct jprobe;
 struct kretprobe;
 
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 947cf84..c261aa0 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -340,6 +340,9 @@
 
 	ATA_EHI_DID_RESET	= ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
 
+	/* mask of flags to transfer *to* the slave link */
+	ATA_EHI_TO_SLAVE_MASK	= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
+
 	/* max tries if error condition is still set after ->error_handler */
 	ATA_EH_MAX_TRIES	= 5,
 
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 56ba373..9fd1f85 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -4,8 +4,6 @@
 #include <linux/compiler.h>
 #include <asm/linkage.h>
 
-#define notrace __attribute__((no_instrument_function))
-
 #ifdef __cplusplus
 #define CPP_ASMLINKAGE extern "C"
 #else
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 1290653..889196c 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -160,4 +160,11 @@
 extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
 	int num);
 
+/*
+ * marker_synchronize_unregister must be called between the last marker probe
+ * unregistration and the end of module exit to make sure there is no caller
+ * executing a probe when it is freed.
+ */
+#define marker_synchronize_unregister() synchronize_sched()
+
 #endif
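
The new marker_synchronize_unregister() comment spells out the required ordering at unregistration time. A minimal module-exit sketch under that rule (the marker name and probe are placeholders, not from the patch; the probe signature follows marker_probe_func as I understand it):

#include <linux/init.h>
#include <linux/marker.h>
#include <linux/module.h>

/* Placeholder probe matching marker_probe_func's signature. */
static void example_probe(void *probe_private, void *call_private,
			  const char *fmt, va_list *args)
{
}

static void __exit example_exit(void)
{
	marker_probe_unregister("example_event", example_probe, NULL);
	/*
	 * As documented above: wait until no CPU can still be executing
	 * the probe before the module text goes away.
	 */
	marker_synchronize_unregister();
}
module_exit(example_exit);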
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index fdf3967..1fbe14d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -27,16 +27,13 @@
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
-#define page_reset_bad_cgroup(page)	((page)->page_cgroup = 0)
-
-extern struct page_cgroup *page_get_page_cgroup(struct page *page);
 extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
+extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -44,7 +41,7 @@
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
 					struct mem_cgroup *mem_cont,
-					int active);
+					int active, int file);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
@@ -69,21 +66,11 @@
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
 
-extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-				struct zone *zone, int priority);
-extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-				struct zone *zone, int priority);
+extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+					int priority, enum lru_list lru);
+
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline void page_reset_bad_cgroup(struct page *page)
-{
-}
-
-static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
-{
-	return NULL;
-}
-
 static inline int mem_cgroup_charge(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
@@ -159,14 +146,9 @@
 {
 }
 
-static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-					struct zone *zone, int priority)
-{
-	return 0;
-}
-
-static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-					struct zone *zone, int priority)
+static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
+					struct zone *zone, int priority,
+					enum lru_list lru)
 {
 	return 0;
 }
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
new file mode 100644
index 0000000..cad314c
--- /dev/null
+++ b/include/linux/mfd/da903x.h
@@ -0,0 +1,201 @@
+#ifndef __LINUX_PMIC_DA903X_H
+#define __LINUX_PMIC_DA903X_H
+
+/* Unified sub device IDs for DA9030/DA9034 */
+enum {
+	DA9030_ID_LED_1,
+	DA9030_ID_LED_2,
+	DA9030_ID_LED_3,
+	DA9030_ID_LED_4,
+	DA9030_ID_LED_PC,
+	DA9030_ID_VIBRA,
+	DA9030_ID_WLED,
+	DA9030_ID_BUCK1,
+	DA9030_ID_BUCK2,
+	DA9030_ID_LDO1,
+	DA9030_ID_LDO2,
+	DA9030_ID_LDO3,
+	DA9030_ID_LDO4,
+	DA9030_ID_LDO5,
+	DA9030_ID_LDO6,
+	DA9030_ID_LDO7,
+	DA9030_ID_LDO8,
+	DA9030_ID_LDO9,
+	DA9030_ID_LDO10,
+	DA9030_ID_LDO11,
+	DA9030_ID_LDO12,
+	DA9030_ID_LDO13,
+	DA9030_ID_LDO14,
+	DA9030_ID_LDO15,
+	DA9030_ID_LDO16,
+	DA9030_ID_LDO17,
+	DA9030_ID_LDO18,
+	DA9030_ID_LDO19,
+	DA9030_ID_LDO_INT,	/* LDO Internal */
+
+	DA9034_ID_LED_1,
+	DA9034_ID_LED_2,
+	DA9034_ID_VIBRA,
+	DA9034_ID_WLED,
+	DA9034_ID_TOUCH,
+
+	DA9034_ID_BUCK1,
+	DA9034_ID_BUCK2,
+	DA9034_ID_LDO1,
+	DA9034_ID_LDO2,
+	DA9034_ID_LDO3,
+	DA9034_ID_LDO4,
+	DA9034_ID_LDO5,
+	DA9034_ID_LDO6,
+	DA9034_ID_LDO7,
+	DA9034_ID_LDO8,
+	DA9034_ID_LDO9,
+	DA9034_ID_LDO10,
+	DA9034_ID_LDO11,
+	DA9034_ID_LDO12,
+	DA9034_ID_LDO13,
+	DA9034_ID_LDO14,
+	DA9034_ID_LDO15,
+};
+
+/*
+ * DA9030/DA9034 LED sub-devices use the generic "struct led_info"
+ * as the platform_data
+ */
+
+/* DA9030 flags for "struct led_info"
+ */
+#define DA9030_LED_RATE_ON	(0 << 5)
+#define DA9030_LED_RATE_052S	(1 << 5)
+#define DA9030_LED_DUTY_1_16	(0 << 3)
+#define DA9030_LED_DUTY_1_8	(1 << 3)
+#define DA9030_LED_DUTY_1_4	(2 << 3)
+#define DA9030_LED_DUTY_1_2	(3 << 3)
+
+#define DA9030_VIBRA_MODE_1P3V	(0 << 1)
+#define DA9030_VIBRA_MODE_2P7V	(1 << 1)
+#define DA9030_VIBRA_FREQ_1HZ	(0 << 2)
+#define DA9030_VIBRA_FREQ_2HZ	(1 << 2)
+#define DA9030_VIBRA_FREQ_4HZ	(2 << 2)
+#define DA9030_VIBRA_FREQ_8HZ	(3 << 2)
+#define DA9030_VIBRA_DUTY_ON	(0 << 4)
+#define DA9030_VIBRA_DUTY_75P	(1 << 4)
+#define DA9030_VIBRA_DUTY_50P	(2 << 4)
+#define DA9030_VIBRA_DUTY_25P	(3 << 4)
+
+/* DA9034 flags for "struct led_info" */
+#define DA9034_LED_RAMP		(1 << 7)
+
+/* DA9034 touch screen platform data */
+struct da9034_touch_pdata {
+	int	interval_ms;	/* sampling interval while pen down */
+	int	x_inverted;
+	int	y_inverted;
+};
+
+struct da903x_subdev_info {
+	int		id;
+	const char	*name;
+	void		*platform_data;
+};
+
+struct da903x_platform_data {
+	int num_subdevs;
+	struct da903x_subdev_info *subdevs;
+};
+
+/* bit definitions for DA9030 events */
+#define DA9030_EVENT_ONKEY		(1 << 0)
+#define	DA9030_EVENT_PWREN		(1 << 1)
+#define	DA9030_EVENT_EXTON		(1 << 2)
+#define	DA9030_EVENT_CHDET		(1 << 3)
+#define	DA9030_EVENT_TBAT		(1 << 4)
+#define	DA9030_EVENT_VBATMON		(1 << 5)
+#define	DA9030_EVENT_VBATMON_TXON	(1 << 6)
+#define	DA9030_EVENT_CHIOVER		(1 << 7)
+#define	DA9030_EVENT_TCTO		(1 << 8)
+#define	DA9030_EVENT_CCTO		(1 << 9)
+#define	DA9030_EVENT_ADC_READY		(1 << 10)
+#define	DA9030_EVENT_VBUS_4P4		(1 << 11)
+#define	DA9030_EVENT_VBUS_4P0		(1 << 12)
+#define	DA9030_EVENT_SESS_VALID		(1 << 13)
+#define	DA9030_EVENT_SRP_DETECT		(1 << 14)
+#define	DA9030_EVENT_WATCHDOG		(1 << 15)
+#define	DA9030_EVENT_LDO15		(1 << 16)
+#define	DA9030_EVENT_LDO16		(1 << 17)
+#define	DA9030_EVENT_LDO17		(1 << 18)
+#define	DA9030_EVENT_LDO18		(1 << 19)
+#define	DA9030_EVENT_LDO19		(1 << 20)
+#define	DA9030_EVENT_BUCK2		(1 << 21)
+
+/* bit definitions for DA9034 events */
+#define DA9034_EVENT_ONKEY		(1 << 0)
+#define DA9034_EVENT_EXTON		(1 << 2)
+#define DA9034_EVENT_CHDET		(1 << 3)
+#define DA9034_EVENT_TBAT		(1 << 4)
+#define DA9034_EVENT_VBATMON		(1 << 5)
+#define DA9034_EVENT_REV_IOVER		(1 << 6)
+#define DA9034_EVENT_CH_IOVER		(1 << 7)
+#define DA9034_EVENT_CH_TCTO		(1 << 8)
+#define DA9034_EVENT_CH_CCTO		(1 << 9)
+#define DA9034_EVENT_USB_DEV		(1 << 10)
+#define DA9034_EVENT_OTGCP_IOVER	(1 << 11)
+#define DA9034_EVENT_VBUS_4P55		(1 << 12)
+#define DA9034_EVENT_VBUS_3P8		(1 << 13)
+#define DA9034_EVENT_SESS_1P8		(1 << 14)
+#define DA9034_EVENT_SRP_READY		(1 << 15)
+#define DA9034_EVENT_ADC_MAN		(1 << 16)
+#define DA9034_EVENT_ADC_AUTO4		(1 << 17)
+#define DA9034_EVENT_ADC_AUTO5		(1 << 18)
+#define DA9034_EVENT_ADC_AUTO6		(1 << 19)
+#define DA9034_EVENT_PEN_DOWN		(1 << 20)
+#define DA9034_EVENT_TSI_READY		(1 << 21)
+#define DA9034_EVENT_UART_TX		(1 << 22)
+#define DA9034_EVENT_UART_RX		(1 << 23)
+#define DA9034_EVENT_HEADSET		(1 << 25)
+#define DA9034_EVENT_HOOKSWITCH		(1 << 26)
+#define DA9034_EVENT_WATCHDOG		(1 << 27)
+
+extern int da903x_register_notifier(struct device *dev,
+		struct notifier_block *nb, unsigned int events);
+extern int da903x_unregister_notifier(struct device *dev,
+		struct notifier_block *nb, unsigned int events);
+
+/* Status Query Interface */
+#define DA9030_STATUS_ONKEY		(1 << 0)
+#define DA9030_STATUS_PWREN1		(1 << 1)
+#define DA9030_STATUS_EXTON		(1 << 2)
+#define DA9030_STATUS_CHDET		(1 << 3)
+#define DA9030_STATUS_TBAT		(1 << 4)
+#define DA9030_STATUS_VBATMON		(1 << 5)
+#define DA9030_STATUS_VBATMON_TXON	(1 << 6)
+#define DA9030_STATUS_MCLKDET		(1 << 7)
+
+#define DA9034_STATUS_ONKEY		(1 << 0)
+#define DA9034_STATUS_EXTON		(1 << 2)
+#define DA9034_STATUS_CHDET		(1 << 3)
+#define DA9034_STATUS_TBAT		(1 << 4)
+#define DA9034_STATUS_VBATMON		(1 << 5)
+#define DA9034_STATUS_PEN_DOWN		(1 << 6)
+#define DA9034_STATUS_MCLKDET		(1 << 7)
+#define DA9034_STATUS_USB_DEV		(1 << 8)
+#define DA9034_STATUS_HEADSET		(1 << 9)
+#define DA9034_STATUS_HOOKSWITCH	(1 << 10)
+#define DA9034_STATUS_REMCON		(1 << 11)
+#define DA9034_STATUS_VBUS_VALID_4P55	(1 << 12)
+#define DA9034_STATUS_VBUS_VALID_3P8	(1 << 13)
+#define DA9034_STATUS_SESS_VALID_1P8	(1 << 14)
+#define DA9034_STATUS_SRP_READY		(1 << 15)
+
+extern int da903x_query_status(struct device *dev, unsigned int status);
+
+
+/* NOTE: the functions below are not intended for use outside
+ * of the DA9034 sub-device drivers
+ */
+extern int da903x_write(struct device *dev, int reg, uint8_t val);
+extern int da903x_read(struct device *dev, int reg, uint8_t *val);
+extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask);
+extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+#endif /* __LINUX_PMIC_DA903X_H */
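
A brief sketch of how a DA903x sub-device driver might use the notifier and status-query interfaces declared above. The sub-device is hypothetical, and passing the event bitmask as the notifier's 'event' argument is an assumption about the core driver, not something this header guarantees:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/da903x.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>

static int charger_event(struct notifier_block *nb, unsigned long event,
			 void *data)
{
	if (event & DA9030_EVENT_CHDET)
		pr_info("da903x example: charger state changed\n");
	return NOTIFY_OK;
}

static struct notifier_block charger_nb = {
	.notifier_call = charger_event,
};

static int __devinit charger_probe(struct platform_device *pdev)
{
	/* Sub-devices hang off the DA903x core device. */
	struct device *chip = pdev->dev.parent;

	if (da903x_query_status(chip, DA9030_STATUS_CHDET))
		dev_info(&pdev->dev, "charger already present\n");

	return da903x_register_notifier(chip, &charger_nb,
					DA9030_EVENT_CHDET);
}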
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
index e83c7f2..b462981 100644
--- a/include/linux/mfd/t7l66xb.h
+++ b/include/linux/mfd/t7l66xb.h
@@ -15,8 +15,6 @@
 #include <linux/mfd/tmio.h>
 
 struct t7l66xb_platform_data {
-	int (*enable_clk32k)(struct platform_device *dev);
-	void (*disable_clk32k)(struct platform_device *dev);
 	int (*enable)(struct platform_device *dev);
 	int (*disable)(struct platform_device *dev);
 	int (*suspend)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
index fa06e06..b488820 100644
--- a/include/linux/mfd/tc6387xb.h
+++ b/include/linux/mfd/tc6387xb.h
@@ -11,9 +11,6 @@
 #define MFD_TC6387XB_H
 
 struct tc6387xb_platform_data {
-	int (*enable_clk32k)(struct platform_device *dev);
-	void (*disable_clk32k)(struct platform_device *dev);
-
 	int (*enable)(struct platform_device *dev);
 	int (*disable)(struct platform_device *dev);
 	int (*suspend)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index fec7b3f..626e448 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -17,12 +17,12 @@
 #ifndef MFD_TC6393XB_H
 #define MFD_TC6393XB_H
 
+#include <linux/fb.h>
+
 /* Also one should provide the CK3P6MI clock */
 struct tc6393xb_platform_data {
 	u16	scr_pll2cr;	/* PLL2 Control */
 	u16	scr_gper;	/* GP Enable */
-	u32	scr_gpo_doecr;	/* GPO Data OE Control */
-	u32	scr_gpo_dsr;	/* GPO Data Set */
 
 	int	(*enable)(struct platform_device *dev);
 	int	(*disable)(struct platform_device *dev);
@@ -31,15 +31,28 @@
 
 	int	irq_base;	/* base for subdevice irqs */
 	int	gpio_base;
+	int	(*setup)(struct platform_device *dev);
+	void	(*teardown)(struct platform_device *dev);
 
 	struct tmio_nand_data	*nand_data;
+	struct tmio_fb_data	*fb_data;
+
+	unsigned resume_restore : 1; /* take special actions
+					to preserve the state
+					on suspend/resume */
 };
 
+extern int tc6393xb_lcd_mode(struct platform_device *fb,
+			     const struct fb_videomode *mode);
+extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
+
 /*
  * Relative to irq_base
  */
 #define	IRQ_TC6393_NAND		0
 #define	IRQ_TC6393_MMC		1
+#define	IRQ_TC6393_OHCI		2
+#define	IRQ_TC6393_FB		4
 
 #define	TC6393XB_NR_IRQS	8
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 03aea61..3f34005 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,7 +7,6 @@
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -21,8 +20,6 @@
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
-static inline int isolate_lru_page(struct page *p, struct list_head *list)
-					{ return -ENOSYS; }
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private) { return -ENOSYS; }
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 77323a7..cf9c679 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -132,6 +132,15 @@
 	MLX4_MAILBOX_SIZE	=  4096
 };
 
+enum {
+	/* set port opcode modifiers */
+	MLX4_SET_PORT_GENERAL   = 0x0,
+	MLX4_SET_PORT_RQP_CALC  = 0x1,
+	MLX4_SET_PORT_MAC_TABLE = 0x2,
+	MLX4_SET_PORT_VLAN_TABLE = 0x3,
+	MLX4_SET_PORT_PRIO_MAP  = 0x4,
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b2f9444..bd9977b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -60,6 +60,7 @@
 	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
 	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
 	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
+	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
 	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
 	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
 	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
@@ -145,6 +146,29 @@
 	MLX4_MTT_FLAG_PRESENT		= 1
 };
 
+enum mlx4_qp_region {
+	MLX4_QP_REGION_FW = 0,
+	MLX4_QP_REGION_ETH_ADDR,
+	MLX4_QP_REGION_FC_ADDR,
+	MLX4_QP_REGION_FC_EXCH,
+	MLX4_NUM_QP_REGION
+};
+
+enum mlx4_port_type {
+	MLX4_PORT_TYPE_IB	= 1 << 0,
+	MLX4_PORT_TYPE_ETH	= 1 << 1,
+};
+
+enum mlx4_special_vlan_idx {
+	MLX4_NO_VLAN_IDX        = 0,
+	MLX4_VLAN_MISS_IDX,
+	MLX4_VLAN_REGULAR
+};
+
+enum {
+	MLX4_NUM_FEXCH          = 64 * 1024,
+};
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
 	return (major << 32) | (minor << 16) | subminor;
@@ -154,7 +178,9 @@
 	u64			fw_ver;
 	int			num_ports;
 	int			vl_cap[MLX4_MAX_PORTS + 1];
-	int			mtu_cap[MLX4_MAX_PORTS + 1];
+	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
+	u64			def_mac[MLX4_MAX_PORTS + 1];
+	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
 	int			gid_table_len[MLX4_MAX_PORTS + 1];
 	int			pkey_table_len[MLX4_MAX_PORTS + 1];
 	int			local_ca_ack_delay;
@@ -169,7 +195,6 @@
 	int			max_rq_desc_sz;
 	int			max_qp_init_rdma;
 	int			max_qp_dest_rdma;
-	int			reserved_qps;
 	int			sqp_start;
 	int			num_srqs;
 	int			max_srq_wqes;
@@ -201,6 +226,15 @@
 	u16			stat_rate_support;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
 	int			max_gso_sz;
+	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
+	int			reserved_qps;
+	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
+	int                     log_num_macs;
+	int                     log_num_vlans;
+	int                     log_num_prios;
+	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
+	u8			supported_type[MLX4_MAX_PORTS + 1];
+	u32			port_mask;
 };
 
 struct mlx4_buf_list {
@@ -355,6 +389,11 @@
 	u64			si_guid;
 };
 
+#define mlx4_foreach_port(port, dev, type)				\
+	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
+		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
+		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		   struct mlx4_buf *buf);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -400,7 +439,10 @@
 		  int collapsed);
 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp);
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
 
 int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
@@ -416,6 +458,12 @@
 			  int block_mcast_loopback);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
 
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+
 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
 		      int npages, u64 iova, u32 *lkey, u32 *rkey);
 int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
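
A short sketch using the new QP-range reservation calls and the mlx4_foreach_port() iterator added above. The 'dev' pointer is assumed to come from the mlx4 core, and the counts are arbitrary:

#include <linux/kernel.h>
#include <linux/mlx4/device.h>

static int example_mlx4_setup(struct mlx4_dev *dev)
{
	int base_qpn, port, err;

	/* Reserve eight consecutive QP numbers, aligned to eight. */
	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn);
	if (err)
		return err;

	/* Visit only the ports configured as Ethernet. */
	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
		printk(KERN_INFO "mlx4 example: port %d, default MAC %012llx\n",
		       port, (unsigned long long)dev->caps.def_mac[port]);

	mlx4_qp_release_range(dev, base_qpn, 8);
	return 0;
}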
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c61ba10..ffee2f7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -132,6 +132,11 @@
 #define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
 
 /*
+ * special vmas that are non-mergeable, non-mlock()able
+ */
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+
+/*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
@@ -700,10 +705,10 @@
 extern void show_free_areas(void);
 
 #ifdef CONFIG_SHMEM
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 static inline int shmem_lock(struct file *file, int lock,
-			     struct user_struct *user)
+			    struct user_struct *user)
 {
 	return 0;
 }
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 895bc4e..c948350 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,40 +1,100 @@
-static inline void
-add_page_to_active_list(struct zone *zone, struct page *page)
+#ifndef LINUX_MM_INLINE_H
+#define LINUX_MM_INLINE_H
+
+/**
+ * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * @page: the page to test
+ *
+ * Returns LRU_FILE if @page is a page cache page backed by a regular filesystem,
+ * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
+ * Used by functions that manipulate the LRU lists, to sort a page
+ * onto the right LRU list.
+ *
+ * We would like to get this info without a page flag, but the state
+ * needs to survive until the page is last deleted from the LRU, which
+ * could be as far down as __page_cache_release.
+ */
+static inline int page_is_file_cache(struct page *page)
 {
-	list_add(&page->lru, &zone->active_list);
-	__inc_zone_state(zone, NR_ACTIVE);
+	if (PageSwapBacked(page))
+		return 0;
+
+	/* The page is page cache backed by a normal filesystem. */
+	return LRU_FILE;
 }
 
 static inline void
-add_page_to_inactive_list(struct zone *zone, struct page *page)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	list_add(&page->lru, &zone->inactive_list);
-	__inc_zone_state(zone, NR_INACTIVE);
+	list_add(&page->lru, &zone->lru[l].list);
+	__inc_zone_state(zone, NR_LRU_BASE + l);
 }
 
 static inline void
-del_page_from_active_list(struct zone *zone, struct page *page)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
-	__dec_zone_state(zone, NR_ACTIVE);
-}
-
-static inline void
-del_page_from_inactive_list(struct zone *zone, struct page *page)
-{
-	list_del(&page->lru);
-	__dec_zone_state(zone, NR_INACTIVE);
+	__dec_zone_state(zone, NR_LRU_BASE + l);
 }
 
 static inline void
 del_page_from_lru(struct zone *zone, struct page *page)
 {
+	enum lru_list l = LRU_BASE;
+
 	list_del(&page->lru);
-	if (PageActive(page)) {
-		__ClearPageActive(page);
-		__dec_zone_state(zone, NR_ACTIVE);
+	if (PageUnevictable(page)) {
+		__ClearPageUnevictable(page);
+		l = LRU_UNEVICTABLE;
 	} else {
-		__dec_zone_state(zone, NR_INACTIVE);
+		if (PageActive(page)) {
+			__ClearPageActive(page);
+			l += LRU_ACTIVE;
+		}
+		l += page_is_file_cache(page);
 	}
+	__dec_zone_state(zone, NR_LRU_BASE + l);
 }
 
+/**
+ * page_lru - which LRU list should a page be on?
+ * @page: the page to test
+ *
+ * Returns the LRU list a page should be on, as an index
+ * into the array of LRU lists.
+ */
+static inline enum lru_list page_lru(struct page *page)
+{
+	enum lru_list lru = LRU_BASE;
+
+	if (PageUnevictable(page))
+		lru = LRU_UNEVICTABLE;
+	else {
+		if (PageActive(page))
+			lru += LRU_ACTIVE;
+		lru += page_is_file_cache(page);
+	}
+
+	return lru;
+}
+
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static inline int inactive_anon_is_low(struct zone *zone)
+{
+	unsigned long active, inactive;
+
+	active = zone_page_state(zone, NR_ACTIVE_ANON);
+	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+	if (inactive * zone->inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
+#endif
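
A minimal sketch of the pattern these helpers are meant for: compute the list a page belongs on with page_lru() and manipulate it under zone->lru_lock. This mirrors, but is not, the actual vmscan code:

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

/* Rotate a page to the head of whichever LRU list it currently sits on. */
static void rotate_page_on_lru(struct zone *zone, struct page *page)
{
	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		enum lru_list l = page_lru(page);

		del_page_from_lru_list(zone, page, l);
		add_page_to_lru_list(zone, page, l);	/* list_add() puts it at the head */
	}
	spin_unlock_irq(&zone->lru_lock);
}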
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9d49fa3..fe825471 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -94,9 +94,6 @@
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-	unsigned long page_cgroup;
-#endif
 };
 
 /*
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 61d19e1..139d7c8 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -34,11 +34,15 @@
 /* Called from page fault handler. */
 extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
 
-/* Called from ioremap.c */
 #ifdef CONFIG_MMIOTRACE
+/* Called from ioremap.c */
 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
 							void __iomem *addr);
 extern void mmiotrace_iounmap(volatile void __iomem *addr);
+
+/* For anyone to insert markers. Remember trailing newline. */
+extern int mmiotrace_printk(const char *fmt, ...)
+				__attribute__ ((format (printf, 1, 2)));
 #else
 static inline void mmiotrace_ioremap(resource_size_t offset,
 					unsigned long size, void __iomem *addr)
@@ -48,15 +52,22 @@
 static inline void mmiotrace_iounmap(volatile void __iomem *addr)
 {
 }
-#endif /* CONFIG_MMIOTRACE_HOOKS */
+
+static inline int mmiotrace_printk(const char *fmt, ...)
+				__attribute__ ((format (printf, 1, 0)));
+
+static inline int mmiotrace_printk(const char *fmt, ...)
+{
+	return 0;
+}
+#endif /* CONFIG_MMIOTRACE */
 
 enum mm_io_opcode {
 	MMIO_READ = 0x1,     /* struct mmiotrace_rw */
 	MMIO_WRITE = 0x2,    /* struct mmiotrace_rw */
 	MMIO_PROBE = 0x3,    /* struct mmiotrace_map */
 	MMIO_UNPROBE = 0x4,  /* struct mmiotrace_map */
-	MMIO_MARKER = 0x5,   /* raw char data */
-	MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
+	MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
 };
 
 struct mmiotrace_rw {
@@ -81,5 +92,6 @@
 extern void disable_mmiotrace(void);
 extern void mmio_trace_rw(struct mmiotrace_rw *rw);
 extern void mmio_trace_mapping(struct mmiotrace_map *map);
+extern int mmio_trace_printk(const char *fmt, va_list args);
 
 #endif /* MMIOTRACE_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 428328a..35a7b5e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -81,21 +81,31 @@
 enum zone_stat_item {
 	/* First 128 byte cacheline (assuming 64 bit words) */
 	NR_FREE_PAGES,
-	NR_INACTIVE,
-	NR_ACTIVE,
+	NR_LRU_BASE,
+	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
+	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
+	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
+	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
+#ifdef CONFIG_UNEVICTABLE_LRU
+	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
+	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
+#else
+	NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
+	NR_MLOCK = NR_ACTIVE_FILE,
+#endif
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
-	/* Second 128 byte cacheline */
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
+	/* Second 128 byte cacheline */
 	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
@@ -107,6 +117,55 @@
 #endif
 	NR_VM_ZONE_STAT_ITEMS };
 
+/*
+ * We do arithmetic on the LRU lists in various places in the code,
+ * so it is important to keep the active lists LRU_ACTIVE higher in
+ * the array than the corresponding inactive lists, and to keep
+ * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
+ *
+ * This has to be kept in sync with the statistics in zone_stat_item
+ * above and the descriptions in vmstat_text in mm/vmstat.c
+ */
+#define LRU_BASE 0
+#define LRU_ACTIVE 1
+#define LRU_FILE 2
+
+enum lru_list {
+	LRU_INACTIVE_ANON = LRU_BASE,
+	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
+	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
+	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
+#ifdef CONFIG_UNEVICTABLE_LRU
+	LRU_UNEVICTABLE,
+#else
+	LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
+#endif
+	NR_LRU_LISTS
+};
+
+#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+
+#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+
+static inline int is_file_lru(enum lru_list l)
+{
+	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+}
+
+static inline int is_active_lru(enum lru_list l)
+{
+	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+}
+
+static inline int is_unevictable_lru(enum lru_list l)
+{
+#ifdef CONFIG_UNEVICTABLE_LRU
+	return (l == LRU_UNEVICTABLE);
+#else
+	return 0;
+#endif
+}
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
@@ -251,10 +310,22 @@
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;	
-	struct list_head	active_list;
-	struct list_head	inactive_list;
-	unsigned long		nr_scan_active;
-	unsigned long		nr_scan_inactive;
+	struct {
+		struct list_head list;
+		unsigned long nr_scan;
+	} lru[NR_LRU_LISTS];
+
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long		recent_rotated[2];
+	unsigned long		recent_scanned[2];
+
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
 
@@ -276,6 +347,12 @@
 	 */
 	int prev_priority;
 
+	/*
+	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+	 * this zone's LRU.  Maintained by the pageout code.
+	 */
+	unsigned int inactive_ratio;
+
 
 	ZONE_PADDING(_pad2_)
 	/* Rarely used or read-mostly fields */
@@ -524,8 +601,11 @@
 	struct zone node_zones[MAX_NR_ZONES];
 	struct zonelist node_zonelists[MAX_ZONELISTS];
 	int nr_zones;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
 	struct page *node_mem_map;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	struct page_cgroup *node_page_cgroup;
+#endif
 #endif
 	struct bootmem_data *bdata;
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -854,6 +934,7 @@
 #endif
 
 struct page;
+struct page_cgroup;
 struct mem_section {
 	/*
 	 * This is, logically, a pointer to an array of struct
@@ -871,6 +952,14 @@
 
 	/* See declaration of similar field in struct zone */
 	unsigned long *pageblock_flags;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	/*
+	 * If SPARSEMEM, pgdat doesn't have a page_cgroup pointer; we use the
+	 * section instead. (see memcontrol.h/page_cgroup.h for details.)
+	 */
+	struct page_cgroup *page_cgroup;
+	unsigned long pad;
+#endif
 };
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
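
Because the NR_LRU_BASE statistics are laid out to mirror enum lru_list, per-list counts can be summed directly. A small sketch, not taken from the patch:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Total pages currently sitting on a zone's evictable LRU lists. */
static unsigned long zone_evictable_lru_pages(struct zone *zone)
{
	unsigned long total = 0;
	enum lru_list l;

	for_each_evictable_lru(l)
		total += zone_page_state(zone, NR_LRU_BASE + l);

	return total;
}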
diff --git a/include/linux/module.h b/include/linux/module.h
index a41555c..3bfed01 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -16,6 +16,7 @@
 #include <linux/kobject.h>
 #include <linux/moduleparam.h>
 #include <linux/marker.h>
+#include <linux/tracepoint.h>
 #include <asm/local.h>
 
 #include <asm/module.h>
@@ -28,7 +29,7 @@
 #define MODULE_SYMBOL_PREFIX ""
 #endif
 
-#define MODULE_NAME_LEN (64 - sizeof(unsigned long))
+#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
 
 struct kernel_symbol
 {
@@ -59,6 +60,7 @@
 	struct kobject kobj;
 	struct module *mod;
 	struct kobject *drivers_dir;
+	struct module_param_attrs *mp;
 };
 
 /* These are either module local, or the kernel's dummy ones. */
@@ -241,7 +243,6 @@
 
 	/* Sysfs stuff. */
 	struct module_kobject mkobj;
-	struct module_param_attrs *param_attrs;
 	struct module_attribute *modinfo_attrs;
 	const char *version;
 	const char *srcversion;
@@ -276,7 +277,7 @@
 
 	/* Exception table */
 	unsigned int num_exentries;
-	const struct exception_table_entry *extable;
+	struct exception_table_entry *extable;
 
 	/* Startup function. */
 	int (*init)(void);
@@ -331,6 +332,10 @@
 	struct marker *markers;
 	unsigned int num_markers;
 #endif
+#ifdef CONFIG_TRACEPOINTS
+	struct tracepoint *tracepoints;
+	unsigned int num_tracepoints;
+#endif
 
 #ifdef CONFIG_MODULE_UNLOAD
 	/* What modules depend on me? */
@@ -453,6 +458,9 @@
 
 extern void module_update_markers(void);
 
+extern void module_update_tracepoints(void);
+extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
+
 #else /* !CONFIG_MODULES... */
 #define EXPORT_SYMBOL(sym)
 #define EXPORT_SYMBOL_GPL(sym)
@@ -557,6 +565,15 @@
 {
 }
 
+static inline void module_update_tracepoints(void)
+{
+}
+
+static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
+{
+	return 0;
+}
+
 #endif /* CONFIG_MODULES */
 
 struct device_driver;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ec62438..e4af339 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -13,6 +13,9 @@
 #define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
 #endif
 
+/* Chosen so that structs with an unsigned long line up. */
+#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
+
 #ifdef MODULE
 #define ___module_cat(a,b) __mod_ ## a ## b
 #define __module_cat(a,b) ___module_cat(a,b)
@@ -79,7 +82,8 @@
 #define __module_param_call(prefix, name, set, get, arg, perm)		\
 	/* Default value instead of permissions? */			\
 	static int __param_perm_check_##name __attribute__((unused)) =	\
-	BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2));	\
+	BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2))	\
+	+ BUILD_BUG_ON_ZERO(sizeof(""prefix) > MAX_PARAM_PREFIX_LEN);	\
 	static const char __param_str_##name[] = prefix #name;		\
 	static struct kernel_param __moduleparam_const __param_##name	\
 	__used								\
@@ -100,6 +104,25 @@
 #define module_param(name, type, perm)				\
 	module_param_named(name, name, type, perm)
 
+#ifndef MODULE
+/**
+ * core_param - define a historical core kernel parameter.
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type (for param_set_##type and param_get_##type)
+ * @perm: visibility in sysfs
+ *
+ * core_param is just like module_param(), but cannot be modular and
+ * doesn't add a prefix (such as "printk.").  This is for compatibility
+ * with __setup(), and it makes sense as truly core parameters aren't
+ * tied to the particular file they're in.
+ */
+#define core_param(name, var, type, perm)				\
+	param_check_##type(name, &(var));				\
+	__module_param_call("", name, param_set_##type, param_get_##type, \
+			    &var, perm)
+#endif /* !MODULE */
+
 /* Actually copy string: maxlen param is usually sizeof(string). */
 #define module_param_string(name, string, len, perm)			\
 	static const struct kparam_string __param_string_##name		\
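
A minimal usage sketch of core_param() as documented above; the parameter name and default are made up for illustration:

#include <linux/moduleparam.h>

/* Built-in-only knob: settable as "example_verbosity=2" on the kernel
 * command line, with sysfs visibility controlled by the perm argument.
 * No "modulename." prefix is added. */
static int example_verbosity = 1;
core_param(example_verbosity, example_verbosity, int, 0644);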
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d6fb115..ee5124ec 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -12,6 +12,7 @@
 #include <linux/mtd/flashchip.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/cfi_endian.h>
+#include <linux/mtd/xip.h>
 
 #ifdef CONFIG_MTD_CFI_I1
 #define cfi_interleave(cfi) 1
@@ -430,7 +431,6 @@
 {
 	map_word val;
 	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
-
 	val = cfi_build_cmd(cmd, map, cfi);
 
 	if (prev_val)
@@ -483,6 +483,13 @@
 	}
 }
 
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+			     struct cfi_private *cfi);
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+			     struct cfi_private *cfi);
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+			       struct cfi_private *cfi);
+
 struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
 			     const char* name);
 struct cfi_fixup {
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index 08dd131..d4f38c5 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -73,6 +73,10 @@
 	int buffer_write_time;
 	int erase_time;
 
+	int word_write_time_max;
+	int buffer_write_time_max;
+	int erase_time_max;
+
 	void *priv;
 };
 
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9226365..eae26bb 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -25,8 +25,10 @@
 #define MTD_ERASE_DONE          0x08
 #define MTD_ERASE_FAILED        0x10
 
+#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff
+
 /* If the erase fails, fail_addr might indicate exactly which block failed.  If
-   fail_addr = 0xffffffff, the failure was not at the device level or was not
+   fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
    specific to any particular block. */
 struct erase_info {
 	struct mtd_info *mtd;
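
Callers that report erase failures can now test the symbolic constant instead of a bare 0xffffffff. A sketch of an erase-completion callback; the function name is illustrative only:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static void example_erase_callback(struct erase_info *instr)
{
	if (instr->state != MTD_ERASE_FAILED)
		return;

	if (instr->fail_addr == MTD_FAIL_ADDR_UNKNOWN)
		printk(KERN_ERR "erase failed, offending block unknown\n");
	else
		printk(KERN_ERR "erase failed at 0x%llx\n",
		       (unsigned long long)instr->fail_addr);
}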
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
new file mode 100644
index 0000000..51534e5
--- /dev/null
+++ b/include/linux/mtd/nand-gpio.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_MTD_NAND_GPIO_H
+#define __LINUX_MTD_NAND_GPIO_H
+
+#include <linux/mtd/nand.h>
+
+struct gpio_nand_platdata {
+	int	gpio_nce;
+	int	gpio_nwp;
+	int	gpio_cle;
+	int	gpio_ale;
+	int	gpio_rdy;
+	void	(*adjust_parts)(struct gpio_nand_platdata *, size_t);
+	struct mtd_partition *parts;
+	unsigned int num_parts;
+	unsigned int options;
+	int	chip_delay;
+};
+
+#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 81774e5..733d3f3 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -248,6 +248,7 @@
  * @read_page_raw:	function to read a raw page without ECC
  * @write_page_raw:	function to write a raw page without ECC
  * @read_page:	function to read a page according to the ecc generator requirements
+ * @read_subpage:	function to read parts of the page covered by ECC.
  * @write_page:	function to write a page according to the ecc generator requirements
  * @read_oob:	function to read chip OOB data
  * @write_oob:	function to write chip OOB data
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index d1b310c..0c6bbe2 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -152,6 +152,8 @@
 #define ONENAND_SYS_CFG1_INT		(1 << 6)
 #define ONENAND_SYS_CFG1_IOBE		(1 << 5)
 #define ONENAND_SYS_CFG1_RDY_CONF	(1 << 4)
+#define ONENAND_SYS_CFG1_HF		(1 << 2)
+#define ONENAND_SYS_CFG1_SYNC_WRITE	(1 << 1)
 
 /*
  * Controller Status Register F240h (R)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 5014f7a..c92b4d4 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -73,7 +73,6 @@
 struct device_node;
 
 int __devinit of_mtd_parse_partitions(struct device *dev,
-                                      struct mtd_info *mtd,
                                       struct device_node *node,
                                       struct mtd_partition **pparts);
 
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
new file mode 100644
index 0000000..e77c1ce
--- /dev/null
+++ b/include/linux/mtd/sh_flctl.h
@@ -0,0 +1,125 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright © 2008 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __SH_FLCTL_H__
+#define __SH_FLCTL_H__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+/* FLCTL registers */
+#define FLCMNCR(f)		(f->reg + 0x0)
+#define FLCMDCR(f)		(f->reg + 0x4)
+#define FLCMCDR(f)		(f->reg + 0x8)
+#define FLADR(f)		(f->reg + 0xC)
+#define FLADR2(f)		(f->reg + 0x3C)
+#define FLDATAR(f)		(f->reg + 0x10)
+#define FLDTCNTR(f)		(f->reg + 0x14)
+#define FLINTDMACR(f)		(f->reg + 0x18)
+#define FLBSYTMR(f)		(f->reg + 0x1C)
+#define FLBSYCNT(f)		(f->reg + 0x20)
+#define FLDTFIFO(f)		(f->reg + 0x24)
+#define FLECFIFO(f)		(f->reg + 0x28)
+#define FLTRCR(f)		(f->reg + 0x2C)
+#define	FL4ECCRESULT0(f)	(f->reg + 0x80)
+#define	FL4ECCRESULT1(f)	(f->reg + 0x84)
+#define	FL4ECCRESULT2(f)	(f->reg + 0x88)
+#define	FL4ECCRESULT3(f)	(f->reg + 0x8C)
+#define	FL4ECCCR(f)		(f->reg + 0x90)
+#define	FL4ECCCNT(f)		(f->reg + 0x94)
+#define	FLERRADR(f)		(f->reg + 0x98)
+
+/* FLCMNCR control bits */
+#define ECCPOS2		(0x1 << 25)
+#define _4ECCCNTEN	(0x1 << 24)
+#define _4ECCEN		(0x1 << 23)
+#define _4ECCCORRECT	(0x1 << 22)
+#define SNAND_E		(0x1 << 18)	/* SNAND (0=512 1=2048) */
+#define QTSEL_E		(0x1 << 17)
+#define ENDIAN		(0x1 << 16)	/* 1 = little endian */
+#define FCKSEL_E	(0x1 << 15)
+#define ECCPOS_00	(0x00 << 12)
+#define ECCPOS_01	(0x01 << 12)
+#define ECCPOS_02	(0x02 << 12)
+#define ACM_SACCES_MODE	(0x01 << 10)
+#define NANWF_E		(0x1 << 9)
+#define SE_D		(0x1 << 8)	/* Spare area disable */
+#define	CE1_ENABLE	(0x1 << 4)	/* Chip Enable 1 */
+#define	CE0_ENABLE	(0x1 << 3)	/* Chip Enable 0 */
+#define	TYPESEL_SET	(0x1 << 0)
+
+/* FLCMDCR control bits */
+#define ADRCNT2_E	(0x1 << 31)	/* 5byte address enable */
+#define ADRMD_E		(0x1 << 26)	/* Sector address access */
+#define CDSRC_E		(0x1 << 25)	/* Data buffer selection */
+#define DOSR_E		(0x1 << 24)	/* Status read check */
+#define SELRW		(0x1 << 21)	/*  0:read 1:write */
+#define DOADR_E		(0x1 << 20)	/* Address stage execute */
+#define ADRCNT_1	(0x00 << 18)	/* Address data bytes: 1byte */
+#define ADRCNT_2	(0x01 << 18)	/* Address data bytes: 2byte */
+#define ADRCNT_3	(0x02 << 18)	/* Address data bytes: 3byte */
+#define ADRCNT_4	(0x03 << 18)	/* Address data bytes: 4byte */
+#define DOCMD2_E	(0x1 << 17)	/* 2nd cmd stage execute */
+#define DOCMD1_E	(0x1 << 16)	/* 1st cmd stage execute */
+
+/* FLTRCR control bits */
+#define TRSTRT		(0x1 << 0)	/* translation start */
+#define TREND		(0x1 << 1)	/* translation end */
+
+/* FL4ECCCR control bits */
+#define	_4ECCFA		(0x1 << 2)	/* 4 symbols correct fault */
+#define	_4ECCEND	(0x1 << 1)	/* 4 symbols end */
+#define	_4ECCEXST	(0x1 << 0)	/* 4 symbols exist */
+
+#define INIT_FL4ECCRESULT_VAL	0x03FF03FF
+#define LOOP_TIMEOUT_MAX	0x00010000
+
+#define mtd_to_flctl(mtd)	container_of(mtd, struct sh_flctl, mtd)
+
+struct sh_flctl {
+	struct mtd_info		mtd;
+	struct nand_chip	chip;
+	void __iomem		*reg;
+
+	uint8_t	done_buff[2048 + 64];	/* max size 2048 + 64 */
+	int	read_bytes;
+	int	index;
+	int	seqin_column;		/* column in SEQIN cmd */
+	int	seqin_page_addr;	/* page_addr in SEQIN cmd */
+	uint32_t seqin_read_cmd;		/* read cmd in SEQIN cmd */
+	int	erase1_page_addr;	/* page_addr in ERASE1 cmd */
+	uint32_t erase_ADRCNT;		/* bits of FLCMDCR in ERASE1 cmd */
+	uint32_t rw_ADRCNT;	/* bits of FLCMDCR in READ WRITE cmd */
+
+	int	hwecc_cant_correct[4];
+
+	unsigned page_size:1;	/* NAND page size (0 = 512, 1 = 2048) */
+	unsigned hwecc:1;	/* Hardware ECC (0 = disabled, 1 = enabled) */
+};
+
+struct sh_flctl_platform_data {
+	struct mtd_partition	*parts;
+	int			nr_parts;
+	unsigned long		flcmncr_val;
+
+	unsigned has_hwecc:1;
+};
+
+#endif	/* __SH_FLCTL_H__ */
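
Board code would describe the controller through the new sh_flctl_platform_data. A sketch with made-up partitioning and an illustrative FLCMNCR value, not a recommended configuration:

#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static struct mtd_partition example_nand_parts[] = {
	{
		.name	= "rootfs",
		.offset	= 0,
		.size	= MTDPART_SIZ_FULL,	/* rest of the device */
	},
};

static struct sh_flctl_platform_data example_flctl_pdata = {
	.parts		= example_nand_parts,
	.nr_parts	= ARRAY_SIZE(example_nand_parts),
	.flcmncr_val	= FCKSEL_E | TYPESEL_SET | NANWF_E,
	.has_hwecc	= 1,
};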
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ac8d023..4eaa834 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -367,8 +367,12 @@
 
 static inline struct rpc_cred *nfs_file_cred(struct file *file)
 {
-	if (file != NULL)
-		return nfs_file_open_context(file)->cred;
+	if (file != NULL) {
+		struct nfs_open_context *ctx =
+			nfs_file_open_context(file);
+		if (ctx)
+			return ctx->cred;
+	}
 	return NULL;
 }
 
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index bcb8f72..5231861 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -86,13 +86,6 @@
 void oprofile_arch_exit(void);
 
 /**
- * Add data to the event buffer.
- * The data passed is free-form, but typically consists of
- * file offsets, dcookies, context information, and ESCAPE codes.
- */
-void add_event_entry(unsigned long data);
-
-/**
  * Add a sample. This may be called from any context. Pass
  * smp_processor_id() as cpu.
  */
@@ -162,5 +155,14 @@
 
 /** lock for read/write safety */
 extern spinlock_t oprofilefs_lock;
+
+/**
+ * Add the contents of a circular buffer to the event buffer.
+ */
+void oprofile_put_buff(unsigned long *buf, unsigned int start,
+			unsigned int stop, unsigned int max);
+
+unsigned long oprofile_get_cpu_buffer_size(void);
+void oprofile_cpu_buffer_inc_smpl_lost(void);
  
 #endif /* OPROFILE_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e8..b12f93a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -93,6 +93,11 @@
 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_buddy,		/* Page is free, on buddy lists */
+	PG_swapbacked,		/* Page is backed by RAM/swap */
+#ifdef CONFIG_UNEVICTABLE_LRU
+	PG_unevictable,		/* Page is "unevictable"  */
+	PG_mlocked,		/* Page is vma mlocked */
+#endif
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 	PG_uncached,		/* Page has been mapped as uncached */
 #endif
@@ -161,6 +166,18 @@
 #define TESTSCFLAG(uname, lname)					\
 	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
 
+#define SETPAGEFLAG_NOOP(uname)						\
+static inline void SetPage##uname(struct page *page) {  }
+
+#define CLEARPAGEFLAG_NOOP(uname)					\
+static inline void ClearPage##uname(struct page *page) {  }
+
+#define __CLEARPAGEFLAG_NOOP(uname)					\
+static inline void __ClearPage##uname(struct page *page) {  }
+
+#define TESTCLEARFLAG_FALSE(uname)					\
+static inline int TestClearPage##uname(struct page *page) { return 0; }
+
 struct page;	/* forward declaration */
 
 TESTPAGEFLAG(Locked, locked)
@@ -169,6 +186,7 @@
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
 PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+	TESTCLEARFLAG(Active, active)
 __PAGEFLAG(Slab, slab)
 PAGEFLAG(Checked, checked)		/* Used by some filesystems */
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
@@ -176,6 +194,7 @@
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
 	__SETPAGEFLAG(Private, private)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
@@ -211,6 +230,25 @@
 PAGEFLAG_FALSE(SwapCache)
 #endif
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
+	TESTCLEARFLAG(Unevictable, unevictable)
+
+#define MLOCK_PAGES 1
+PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
+	TESTSCFLAG(Mlocked, mlocked)
+
+#else
+
+#define MLOCK_PAGES 0
+PAGEFLAG_FALSE(Mlocked)
+	SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
+
+PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
+	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
+	__CLEARPAGEFLAG_NOOP(Unevictable)
+#endif
+
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 PAGEFLAG(Uncached, uncached)
 #else
@@ -326,15 +364,25 @@
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define __PG_UNEVICTABLE	(1 << PG_unevictable)
+#define __PG_MLOCKED		(1 << PG_mlocked)
+#else
+#define __PG_UNEVICTABLE	0
+#define __PG_MLOCKED		0
+#endif
+
 #define PAGE_FLAGS	(1 << PG_lru   | 1 << PG_private   | 1 << PG_locked | \
 			 1 << PG_buddy | 1 << PG_writeback | \
-			 1 << PG_slab  | 1 << PG_swapcache | 1 << PG_active)
+			 1 << PG_slab  | 1 << PG_swapcache | 1 << PG_active | \
+			 __PG_UNEVICTABLE | __PG_MLOCKED)
 
 /*
  * Flags checked in bad_page().  Pages on the free list should not have
 * these flags set.  If they are, there is a problem.
  */
-#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
+#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \
+		1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked)
 
 /*
  * Flags checked when a page is freed.  Pages being freed should not have
@@ -347,7 +395,8 @@
 * Pages being prepped should not have these flags set.  If they are, there
  * is a problem.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
+#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \
+		1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked)
 
 #endif /* !__GENERATING_BOUNDS_H */
 #endif	/* PAGE_FLAGS_H */
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
new file mode 100644
index 0000000..f546ad6
--- /dev/null
+++ b/include/linux/page_cgroup.h
@@ -0,0 +1,108 @@
+#ifndef __LINUX_PAGE_CGROUP_H
+#define __LINUX_PAGE_CGROUP_H
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#include <linux/bit_spinlock.h>
+/*
+ * Page Cgroup can be considered as an extended mem_map.
+ * A page_cgroup is associated with every page descriptor. The
+ * page_cgroup helps us identify information about the cgroup.
+ * All page cgroups are allocated at boot or at memory hotplug time,
+ * so the page cgroup for a given pfn always exists.
+ */
+struct page_cgroup {
+	unsigned long flags;
+	struct mem_cgroup *mem_cgroup;
+	struct page *page;
+	struct list_head lru;		/* per cgroup LRU list */
+};
+
+void __init pgdat_page_cgroup_init(struct pglist_data *pgdat);
+void __init page_cgroup_init(void);
+struct page_cgroup *lookup_page_cgroup(struct page *page);
+
+enum {
+	/* flags for mem_cgroup */
+	PCG_LOCK,  /* page cgroup is locked */
+	PCG_CACHE, /* charged as cache */
+	PCG_USED, /* this object is in use. */
+	/* flags for LRU placement */
+	PCG_ACTIVE, /* page is active in this cgroup */
+	PCG_FILE, /* page is file system backed */
+	PCG_UNEVICTABLE, /* page is unevictable */
+};
+
+#define TESTPCGFLAG(uname, lname)			\
+static inline int PageCgroup##uname(struct page_cgroup *pc)	\
+	{ return test_bit(PCG_##lname, &pc->flags); }
+
+#define SETPCGFLAG(uname, lname)			\
+static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
+	{ set_bit(PCG_##lname, &pc->flags);  }
+
+#define CLEARPCGFLAG(uname, lname)			\
+static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
+	{ clear_bit(PCG_##lname, &pc->flags);  }
+
+/* Cache flag is set only once (at allocation) */
+TESTPCGFLAG(Cache, CACHE)
+
+TESTPCGFLAG(Used, USED)
+CLEARPCGFLAG(Used, USED)
+
+/* LRU management flags (from global-lru definition) */
+TESTPCGFLAG(File, FILE)
+SETPCGFLAG(File, FILE)
+CLEARPCGFLAG(File, FILE)
+
+TESTPCGFLAG(Active, ACTIVE)
+SETPCGFLAG(Active, ACTIVE)
+CLEARPCGFLAG(Active, ACTIVE)
+
+TESTPCGFLAG(Unevictable, UNEVICTABLE)
+SETPCGFLAG(Unevictable, UNEVICTABLE)
+CLEARPCGFLAG(Unevictable, UNEVICTABLE)
+
+static inline int page_cgroup_nid(struct page_cgroup *pc)
+{
+	return page_to_nid(pc->page);
+}
+
+static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
+{
+	return page_zonenum(pc->page);
+}
+
+static inline void lock_page_cgroup(struct page_cgroup *pc)
+{
+	bit_spin_lock(PCG_LOCK, &pc->flags);
+}
+
+static inline int trylock_page_cgroup(struct page_cgroup *pc)
+{
+	return bit_spin_trylock(PCG_LOCK, &pc->flags);
+}
+
+static inline void unlock_page_cgroup(struct page_cgroup *pc)
+{
+	bit_spin_unlock(PCG_LOCK, &pc->flags);
+}
+
+#else /* CONFIG_CGROUP_MEM_RES_CTLR */
+struct page_cgroup;
+
+static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
+{
+}
+
+static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
+{
+	return NULL;
+}
+
+static inline void page_cgroup_init(void)
+{
+}
+
+#endif
+#endif
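
A sketch of the lookup-then-lock pattern the memory controller uses with the new flat page_cgroup array (assuming CONFIG_CGROUP_MEM_RES_CTLR; the helper itself is not part of the patch):

#include <linux/mm.h>
#include <linux/page_cgroup.h>

static int page_charged_as_cache(struct page *page)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	int ret = 0;

	if (!pc)
		return 0;

	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc) && PageCgroupCache(pc))
		ret = 1;
	unlock_page_cgroup(pc);

	return ret;
}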
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5da31c1..709742b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,34 @@
 	}
 }
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define AS_UNEVICTABLE	(__GFP_BITS_SHIFT + 2)	/* e.g., ramdisk, SHM_LOCK */
+
+static inline void mapping_set_unevictable(struct address_space *mapping)
+{
+	set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+	clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+	if (likely(mapping))
+		return test_bit(AS_UNEVICTABLE, &mapping->flags);
+	return !!mapping;
+}
+#else
+static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+	return 0;
+}
+#endif
+
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
 	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
@@ -271,19 +299,19 @@
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-	set_bit(PG_locked, &page->flags);
+	__set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-	clear_bit(PG_locked, &page->flags);
+	__clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
 {
-	return !test_and_set_bit(PG_locked, &page->flags);
+	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
 /*
@@ -410,17 +438,17 @@
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	set_page_locked(page);
+	__set_page_locked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		clear_page_locked(page);
+		__clear_page_locked(page);
 	return error;
 }
 
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 8eb7fa7..e90a2cb 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -23,9 +23,9 @@
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_release_nonlru(struct pagevec *pvec);
 void __pagevec_free(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
-void __pagevec_lru_add_active(struct pagevec *pvec);
+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
 void pagevec_strip(struct pagevec *pvec);
+void pagevec_swap_free(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -81,10 +81,36 @@
 		__pagevec_free(pvec);
 }
 
-static inline void pagevec_lru_add(struct pagevec *pvec)
+static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
+{
+	____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
+}
+
+static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
+{
+	____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
+}
+
+static inline void __pagevec_lru_add_file(struct pagevec *pvec)
+{
+	____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
+}
+
+static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
+{
+	____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
+}
+
+static inline void pagevec_lru_add_file(struct pagevec *pvec)
 {
 	if (pagevec_count(pvec))
-		__pagevec_lru_add(pvec);
+		__pagevec_lru_add_file(pvec);
+}
+
+static inline void pagevec_lru_add_anon(struct pagevec *pvec)
+{
+	if (pagevec_count(pvec))
+		__pagevec_lru_add_anon(pvec);
 }
 
 #endif /* _LINUX_PAGEVEC_H */
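A brief sketch (not part of the patch) of how the renamed pagevec helpers are meant to be used when batching pages onto the file LRU; the function name is illustrative, and the caller is assumed to hold a reference on each page, which the pagevec consumes:

	/* Sketch: batch pages onto the inactive file LRU via a pagevec. */
	static void example_lru_add_pages(struct page **pages, int nr)
	{
		struct pagevec pvec;
		int i;

		pagevec_init(&pvec, 0);
		for (i = 0; i < nr; i++) {
			/* flush to the LRU whenever the pagevec fills up */
			if (!pagevec_add(&pvec, pages[i]))
				__pagevec_lru_add_file(&pvec);
		}
		pagevec_lru_add_file(&pvec);	/* flush any remainder */
	}
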
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 98dc624..752def8 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -51,6 +51,7 @@
 #include <linux/kobject.h>
 #include <asm/atomic.h>
 #include <linux/device.h>
+#include <linux/io.h>
 
 /* Include the ID list */
 #include <linux/pci_ids.h>
@@ -64,6 +65,11 @@
 	struct kobject kobj;
 };
 
+static inline const char *pci_slot_name(const struct pci_slot *slot)
+{
+	return kobject_name(&slot->kobj);
+}
+
 /* File state for mmap()s on /proc/bus/pci/X/Y */
 enum pci_mmap_state {
 	pci_mmap_io,
@@ -214,6 +220,7 @@
 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
 	unsigned int 	msi_enabled:1;
 	unsigned int	msix_enabled:1;
+	unsigned int	ari_enabled:1;	/* ARI forwarding */
 	unsigned int	is_managed:1;
 	unsigned int	is_pcie:1;
 	pci_dev_flags_t dev_flags;
@@ -347,7 +354,6 @@
 struct pci_dynids {
 	spinlock_t lock;            /* protects list, index */
 	struct list_head list;      /* for IDs added at runtime */
-	unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
 };
 
 /* ---------------------------------------------------------------- */
@@ -456,8 +462,8 @@
 
 /**
  * PCI_VDEVICE - macro used to describe a specific pci device in short form
- * @vend: the vendor name
- * @dev: the 16 bit PCI Device ID
+ * @vendor: the vendor name
+ * @device: the 16 bit PCI Device ID
  *
  * This macro is used to create a struct pci_device_id that matches a
  * specific PCI device.  The subvendor, and subdevice fields will be set
@@ -509,9 +515,10 @@
 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 				int busnr);
 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
-				 const char *name);
+				 const char *name,
+				 struct hotplug_slot *hotplug);
 void pci_destroy_slot(struct pci_slot *slot);
-void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
+void pci_renumber_slot(struct pci_slot *slot, int slot_nr);
 int pci_scan_slot(struct pci_bus *bus, int devfn);
 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -626,11 +633,15 @@
 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
 int pcie_get_readrq(struct pci_dev *dev);
 int pcie_set_readrq(struct pci_dev *dev, int rq);
+int pci_reset_function(struct pci_dev *dev);
+int pci_execute_reset_function(struct pci_dev *dev);
 void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
 
 /* ROM control related routines */
+int pci_enable_rom(struct pci_dev *pdev);
+void pci_disable_rom(struct pci_dev *pdev);
 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
 size_t pci_get_rom_size(void __iomem *rom, size_t size);
@@ -643,6 +654,7 @@
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
 void pci_pme_active(struct pci_dev *dev, bool enable);
 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_wake_from_d3(struct pci_dev *dev, bool enable);
 pci_power_t pci_target_state(struct pci_dev *dev);
 int pci_prepare_to_sleep(struct pci_dev *dev);
 int pci_back_from_sleep(struct pci_dev *dev);
@@ -723,7 +735,7 @@
 };
 
 struct msix_entry {
-	u16 	vector;	/* kernel uses to write allocated vector */
+	u32	vector;	/* kernel uses to write allocated vector */
 	u16	entry;	/* driver uses to specify entry, OS writes */
 };
 
@@ -1116,5 +1128,20 @@
 static inline void pci_mmcfg_late_init(void) { }
 #endif
 
+#ifdef CONFIG_HAS_IOMEM
+static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+	/*
+	 * Make sure the BAR is actually a memory resource, not an IO resource
+	 */
+	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	return ioremap_nocache(pci_resource_start(pdev, bar),
+				     pci_resource_len(pdev, bar));
+}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */
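A minimal sketch of how a driver probe routine might use the new pci_ioremap_bar() helper; the probe function and BAR number are illustrative, not taken from this patch:

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		void __iomem *regs;
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;

		/* returns NULL (with a WARN) if BAR 0 is not a memory BAR */
		regs = pci_ioremap_bar(pdev, 0);
		if (!regs) {
			pci_disable_device(pdev);
			return -ENOMEM;
		}

		pci_set_drvdata(pdev, regs);
		return 0;
	}
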
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a08cd06..a00bd1a 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -142,8 +142,6 @@
 
 /**
  * struct hotplug_slot - used to register a physical slot with the hotplug pci core
- * @name: the name of the slot being registered.  This string must
- * be unique amoung slots registered on this system.
  * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
  * @info: pointer to the &struct hotplug_slot_info for the initial values for
  * this slot.
@@ -153,7 +151,6 @@
  * needs.
  */
 struct hotplug_slot {
-	char				*name;
 	struct hotplug_slot_ops		*ops;
 	struct hotplug_slot_info	*info;
 	void (*release) (struct hotplug_slot *slot);
@@ -165,7 +162,13 @@
 };
 #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
 
-extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
+static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
+{
+	return pci_slot_name(slot->pci_slot);
+}
+
+extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
+			   const char *name);
 extern int pci_hp_deregister(struct hotplug_slot *slot);
 extern int __must_check pci_hp_change_slot_info	(struct hotplug_slot *slot,
 						 struct hotplug_slot_info *info);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 8edddc24..369f442 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1944,6 +1944,14 @@
 
 #define PCI_VENDOR_ID_OXSEMI		0x1415
 #define PCI_DEVICE_ID_OXSEMI_12PCI840	0x8403
+#define PCI_DEVICE_ID_OXSEMI_PCIe840		0xC000
+#define PCI_DEVICE_ID_OXSEMI_PCIe840_G		0xC004
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_0		0xC100
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G	0xC104
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1		0xC110
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G	0xC114
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U	0xC118
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU	0xC11C
 #define PCI_DEVICE_ID_OXSEMI_16PCI954	0x9501
 #define PCI_DEVICE_ID_OXSEMI_16PCI95N	0x9511
 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP	0x9513
@@ -2454,9 +2462,9 @@
 #define PCI_DEVICE_ID_INTEL_ICH10_3	0x3a1a
 #define PCI_DEVICE_ID_INTEL_ICH10_4	0x3a30
 #define PCI_DEVICE_ID_INTEL_ICH10_5	0x3a60
-#define PCI_DEVICE_ID_INTEL_PCH_0	0x3b10
-#define PCI_DEVICE_ID_INTEL_PCH_1	0x3b11
-#define PCI_DEVICE_ID_INTEL_PCH_2	0x3b30
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN	0x3b00
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX	0x3b1f
+#define PCI_DEVICE_ID_INTEL_PCH_SMBUS	0x3b30
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB	0x402f
 #define PCI_DEVICE_ID_INTEL_5100_16	0x65f0
 #define PCI_DEVICE_ID_INTEL_5100_21	0x65f5
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 450684f..e5effd4 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -377,6 +377,7 @@
 #define  PCI_EXP_DEVCAP_RBER	0x8000	/* Role-Based Error Reporting */
 #define  PCI_EXP_DEVCAP_PWR_VAL	0x3fc0000 /* Slot Power Limit Value */
 #define  PCI_EXP_DEVCAP_PWR_SCL	0xc000000 /* Slot Power Limit Scale */
+#define  PCI_EXP_DEVCAP_FLR     0x10000000 /* Function Level Reset */
 #define PCI_EXP_DEVCTL		8	/* Device Control */
 #define  PCI_EXP_DEVCTL_CERE	0x0001	/* Correctable Error Reporting En. */
 #define  PCI_EXP_DEVCTL_NFERE	0x0002	/* Non-Fatal Error Reporting Enable */
@@ -389,6 +390,7 @@
 #define  PCI_EXP_DEVCTL_AUX_PME	0x0400	/* Auxiliary Power PM Enable */
 #define  PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800  /* Enable No Snoop */
 #define  PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size */
+#define  PCI_EXP_DEVCTL_BCR_FLR 0x8000  /* Bridge Configuration Retry / FLR */
 #define PCI_EXP_DEVSTA		10	/* Device Status */
 #define  PCI_EXP_DEVSTA_CED	0x01	/* Correctable Error Detected */
 #define  PCI_EXP_DEVSTA_NFED	0x02	/* Non-Fatal Error Detected */
@@ -419,6 +421,10 @@
 #define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
 #define PCI_EXP_RTCAP		30	/* Root Capabilities */
 #define PCI_EXP_RTSTA		32	/* Root Status */
+#define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
+#define  PCI_EXP_DEVCAP2_ARI	0x20	/* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
+#define  PCI_EXP_DEVCTL2_ARI	0x20	/* Alternative Routing-ID */
 
 /* Extended Capabilities (PCI-X 2.0 and Express) */
 #define PCI_EXT_CAP_ID(header)		(header & 0x0000ffff)
@@ -429,6 +435,7 @@
 #define PCI_EXT_CAP_ID_VC	2
 #define PCI_EXT_CAP_ID_DSN	3
 #define PCI_EXT_CAP_ID_PWR	4
+#define PCI_EXT_CAP_ID_ARI	14
 
 /* Advanced Error Reporting */
 #define PCI_ERR_UNCOR_STATUS	4	/* Uncorrectable Error Status */
@@ -536,5 +543,14 @@
 #define HT_CAPTYPE_GEN3		0xD0	/* Generation 3 hypertransport configuration */
 #define HT_CAPTYPE_PM		0xE0	/* Hypertransport powermanagement configuration */
 
+/* Alternative Routing-ID Interpretation */
+#define PCI_ARI_CAP		0x04	/* ARI Capability Register */
+#define  PCI_ARI_CAP_MFVC	0x0001	/* MFVC Function Groups Capability */
+#define  PCI_ARI_CAP_ACS	0x0002	/* ACS Function Groups Capability */
+#define  PCI_ARI_CAP_NFN(x)	(((x) >> 8) & 0xff) /* Next Function Number */
+#define PCI_ARI_CTRL		0x06	/* ARI Control Register */
+#define  PCI_ARI_CTRL_MFVC	0x0001	/* MFVC Function Groups Enable */
+#define  PCI_ARI_CTRL_ACS	0x0002	/* ACS Function Groups Enable */
+#define  PCI_ARI_CTRL_FG(x)	(((x) >> 4) & 7) /* Function Group */
 
 #endif /* LINUX_PCI_REGS_H */
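A hedged sketch of how the new ARI definitions could be combined with the existing pci_find_ext_capability() helper; the function name is illustrative:

	static void example_show_ari(struct pci_dev *dev)
	{
		int pos;
		u16 cap;

		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return;		/* device does not implement ARI */

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		dev_info(&dev->dev, "ARI next function: %u\n",
			 PCI_ARI_CAP_NFN(cap));
	}
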
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index a7dd38f..a7c7213 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -45,8 +45,6 @@
 	int it_requeue_pending;		/* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
 	int it_sigev_notify;		/* notify word of sigevent struct */
-	int it_sigev_signo;		/* signo word of sigevent struct */
-	sigval_t it_sigev_value;	/* value word of sigevent struct */
 	struct task_struct *it_process;	/* process to send signal to */
 	struct sigqueue *sigq;		/* signal queue entry. */
 	union {
@@ -115,4 +113,6 @@
 
 long clock_nanosleep_restart(struct restart_block *restart_block);
 
+void update_rlimit_cpu(unsigned long rlim_new);
+
 #endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ea96ead..f9348cb 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -165,6 +165,12 @@
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
 
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+extern int power_supply_is_system_supplied(void);
+#else
+static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
+#endif
+
 extern int power_supply_register(struct device *parent,
 				 struct power_supply *psy);
 extern void power_supply_unregister(struct power_supply *psy);
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 5700450..a0fc322 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -19,10 +19,16 @@
 
 #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
 void create_prof_cpu_mask(struct proc_dir_entry *de);
+int create_proc_profile(void);
 #else
 static inline void create_prof_cpu_mask(struct proc_dir_entry *de)
 {
 }
+
+static inline int create_proc_profile(void)
+{
+	return 0;
+}
 #endif
 
 enum profile_type {
@@ -37,7 +43,6 @@
 /* init basic kernel profiler */
 int profile_init(void);
 int profile_setup(char *str);
-int create_proc_profile(void);
 void profile_tick(int type);
 
 /*
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ea7416c..22641d5 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -94,7 +94,6 @@
 extern void __ptrace_link(struct task_struct *child,
 			  struct task_struct *new_parent);
 extern void __ptrace_unlink(struct task_struct *child);
-extern void ptrace_untrace(struct task_struct *child);
 #define PTRACE_MODE_READ   1
 #define PTRACE_MODE_ATTACH 2
 /* Returns 0 on success, -errno on denial. */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
new file mode 100644
index 0000000..536b0ca
--- /dev/null
+++ b/include/linux/ring_buffer.h
@@ -0,0 +1,127 @@
+#ifndef _LINUX_RING_BUFFER_H
+#define _LINUX_RING_BUFFER_H
+
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+
+struct ring_buffer;
+struct ring_buffer_iter;
+
+/*
+ * Don't reference this struct directly; use the functions below.
+ */
+struct ring_buffer_event {
+	u32		type:2, len:3, time_delta:27;
+	u32		array[];
+};
+
+/**
+ * enum ring_buffer_type - internal ring buffer types
+ *
+ * @RINGBUF_TYPE_PADDING:	Left over page padding
+ *				 array is ignored
+ *				 size is variable depending on how much
+ *				  padding is needed
+ *
+ * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
+ *				 array[0] = time delta (28 .. 59)
+ *				 size = 8 bytes
+ *
+ * @RINGBUF_TYPE_TIME_STAMP:	Sync time stamp with external clock
+ *				 array[0] = tv_nsec
+ *				 array[1] = tv_sec
+ *				 size = 16 bytes
+ *
+ * @RINGBUF_TYPE_DATA:		Data record
+ *				 If len is zero:
+ *				  array[0] holds the actual length
+ *				  array[1..(length+3)/4-1] holds data
+ *				 else
+ *				  length = len << 2
+ *				  array[0..(length+3)/4] holds data
+ */
+enum ring_buffer_type {
+	RINGBUF_TYPE_PADDING,
+	RINGBUF_TYPE_TIME_EXTEND,
+	/* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
+	RINGBUF_TYPE_TIME_STAMP,
+	RINGBUF_TYPE_DATA,
+};
+
+unsigned ring_buffer_event_length(struct ring_buffer_event *event);
+void *ring_buffer_event_data(struct ring_buffer_event *event);
+
+/**
+ * ring_buffer_event_time_delta - return the delta timestamp of the event
+ * @event: the event to get the delta timestamp of
+ *
+ * The delta timestamp is the 27 bit timestamp since the last event.
+ */
+static inline unsigned
+ring_buffer_event_time_delta(struct ring_buffer_event *event)
+{
+	return event->time_delta;
+}
+
+/*
+ * size is in bytes for each per CPU buffer.
+ */
+struct ring_buffer *
+ring_buffer_alloc(unsigned long size, unsigned flags);
+void ring_buffer_free(struct ring_buffer *buffer);
+
+int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
+
+struct ring_buffer_event *
+ring_buffer_lock_reserve(struct ring_buffer *buffer,
+			 unsigned long length,
+			 unsigned long *flags);
+int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+			      struct ring_buffer_event *event,
+			      unsigned long flags);
+int ring_buffer_write(struct ring_buffer *buffer,
+		      unsigned long length, void *data);
+
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+struct ring_buffer_event *
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+
+struct ring_buffer_iter *
+ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_finish(struct ring_buffer_iter *iter);
+
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
+struct ring_buffer_event *
+ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
+int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+
+unsigned long ring_buffer_size(struct ring_buffer *buffer);
+
+void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_reset(struct ring_buffer *buffer);
+
+int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+			 struct ring_buffer *buffer_b, int cpu);
+
+int ring_buffer_empty(struct ring_buffer *buffer);
+int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct ring_buffer *buffer);
+void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+
+unsigned long ring_buffer_entries(struct ring_buffer *buffer);
+unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+
+u64 ring_buffer_time_stamp(int cpu);
+void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
+
+enum ring_buffer_flags {
+	RB_FL_OVERWRITE		= 1 << 0,
+};
+
+#endif /* _LINUX_RING_BUFFER_H */
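A minimal sketch of the reserve/commit write path and the consuming read path declared above; the buffer size, payload, and the assumption that the write happens on CPU 0 are illustrative only:

	static void example_ring_buffer(void)
	{
		struct ring_buffer *buffer;
		struct ring_buffer_event *event;
		unsigned long flags;
		u64 ts;

		buffer = ring_buffer_alloc(4096, RB_FL_OVERWRITE);
		if (!buffer)
			return;

		/* writer side: reserve space, fill it in, then commit */
		event = ring_buffer_lock_reserve(buffer, sizeof(int), &flags);
		if (event) {
			*(int *)ring_buffer_event_data(event) = 42;
			ring_buffer_unlock_commit(buffer, event, flags);
		}

		/* reader side: consume from the CPU the write ran on (assumed 0) */
		event = ring_buffer_consume(buffer, 0, &ts);
		if (event)
			pr_info("event length %u, delta %u\n",
				ring_buffer_event_length(event),
				ring_buffer_event_time_delta(event));

		ring_buffer_free(buffer);
	}
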
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fed6f5e..89f0564 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -39,18 +39,6 @@
 
 #ifdef CONFIG_MMU
 
-extern struct kmem_cache *anon_vma_cachep;
-
-static inline struct anon_vma *anon_vma_alloc(void)
-{
-	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
-}
-
-static inline void anon_vma_free(struct anon_vma *anon_vma)
-{
-	kmem_cache_free(anon_vma_cachep, anon_vma);
-}
-
 static inline void anon_vma_lock(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
@@ -75,6 +63,9 @@
 void anon_vma_link(struct vm_area_struct *);
 void __anon_vma_link(struct vm_area_struct *);
 
+extern struct anon_vma *page_lock_anon_vma(struct page *page);
+extern void page_unlock_anon_vma(struct anon_vma *anon_vma);
+
 /*
  * rmap interfaces called when adding or removing pte of page
  */
@@ -117,6 +108,19 @@
  */
 int page_mkclean(struct page *);
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * called in munlock()/munmap() path to check for other vmas holding
+ * the page mlocked.
+ */
+int try_to_munlock(struct page *);
+#else
+static inline int try_to_munlock(struct page *page)
+{
+	return 0;	/* a.k.a. SWAP_SUCCESS */
+}
+#endif
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init()		do {} while (0)
@@ -140,5 +144,6 @@
 #define SWAP_SUCCESS	0
 #define SWAP_AGAIN	1
 #define SWAP_FAIL	2
+#define SWAP_MLOCK	3
 
 #endif	/* _LINUX_RMAP_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c226c7b..10bff55 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -287,7 +287,6 @@
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern void hrtick_resched(void);
 
 extern void sched_show_task(struct task_struct *p);
 
@@ -403,12 +402,21 @@
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
 #define MMF_DUMP_ELF_HEADERS	6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED  8
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	5
+#define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
+	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
+	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF	0
+#endif
 
 struct sighand_struct {
 	atomic_t		count;
@@ -425,6 +433,39 @@
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -470,6 +511,17 @@
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
 
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
 
 	/*
@@ -500,7 +552,7 @@
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +561,6 @@
 	struct task_io_accounting ioac;
 
 	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
-
-	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -527,8 +571,6 @@
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	struct list_head cpu_timers[3];
-
 	/* keep the process-shared keyrings here so that they do the right
 	 * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -1137,8 +1179,7 @@
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-  	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1588,6 +1629,7 @@
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1622,6 +1664,7 @@
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -2085,6 +2128,30 @@
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
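A short sketch of how a caller might sample the new per-thread-group totals; the printing casts are for illustration only, since cputime_t is architecture specific:

	static void example_sample_group_time(struct task_struct *tsk)
	{
		struct task_cputime totals;

		thread_group_cputime(tsk, &totals);
		pr_info("utime %llu stime %llu runtime %llu ns\n",
			(unsigned long long)totals.utime,
			(unsigned long long)totals.stime,
			totals.sum_exec_runtime);
	}
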
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index a1783b2..dc50bcc 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -60,6 +60,19 @@
 	return seq_bitmap(m, mask->bits, MAX_NUMNODES);
 }
 
+int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
+		unsigned int nr_bits);
+
+static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask)
+{
+	return seq_bitmap_list(m, mask->bits, NR_CPUS);
+}
+
+static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
+{
+	return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
+}
+
 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
 int single_release(struct inode *, struct file *);
 void *__seq_open_private(struct file *, const struct seq_operations *, int);
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
new file mode 100644
index 0000000..68e212f
--- /dev/null
+++ b/include/linux/sh_intc.h
@@ -0,0 +1,91 @@
+#ifndef __SH_INTC_H
+#define __SH_INTC_H
+
+typedef unsigned char intc_enum;
+
+struct intc_vect {
+	intc_enum enum_id;
+	unsigned short vect;
+};
+
+#define INTC_VECT(enum_id, vect) { enum_id, vect }
+#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq))
+
+struct intc_group {
+	intc_enum enum_id;
+	intc_enum enum_ids[32];
+};
+
+#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } }
+
+struct intc_mask_reg {
+	unsigned long set_reg, clr_reg, reg_width;
+	intc_enum enum_ids[32];
+#ifdef CONFIG_SMP
+	unsigned long smp;
+#endif
+};
+
+struct intc_prio_reg {
+	unsigned long set_reg, clr_reg, reg_width, field_width;
+	intc_enum enum_ids[16];
+#ifdef CONFIG_SMP
+	unsigned long smp;
+#endif
+};
+
+struct intc_sense_reg {
+	unsigned long reg, reg_width, field_width;
+	intc_enum enum_ids[16];
+};
+
+#ifdef CONFIG_SMP
+#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
+#else
+#define INTC_SMP(stride, nr)
+#endif
+
+struct intc_desc {
+	struct intc_vect *vectors;
+	unsigned int nr_vectors;
+	struct intc_group *groups;
+	unsigned int nr_groups;
+	struct intc_mask_reg *mask_regs;
+	unsigned int nr_mask_regs;
+	struct intc_prio_reg *prio_regs;
+	unsigned int nr_prio_regs;
+	struct intc_sense_reg *sense_regs;
+	unsigned int nr_sense_regs;
+	char *name;
+#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
+	struct intc_mask_reg *ack_regs;
+	unsigned int nr_ack_regs;
+#endif
+};
+
+#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
+#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups,		\
+	mask_regs, prio_regs, sense_regs)				\
+struct intc_desc symbol __initdata = {					\
+	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),			\
+	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),			\
+	_INTC_ARRAY(sense_regs),					\
+	chipname,							\
+}
+
+#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
+#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups,	\
+	mask_regs, prio_regs, sense_regs, ack_regs)			\
+struct intc_desc symbol __initdata = {					\
+	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),			\
+	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),			\
+	_INTC_ARRAY(sense_regs),					\
+	chipname,							\
+	_INTC_ARRAY(ack_regs),						\
+}
+#endif
+
+void __init register_intc_controller(struct intc_desc *desc);
+int intc_set_priority(unsigned int irq, unsigned int prio);
+
+#endif /* __SH_INTC_H */
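A hedged sketch of how a CPU or board setup file might describe its controller with these macros; every enum value, vector and register address below is made up:

	enum {
		IRQ_TMU0 = 1, IRQ_SCIF_ERI, IRQ_SCIF_RXI,	/* sources */
		SCIF_GROUP,					/* group */
	};

	static struct intc_vect vectors[] __initdata = {
		INTC_VECT(IRQ_TMU0, 0x400),
		INTC_VECT(IRQ_SCIF_ERI, 0x4e0),
		INTC_VECT(IRQ_SCIF_RXI, 0x500),
	};

	static struct intc_group groups[] __initdata = {
		INTC_GROUP(SCIF_GROUP, IRQ_SCIF_ERI, IRQ_SCIF_RXI),
	};

	static struct intc_mask_reg mask_regs[] __initdata = {
		{ 0xffd00044, 0, 8,		/* set reg, clr reg, width */
		  { IRQ_TMU0, SCIF_GROUP } },
	};

	static struct intc_prio_reg prio_regs[] __initdata = {
		{ 0xffd00010, 0, 16, 4,		/* reg, 0, width, field width */
		  { IRQ_TMU0, SCIF_GROUP } },
	};

	static struct intc_sense_reg sense_regs[] __initdata = {
		{ 0xffd0001c, 16, 2, { IRQ_SCIF_ERI, IRQ_SCIF_RXI } },
	};

	static DECLARE_INTC_DESC(intc_desc, "example-sh", vectors, groups,
				 mask_regs, prio_regs, sense_regs);

	void __init plat_irq_setup(void)
	{
		register_intc_controller(&intc_desc);
	}
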
diff --git a/include/linux/swab.h b/include/linux/swab.h
index 270d5c20..bbed279 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -47,8 +47,6 @@
 {
 #ifdef __arch_swab16
 	return __arch_swab16(val);
-#elif defined(__arch_swab16p)
-	return __arch_swab16p(&val);
 #else
 	return __const_swab16(val);
 #endif
@@ -58,8 +56,6 @@
 {
 #ifdef __arch_swab32
 	return __arch_swab32(val);
-#elif defined(__arch_swab32p)
-	return __arch_swab32p(&val);
 #else
 	return __const_swab32(val);
 #endif
@@ -69,8 +65,6 @@
 {
 #ifdef __arch_swab64
 	return __arch_swab64(val);
-#elif defined(__arch_swab64p)
-	return __arch_swab64p(&val);
 #elif defined(__SWAB_64_THRU_32__)
 	__u32 h = val >> 32;
 	__u32 l = val & ((1ULL << 32) - 1);
@@ -84,8 +78,6 @@
 {
 #ifdef __arch_swahw32
 	return __arch_swahw32(val);
-#elif defined(__arch_swahw32p)
-	return __arch_swahw32p(&val);
 #else
 	return __const_swahw32(val);
 #endif
@@ -95,8 +87,6 @@
 {
 #ifdef __arch_swahb32
 	return __arch_swahb32(val);
-#elif defined(__arch_swahb32p)
-	return __arch_swahb32p(&val);
 #else
 	return __const_swahb32(val);
 #endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de40f169a..a3af95b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -7,6 +7,7 @@
 #include <linux/list.h>
 #include <linux/memcontrol.h>
 #include <linux/sched.h>
+#include <linux/node.h>
 
 #include <asm/atomic.h>
 #include <asm/page.h>
@@ -171,8 +172,10 @@
 
 
 /* linux/mm/swap.c */
-extern void lru_cache_add(struct page *);
-extern void lru_cache_add_active(struct page *);
+extern void __lru_cache_add(struct page *, enum lru_list lru);
+extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_cache_add_active_or_unevictable(struct page *,
+					struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
@@ -180,12 +183,38 @@
 extern void rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
+extern void add_page_to_unevictable_list(struct page *page);
+
+/**
+ * lru_cache_add_anon - add a page to the inactive anon LRU list
+ * @page: the page to add
+ */
+static inline void lru_cache_add_anon(struct page *page)
+{
+	__lru_cache_add(page, LRU_INACTIVE_ANON);
+}
+
+static inline void lru_cache_add_active_anon(struct page *page)
+{
+	__lru_cache_add(page, LRU_ACTIVE_ANON);
+}
+
+static inline void lru_cache_add_file(struct page *page)
+{
+	__lru_cache_add(page, LRU_INACTIVE_FILE);
+}
+
+static inline void lru_cache_add_active_file(struct page *page)
+{
+	__lru_cache_add(page, LRU_ACTIVE_FILE);
+}
+
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 							gfp_t gfp_mask);
-extern int __isolate_lru_page(struct page *page, int mode);
+extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
@@ -204,6 +233,34 @@
 }
 #endif
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
+
+extern unsigned long scan_unevictable_pages;
+extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+extern int scan_unevictable_register_node(struct node *node);
+extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int page_evictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return 1;
+}
+
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
+
+static inline int scan_unevictable_register_node(struct node *node)
+{
+	return 0;
+}
+
+static inline void scan_unevictable_unregister_node(struct node *node) { }
+#endif
+
 extern int kswapd_run(int nid);
 
 #ifdef CONFIG_MMU
@@ -251,6 +308,7 @@
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
+extern int remove_exclusive_swap_page_ref(struct page *);
 struct backing_dev_info;
 
 /* linux/mm/thrash.c */
@@ -339,6 +397,11 @@
 	return 0;
 }
 
+static inline int remove_exclusive_swap_page_ref(struct page *page)
+{
+	return 0;
+}
+
 static inline swp_entry_t get_swap_page(void)
 {
 	swp_entry_t entry;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index b330e28..9d68fed 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -21,8 +21,9 @@
 struct module;
 
 /* FIXME
- * The *owner field is no longer used, but leave around
- * until the tree gets cleaned up fully.
+ * The *owner field is no longer used.
+ * x86 tree has been cleaned up. The owner
+ * attribute is still left for other arches.
  */
 struct attribute {
 	const char		*name;
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 98921a3..b6ec818 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -96,9 +96,11 @@
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
+extern void tick_check_idle(int cpu);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
 # endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -106,26 +108,23 @@
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
 extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
-extern void tick_nohz_update_jiffies(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
-extern void tick_nohz_stop_idle(int cpu);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
-static inline void tick_nohz_update_jiffies(void) { }
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
 	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
 
 	return len;
 }
-static inline void tick_nohz_stop_idle(int cpu) { }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
 
diff --git a/include/linux/time.h b/include/linux/time.h
index 51e883d..4f1c9db 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -119,6 +119,7 @@
 extern unsigned int alarm_setitimer(unsigned int seconds);
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
+extern void getrawmonotonic(struct timespec *ts);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 
@@ -127,6 +128,9 @@
 extern void update_wall_time(void);
 extern void update_xtime_cache(u64 nsec);
 
+struct tms;
+extern void do_sys_times(struct tms *);
+
 /**
  * timespec_to_ns - Convert timespec to nanoseconds
  * @ts:		pointer to the timespec variable to be converted
@@ -216,6 +220,7 @@
 #define CLOCK_MONOTONIC			1
 #define CLOCK_PROCESS_CPUTIME_ID	2
 #define CLOCK_THREAD_CPUTIME_ID		3
+#define CLOCK_MONOTONIC_RAW		4
 
 /*
  * The IDs of various hardware clocks:
diff --git a/include/linux/timex.h b/include/linux/timex.h
index fc6035d..9007313 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -82,7 +82,7 @@
  */
 #define SHIFT_USEC 16		/* frequency offset scale (shift) */
 #define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 20
+#define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
 		       PPM_SCALE + 1)
 
@@ -141,8 +141,15 @@
 #define ADJ_MICRO		0x1000	/* select microsecond resolution */
 #define ADJ_NANO		0x2000	/* select nanosecond resolution */
 #define ADJ_TICK		0x4000	/* tick value */
+
+#ifdef __KERNEL__
+#define ADJ_ADJTIME		0x8000	/* switch between adjtime/adjtimex modes */
+#define ADJ_OFFSET_SINGLESHOT	0x0001	/* old-fashioned adjtime */
+#define ADJ_OFFSET_READONLY	0x2000	/* read-only adjtime */
+#else
 #define ADJ_OFFSET_SINGLESHOT	0x8001	/* old-fashioned adjtime */
-#define ADJ_OFFSET_SS_READ	0xa001  /* read-only adjtime */
+#define ADJ_OFFSET_SS_READ	0xa001	/* read-only adjtime */
+#endif
 
 /* xntp 3.4 compatibility names */
 #define MOD_OFFSET	ADJ_OFFSET
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
new file mode 100644
index 0000000..c5bb39c
--- /dev/null
+++ b/include/linux/tracepoint.h
@@ -0,0 +1,137 @@
+#ifndef _LINUX_TRACEPOINT_H
+#define _LINUX_TRACEPOINT_H
+
+/*
+ * Kernel Tracepoint API.
+ *
+ * See Documentation/tracepoint.txt.
+ *
+ * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * Heavily inspired from the Linux Kernel Markers.
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+
+struct module;
+struct tracepoint;
+
+struct tracepoint {
+	const char *name;		/* Tracepoint name */
+	int state;			/* State. */
+	void **funcs;
+} __attribute__((aligned(8)));
+
+
+#define TPPROTO(args...)	args
+#define TPARGS(args...)		args
+
+#ifdef CONFIG_TRACEPOINTS
+
+/*
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non NULL.
+ */
+#define __DO_TRACE(tp, proto, args)					\
+	do {								\
+		void **it_func;						\
+									\
+		rcu_read_lock_sched();					\
+		it_func = rcu_dereference((tp)->funcs);			\
+		if (it_func) {						\
+			do {						\
+				((void(*)(proto))(*it_func))(args);	\
+			} while (*(++it_func));				\
+		}							\
+		rcu_read_unlock_sched();				\
+	} while (0)
+
+/*
+ * Make sure the alignment of the structure in the __tracepoints section will
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ */
+#define DEFINE_TRACE(name, proto, args)					\
+	static inline void trace_##name(proto)				\
+	{								\
+		static const char __tpstrtab_##name[]			\
+		__attribute__((section("__tracepoints_strings")))	\
+		= #name ":" #proto;					\
+		static struct tracepoint __tracepoint_##name		\
+		__attribute__((section("__tracepoints"), aligned(8))) =	\
+		{ __tpstrtab_##name, 0, NULL };				\
+		if (unlikely(__tracepoint_##name.state))		\
+			__DO_TRACE(&__tracepoint_##name,		\
+				TPPROTO(proto), TPARGS(args));		\
+	}								\
+	static inline int register_trace_##name(void (*probe)(proto))	\
+	{								\
+		return tracepoint_probe_register(#name ":" #proto,	\
+			(void *)probe);					\
+	}								\
+	static inline void unregister_trace_##name(void (*probe)(proto))\
+	{								\
+		tracepoint_probe_unregister(#name ":" #proto,		\
+			(void *)probe);					\
+	}
+
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end);
+
+#else /* !CONFIG_TRACEPOINTS */
+#define DEFINE_TRACE(name, proto, args)			\
+	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
+	{ }								\
+	static inline void trace_##name(proto)				\
+	{ }								\
+	static inline int register_trace_##name(void (*probe)(proto))	\
+	{								\
+		return -ENOSYS;						\
+	}								\
+	static inline void unregister_trace_##name(void (*probe)(proto))\
+	{ }
+
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+struct tracepoint_iter {
+	struct module *module;
+	struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+	struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+	synchronize_sched();
+}
+
+#endif
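A hedged sketch of the intended flow: a subsystem declares a tracepoint with DEFINE_TRACE(), calls trace_*() at the instrumentation site, and a tracer attaches a probe with register_trace_*(). All names are illustrative:

	/* declaration, typically in a subsystem header */
	DEFINE_TRACE(example_io_done,
		TPPROTO(int rw, unsigned long sector),
		TPARGS(rw, sector));

	/* instrumentation site: a near-NOP until a probe is registered */
	static void example_complete_io(int rw, unsigned long sector)
	{
		trace_example_io_done(rw, sector);
	}

	/* tracer side: probe plus (un)registration */
	static void probe_io_done(int rw, unsigned long sector)
	{
		pr_info("io done: rw=%d sector=%lu\n", rw, sector);
	}

	static int example_tracer_attach(void)
	{
		return register_trace_example_io_done(probe_io_done);
	}

	static void example_tracer_detach(void)
	{
		unregister_trace_example_io_done(probe_io_done);
		/* wait for in-flight probe callers before freeing the module */
		tracepoint_synchronize_unregister();
	}
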
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 94ac74a..8fa973b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1135,6 +1135,7 @@
 	struct list_head urb_list;
 	wait_queue_head_t wait;
 	spinlock_t lock;
+	unsigned int poisoned:1;
 };
 
 static inline void init_usb_anchor(struct usb_anchor *anchor)
@@ -1459,12 +1460,18 @@
 extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
 extern int usb_unlink_urb(struct urb *urb);
 extern void usb_kill_urb(struct urb *urb);
+extern void usb_poison_urb(struct urb *urb);
+extern void usb_unpoison_urb(struct urb *urb);
 extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
 extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
 extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
 extern void usb_unanchor_urb(struct urb *urb);
 extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
 					 unsigned int timeout);
+extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
+extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
+extern int usb_anchor_empty(struct usb_anchor *anchor);
 
 /**
  * usb_urb_dir_in - check if an URB describes an IN transfer
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
index 42e84fc..54c4463 100644
--- a/include/linux/usb/Kbuild
+++ b/include/linux/usb/Kbuild
@@ -4,4 +4,5 @@
 header-y += gadgetfs.h
 header-y += midi.h
 header-y += g_printer.h
-
+header-y += tmc.h
+header-y += vstusb.h
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index ca228bb..18a7293 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -160,6 +160,15 @@
 	__u8	bDetailData[0];
 } __attribute__ ((packed));
 
+/* "OBEX Control Model Functional Descriptor" */
+struct usb_cdc_obex_desc {
+	__u8	bLength;
+	__u8	bDescriptorType;
+	__u8	bDescriptorSubType;
+
+	__le16	bcdVersion;
+} __attribute__ ((packed));
+
 /*-------------------------------------------------------------------------*/
 
 /*
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c932390..935c380 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -130,6 +130,9 @@
 
 int usb_add_function(struct usb_configuration *, struct usb_function *);
 
+int usb_function_deactivate(struct usb_function *);
+int usb_function_activate(struct usb_function *);
+
 int usb_interface_id(struct usb_configuration *, struct usb_function *);
 
 /**
@@ -316,9 +319,13 @@
 	struct usb_composite_driver	*driver;
 	u8				next_string_id;
 
-	spinlock_t			lock;
+	/* the gadget driver won't enable the data pullup
+	 * while the deactivation count is nonzero.
+	 */
+	unsigned			deactivations;
 
-	/* REVISIT use and existence of lock ... */
+	/* protects at least deactivation count */
+	spinlock_t			lock;
 };
 
 extern int usb_string_id(struct usb_composite_dev *c);
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 655341d..0b8617a 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -192,7 +192,7 @@
  * The driver.owner field should be set to the module owner of this driver.
  * The driver.name field should be set to the name of this driver (remember
  * it will show up in sysfs, so it needs to be short and to the point.
- * Useing the module name is a good idea.)
+ * Using the module name is a good idea.)
  */
 struct usb_serial_driver {
 	const char *description;
diff --git a/include/linux/usb/tmc.h b/include/linux/usb/tmc.h
new file mode 100644
index 0000000..c045ae1
--- /dev/null
+++ b/include/linux/usb/tmc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
+ * Copyright (C) 2008 Novell, Inc.
+ * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ * This file holds USB constants defined by the USB Device Class
+ * Definition for Test and Measurement devices published by the USB-IF.
+ *
+ * It also has the ioctl definitions for the usbtmc kernel driver that
+ * userspace needs to know about.
+ */
+
+#ifndef __LINUX_USB_TMC_H
+#define __LINUX_USB_TMC_H
+
+/* USB TMC status values */
+#define USBTMC_STATUS_SUCCESS				0x01
+#define USBTMC_STATUS_PENDING				0x02
+#define USBTMC_STATUS_FAILED				0x80
+#define USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS		0x81
+#define USBTMC_STATUS_SPLIT_NOT_IN_PROGRESS		0x82
+#define USBTMC_STATUS_SPLIT_IN_PROGRESS			0x83
+
+/* USB TMC requests values */
+#define USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT		1
+#define USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS	2
+#define USBTMC_REQUEST_INITIATE_ABORT_BULK_IN		3
+#define USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS	4
+#define USBTMC_REQUEST_INITIATE_CLEAR			5
+#define USBTMC_REQUEST_CHECK_CLEAR_STATUS		6
+#define USBTMC_REQUEST_GET_CAPABILITIES			7
+#define USBTMC_REQUEST_INDICATOR_PULSE			64
+
+/* Request values for USBTMC driver's ioctl entry point */
+#define USBTMC_IOC_NR			91
+#define USBTMC_IOCTL_INDICATOR_PULSE	_IO(USBTMC_IOC_NR, 1)
+#define USBTMC_IOCTL_CLEAR		_IO(USBTMC_IOC_NR, 2)
+#define USBTMC_IOCTL_ABORT_BULK_OUT	_IO(USBTMC_IOC_NR, 3)
+#define USBTMC_IOCTL_ABORT_BULK_IN	_IO(USBTMC_IOC_NR, 4)
+#define USBTMC_IOCTL_CLEAR_OUT_HALT	_IO(USBTMC_IOC_NR, 6)
+#define USBTMC_IOCTL_CLEAR_IN_HALT	_IO(USBTMC_IOC_NR, 7)
+
+#endif
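A hedged userspace sketch of using these ioctls against the character device created by the usbtmc driver; the /dev node name is an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/tmc.h>

	int main(void)
	{
		int fd = open("/dev/usbtmc0", O_RDWR);	/* node name assumed */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, USBTMC_IOCTL_CLEAR) < 0)
			perror("USBTMC_IOCTL_CLEAR");
		close(fd);
		return 0;
	}
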
diff --git a/include/linux/usb/vstusb.h b/include/linux/usb/vstusb.h
new file mode 100644
index 0000000..1cfac67
--- /dev/null
+++ b/include/linux/usb/vstusb.h
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ *  File: drivers/usb/misc/vstusb.h
+ *
+ *  Purpose: Support for the bulk USB Vernier Spectrophotometers
+ *
+ *  Author:     EQware Engineering, Inc.
+ *              Oregon City, OR, USA 97045
+ *
+ *  Copyright:  2007, 2008
+ *              Vernier Software & Technology
+ *              Beaverton, OR, USA 97005
+ *
+ *  Web:        www.vernier.com
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *****************************************************************************/
+/*****************************************************************************
+ *
+ *  The vstusb module is a standard usb 'client' driver running on top of the
+ *  standard usb host controller stack.
+ *
+ *  In general, vstusb supports standard bulk usb pipes.  It supports multiple
+ *  devices and multiple pipes per device.
+ *
+ *  The vstusb driver supports two interfaces:
+ *  1 - ioctl SEND_PIPE/RECV_PIPE - a general bulk write/read msg
+ *  	interface to any pipe with timeout support;
+ *  2 - standard read/write with ioctl config - offers standard read/write
+ *  	interface with ioctl configured pipes and timeouts.
+ *
+ *  Both interfaces can be signalled from another process and will abort
+ *  their i/o operation.
+ *
+ *  A timeout of 0 means NO timeout.  The user can still terminate the read via
+ *  signal.
+ *
+ *  If using multiple threads with this driver, the user should ensure that
+ *  any reads, writes, or ioctls are complete before closing the device.
+ *  Changing read/write timeouts or pipes takes effect on next read/write.
+ *
+ *****************************************************************************/
+
+struct vstusb_args {
+	union {
+		/* this struct is used for IOCTL_VSTUSB_SEND_PIPE,	*
+		 * IOCTL_VSTUSB_RECV_PIPE, and read()/write() fops	*/
+		struct {
+			void __user	*buffer;
+			size_t          count;
+			unsigned int    timeout_ms;
+			int             pipe;
+		};
+
+		/* this one is used for IOCTL_VSTUSB_CONFIG_RW  	*/
+		struct {
+			int rd_pipe;
+			int rd_timeout_ms;
+			int wr_pipe;
+			int wr_timeout_ms;
+		};
+	};
+};
+
+#define VST_IOC_MAGIC 'L'
+#define VST_IOC_FIRST 0x20
+#define IOCTL_VSTUSB_SEND_PIPE	_IO(VST_IOC_MAGIC, VST_IOC_FIRST)
+#define IOCTL_VSTUSB_RECV_PIPE	_IO(VST_IOC_MAGIC, VST_IOC_FIRST + 1)
+#define IOCTL_VSTUSB_CONFIG_RW	_IO(VST_IOC_MAGIC, VST_IOC_FIRST + 2)
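A hedged userspace sketch of filling struct vstusb_args for IOCTL_VSTUSB_SEND_PIPE; the device node and pipe number are assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/vstusb.h>

	int main(void)
	{
		struct vstusb_args args;
		char cmd[] = "*IDN?\n";
		int fd = open("/dev/vstusb0", O_RDWR);	/* node name assumed */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&args, 0, sizeof(args));
		args.buffer = cmd;
		args.count = sizeof(cmd) - 1;
		args.timeout_ms = 1000;		/* 1 second */
		args.pipe = 1;			/* bulk OUT pipe, assumed */
		if (ioctl(fd, IOCTL_VSTUSB_SEND_PIPE, &args) < 0)
			perror("IOCTL_VSTUSB_SEND_PIPE");
		close(fd);
		return 0;
	}
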
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
new file mode 100644
index 0000000..a102561
--- /dev/null
+++ b/include/linux/usb/wusb-wa.h
@@ -0,0 +1,271 @@
+/*
+ * Wireless USB Wire Adapter constants and structures.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation.
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ *
+ * References:
+ *   [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
+ */
+#ifndef __LINUX_USB_WUSB_WA_H
+#define __LINUX_USB_WUSB_WA_H
+
+/**
+ * Radio Command Request for the Radio Control Interface
+ *
+ * Radio Control Interface command and event codes are the same as
+ * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
+ */
+enum {
+	WA_EXEC_RC_CMD = 40,	/* Radio Control command Request */
+};
+
+/* Wireless Adapter Requests ([WUSB] table 8-51) */
+enum {
+	WUSB_REQ_ADD_MMC_IE     = 20,
+	WUSB_REQ_REMOVE_MMC_IE  = 21,
+	WUSB_REQ_SET_NUM_DNTS   = 22,
+	WUSB_REQ_SET_CLUSTER_ID = 23,
+	WUSB_REQ_SET_DEV_INFO   = 24,
+	WUSB_REQ_GET_TIME       = 25,
+	WUSB_REQ_SET_STREAM_IDX = 26,
+	WUSB_REQ_SET_WUSB_MAS   = 27,
+};
+
+
+/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
+enum {
+	WUSB_TIME_ADJ   = 0,
+	WUSB_TIME_BPST  = 1,
+	WUSB_TIME_WUSB  = 2,
+};
+
+enum {
+	WA_ENABLE = 0x01,
+	WA_RESET = 0x02,
+	RPIPE_PAUSE = 0x1,
+};
+
+/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
+enum {
+	WA_STATUS_ENABLED = 0x01,
+	WA_STATUS_RESETTING = 0x02
+};
+
+enum rpipe_crs {
+	RPIPE_CRS_CTL = 0x01,
+	RPIPE_CRS_ISO = 0x02,
+	RPIPE_CRS_BULK = 0x04,
+	RPIPE_CRS_INTR = 0x08
+};
+
+/**
+ * RPipe descriptor ([WUSB] section 8.5.2.11)
+ *
+ * FIXME: explain rpipes
+ */
+struct usb_rpipe_descriptor {
+	u8 	bLength;
+	u8	bDescriptorType;
+	__le16  wRPipeIndex;
+	__le16	wRequests;
+	__le16	wBlocks;		/* rw if 0 */
+	__le16	wMaxPacketSize;		/* rw? */
+	u8	bHSHubAddress;		/* reserved: 0 */
+	u8	bHSHubPort;		/* ??? FIXME ??? */
+	u8	bSpeed;			/* rw: xfer rate 'enum uwb_phy_rate' */
+	u8	bDeviceAddress;		/* rw: Target device address */
+	u8	bEndpointAddress;	/* rw: Target EP address */
+	u8	bDataSequence;		/* ro: Current Data sequence */
+	__le32	dwCurrentWindow;	/* ro */
+	u8	bMaxDataSequence;	/* ro?: max supported seq */
+	u8	bInterval;		/* rw:  */
+	u8	bOverTheAirInterval;	/* rw:  */
+	u8	bmAttribute;		/* ro?  */
+	u8	bmCharacteristics;	/* ro? enum rpipe_attr, supported xsactions */
+	u8	bmRetryOptions;		/* rw? */
+	__le16	wNumTransactionErrors;	/* rw */
+} __attribute__ ((packed));
+
+/**
+ * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
+ *
+ * These are the notifications coming on the notification endpoint of
+ * an HWA and a DWA.
+ */
+enum wa_notif_type {
+	DWA_NOTIF_RWAKE = 0x91,
+	DWA_NOTIF_PORTSTATUS = 0x92,
+	WA_NOTIF_TRANSFER = 0x93,
+	HWA_NOTIF_BPST_ADJ = 0x94,
+	HWA_NOTIF_DN = 0x95,
+};
+
+/**
+ * Wire Adapter notification header
+ *
+ * Notifications coming from a wire adapter use a common header
+ * defined in [WUSB] sections 8.4.5 & 8.5.4.
+ */
+struct wa_notif_hdr {
+	u8 bLength;
+	u8 bNotifyType;			/* enum wa_notif_type */
+} __attribute__((packed));
+
+/**
+ * HWA DN Received notification [(WUSB] section 8.5.4.2)
+ *
+ * The DNData is specified in WUSB1.0[7.6]. For each device
+ * notification we receive, we just need to dispatch it.
+ *
+ * @dndata:  this is really an array of notifications, but all start
+ *           with the same header.
+ */
+struct hwa_notif_dn {
+	struct wa_notif_hdr hdr;
+	u8 bSourceDeviceAddr;		/* from errata 2005/07 */
+	u8 bmAttributes;
+	struct wusb_dn_hdr dndata[];
+} __attribute__((packed));
+
+/* [WUSB] section 8.3.3 */
+enum wa_xfer_type {
+	WA_XFER_TYPE_CTL = 0x80,
+	WA_XFER_TYPE_BI = 0x81,		/* bulk/interrupt */
+	WA_XFER_TYPE_ISO = 0x82,
+	WA_XFER_RESULT = 0x83,
+	WA_XFER_ABORT = 0x84,
+};
+
+/* [WUSB] section 8.3.3 */
+struct wa_xfer_hdr {
+	u8 bLength;			/* 0x18 */
+	u8 bRequestType;		/* 0x80 WA_REQUEST_TYPE_CTL */
+	__le16 wRPipe;			/* RPipe index */
+	__le32 dwTransferID;		/* Host-assigned ID */
+	__le32 dwTransferLength;	/* Length of data to xfer */
+	u8 bTransferSegment;
+} __attribute__((packed));
+
+struct wa_xfer_ctl {
+	struct wa_xfer_hdr hdr;
+	u8 bmAttribute;
+	__le16 wReserved;
+	struct usb_ctrlrequest baSetupData;
+} __attribute__((packed));
+
+struct wa_xfer_bi {
+	struct wa_xfer_hdr hdr;
+	u8 bReserved;
+	__le16 wReserved;
+} __attribute__((packed));
+
+struct wa_xfer_hwaiso {
+	struct wa_xfer_hdr hdr;
+	u8 bReserved;
+	__le16 wPresentationTime;
+	__le32 dwNumOfPackets;
+	/* FIXME: u8 pktdata[]? */
+} __attribute__((packed));
+
+/* [WUSB] section 8.3.3.5 */
+struct wa_xfer_abort {
+	u8 bLength;
+	u8 bRequestType;
+	__le16 wRPipe;			/* RPipe index */
+	__le32 dwTransferID;		/* Host-assigned ID */
+} __attribute__((packed));
+
+/**
+ * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
+ *
+ */
+struct wa_notif_xfer {
+	struct wa_notif_hdr hdr;
+	u8 bEndpoint;
+	u8 Reserved;
+} __attribute__((packed));
+
+/** Transfer result basic codes [WUSB] table 8-15 */
+enum {
+	WA_XFER_STATUS_SUCCESS,
+	WA_XFER_STATUS_HALTED,
+	WA_XFER_STATUS_DATA_BUFFER_ERROR,
+	WA_XFER_STATUS_BABBLE,
+	WA_XFER_RESERVED,
+	WA_XFER_STATUS_NOT_FOUND,
+	WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
+	WA_XFER_STATUS_TRANSACTION_ERROR,
+	WA_XFER_STATUS_ABORTED,
+	WA_XFER_STATUS_RPIPE_NOT_READY,
+	WA_XFER_INVALID_FORMAT,
+	WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
+	WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
+};
+
+/** [WUSB] section 8.3.3.4 */
+struct wa_xfer_result {
+	struct wa_notif_hdr hdr;
+	__le32 dwTransferID;
+	__le32 dwTransferLength;
+	u8     bTransferSegment;
+	u8     bTransferStatus;
+	__le32 dwNumOfPackets;
+} __attribute__((packed));
+
+/**
+ * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
+ *
+ * NOTE: u16 fields are read Little Endian from the hardware.
+ *
+ * @bNumPorts is the original max number of devices that the host can
+ *            connect; we might chop this so the stack can handle
+ *            it. In case you need to access it, use wusbhc->ports_max
+ *            if it is a Wireless USB WA.
+ */
+struct usb_wa_descriptor {
+	u8	bLength;
+	u8	bDescriptorType;
+	u16	bcdWAVersion;
+	u8	bNumPorts;		/* don't use!! */
+	u8	bmAttributes;		/* Reserved == 0 */
+	u16	wNumRPipes;
+	u16	wRPipeMaxBlock;
+	u8	bRPipeBlockSize;
+	u8	bPwrOn2PwrGood;
+	u8	bNumMMCIEs;
+	u8	DeviceRemovable;	/* FIXME: in DWA this is up to 16 bytes */
+} __attribute__((packed));
+
+/**
+ * HWA Device Information Buffer (WUSB1.0[T8.54])
+ */
+struct hwa_dev_info {
+	u8	bmDeviceAvailability[32];       /* FIXME: ignored for now */
+	u8	bDeviceAddress;
+	__le16	wPHYRates;
+	u8	bmDeviceAttribute;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
new file mode 100644
index 0000000..5f401b6
--- /dev/null
+++ b/include/linux/usb/wusb.h
@@ -0,0 +1,376 @@
+/*
+ * Wireless USB Standard Definitions
+ * Event Size Tables
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ */
+
+#ifndef __WUSB_H__
+#define __WUSB_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/uwb/spec.h>
+#include <linux/usb/ch9.h>
+#include <linux/param.h>
+
+/**
+ * WUSB Information Element header
+ *
+ * I don't know why they decided to make it different from the MBOA
+ * MAC IE header; beats me.
+ */
+struct wuie_hdr {
+	u8 bLength;
+	u8 bIEIdentifier;
+} __attribute__((packed));
+
+enum {
+	WUIE_ID_WCTA = 0x80,
+	WUIE_ID_CONNECTACK,
+	WUIE_ID_HOST_INFO,
+	WUIE_ID_CHANGE_ANNOUNCE,
+	WUIE_ID_DEVICE_DISCONNECT,
+	WUIE_ID_HOST_DISCONNECT,
+	WUIE_ID_KEEP_ALIVE = 0x89,
+	WUIE_ID_ISOCH_DISCARD,
+	WUIE_ID_RESET_DEVICE,
+};
+
+/**
+ * Maximum number of array elements in a WUSB IE.
+ *
+ * WUSB1.0[7.5, before table 7-38] says that WUSB IEs that are
+ * "arrays" have to be limited to 4 elements. So we define it like
+ * that to keep things simple and submit only the needed size.
+ */
+#define WUIE_ELT_MAX 4
+
+/**
+ * Wrapper for the data that defines a CHID, a CDID or a CK
+ *
+ * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
+ * data. In order to avoid confusion and enforce types, we wrap it.
+ *
+ * Make it packed, as we use it in some hw definitions.
+ */
+struct wusb_ckhdid {
+	u8 data[16];
+} __attribute__((packed));
+
+static const
+struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+
+#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
+
+/**
+ * WUSB IE: Host Information (WUSB1.0[7.5.2])
+ *
+ * Used to provide information about the host to the Wireless USB
+ * devices in range (CHID can be used as an ASCII string).
+ */
+struct wuie_host_info {
+	struct wuie_hdr hdr;
+	__le16 attributes;
+	struct wusb_ckhdid CHID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
+ *
+ * Used to acknowledge device connect requests. See note for
+ * WUIE_ELT_MAX.
+ */
+struct wuie_connect_ack {
+	struct wuie_hdr hdr;
+	struct {
+		struct wusb_ckhdid CDID;
+		u8 bDeviceAddress;	/* 0 means unused */
+		u8 bReserved;
+	} blk[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE Host Information Element, Connect Availability
+ *
+ * WUSB1.0[7.5.2], bmAttributes description
+ */
+enum {
+	WUIE_HI_CAP_RECONNECT = 0,
+	WUIE_HI_CAP_LIMITED,
+	WUIE_HI_CAP_RESERVED,
+	WUIE_HI_CAP_ALL,
+};
+
+/**
+ * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
+ *
+ * Tells devices the host is going to stop sending MMCs and will disappear.
+ */
+struct wuie_channel_stop {
+	struct wuie_hdr hdr;
+	u8 attributes;
+	u8 timestamp[3];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Keepalive (WUSB1.0[7.5.9])
+ *
+ * Ask device(s) to send keepalives.
+ */
+struct wuie_keep_alive {
+	struct wuie_hdr hdr;
+	u8 bDeviceAddress[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Reset device (WUSB1.0[7.5.11])
+ *
+ * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
+ * use it for one at a time...
+ *
+ * In any case, this request is a wee bit silly: why don't they target
+ * by address??
+ */
+struct wuie_reset {
+	struct wuie_hdr hdr;
+	struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
+ *
+ * Tell device to disconnect; we can fit 4 addresses, but we only use
+ * it for one at a time...
+ */
+struct wuie_disconnect {
+	struct wuie_hdr hdr;
+	u8 bDeviceAddress;
+	u8 padding;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
+ *
+ * Tells all connected devices to disconnect.
+ */
+struct wuie_host_disconnect {
+	struct wuie_hdr hdr;
+} __attribute__((packed));
+
+/**
+ * WUSB Device Notification header (WUSB1.0[7.6])
+ */
+struct wusb_dn_hdr {
+	u8 bType;
+	u8 notifdata[];
+} __attribute__((packed));
+
+/** Device Notification codes (WUSB1.0[Table 7-54]) */
+enum WUSB_DN {
+	WUSB_DN_CONNECT = 0x01,
+	WUSB_DN_DISCONNECT = 0x02,
+	WUSB_DN_EPRDY = 0x03,
+	WUSB_DN_MASAVAILCHANGED = 0x04,
+	WUSB_DN_RWAKE = 0x05,
+	WUSB_DN_SLEEP = 0x06,
+	WUSB_DN_ALIVE = 0x07,
+};
+
+/** WUSB Device Notification Connect */
+struct wusb_dn_connect {
+	struct wusb_dn_hdr hdr;
+	__le16 attributes;
+	struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
+{
+	return le16_to_cpu(dn->attributes) & 0xff;
+}
+
+static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
+{
+	return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
+}
+
+static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
+{
+	return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
+}
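+
+/*
+ * Example (illustrative sketch): decoding the attributes of a received
+ * Connect notification, assuming @dnc points to a struct
+ * wusb_dn_connect received from a device:
+ *
+ *	if (wusb_dn_connect_new_connection(dnc))
+ *		...brand new connection request...
+ *	else
+ *		...reconnect request; the previous DevAddr is given by
+ *		   wusb_dn_connect_prev_dev_addr(dnc)...
+ */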
+
+/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
+struct wusb_dn_alive {
+	struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/** Device is disconnecting (WUSB1.0[7.6.2]) */
+struct wusb_dn_disconnect {
+	struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/* General constants */
+enum {
+	WUSB_TRUST_TIMEOUT_MS = 4000,	/* [WUSB] section 4.15.1 */
+};
+
+static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size,
+				   const struct wusb_ckhdid *ckhdid)
+{
+	return scnprintf(pr_ckhdid, size,
+			 "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx "
+			 "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx",
+			 ckhdid->data[0],  ckhdid->data[1],
+			 ckhdid->data[2],  ckhdid->data[3],
+			 ckhdid->data[4],  ckhdid->data[5],
+			 ckhdid->data[6],  ckhdid->data[7],
+			 ckhdid->data[8],  ckhdid->data[9],
+			 ckhdid->data[10], ckhdid->data[11],
+			 ckhdid->data[12], ckhdid->data[13],
+			 ckhdid->data[14], ckhdid->data[15]);
+}
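+
+/*
+ * Example (illustrative sketch): printing a CHID/CDID/CK for
+ * debugging, assuming @chid is a struct wusb_ckhdid filled in
+ * elsewhere:
+ *
+ *	char buf[WUSB_CKHDID_STRSIZE];
+ *
+ *	ckhdid_printf(buf, sizeof(buf), &chid);
+ *	printk(KERN_DEBUG "CHID: %s\n", buf);
+ */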
+
+/*
+ * WUSB Crypto stuff (WUSB1.0[6])
+ */
+
+extern const char *wusb_et_name(u8);
+
+/**
+ * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
+ * the host or the device.
+ */
+static inline u8 wusb_key_index(int index, int type, int originator)
+{
+	return (originator << 6) | (type << 4) | index;
+}
+
+#define WUSB_KEY_INDEX_TYPE_PTK			0 /* for HWA only */
+#define WUSB_KEY_INDEX_TYPE_ASSOC		1
+#define WUSB_KEY_INDEX_TYPE_GTK			2
+#define WUSB_KEY_INDEX_ORIGINATOR_HOST		0
+#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE	1
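+
+/*
+ * Example (illustrative sketch): the index for a host-originated PTK
+ * in key slot 1 (the slot number is just a placeholder) would be
+ * composed as
+ *
+ *	u8 key_idx = wusb_key_index(1, WUSB_KEY_INDEX_TYPE_PTK,
+ *				    WUSB_KEY_INDEX_ORIGINATOR_HOST);
+ */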
+
+/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
+struct aes_ccm_nonce {
+	u8 sfn[6];              /* Little Endian */
+	u8 tkid[3];             /* LE */
+	struct uwb_dev_addr dest_addr;
+	struct uwb_dev_addr src_addr;
+} __attribute__((packed));
+
+/* A CCM operation label, defined in WUSB1.0[6.5.x] */
+struct aes_ccm_label {
+	u8 data[14];
+} __attribute__((packed));
+
+/*
+ * Input to the key derivation sequence defined in
+ * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
+ * PRF function.
+ */
+struct wusb_keydvt_in {
+	u8 hnonce[16];
+	u8 dnonce[16];
+} __attribute__((packed));
+
+/*
+ * Output from the key derivation sequence defined in
+ * WUSB1.0[6.5.1].
+ */
+struct wusb_keydvt_out {
+	u8 kck[16];
+	u8 ptk[16];
+} __attribute__((packed));
+
+/* Pseudo Random Function WUSB1.0[6.5] */
+extern int wusb_crypto_init(void);
+extern void wusb_crypto_exit(void);
+extern ssize_t wusb_prf(void *out, size_t out_size,
+			const u8 key[16], const struct aes_ccm_nonce *_n,
+			const struct aes_ccm_label *a,
+			const void *b, size_t blen, size_t len);
+
+static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
+			      const struct aes_ccm_nonce *n,
+			      const struct aes_ccm_label *a,
+			      const void *b, size_t blen)
+{
+	return wusb_prf(out, out_size, key, n, a, b, blen, 64);
+}
+
+static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
+			       const struct aes_ccm_nonce *n,
+			       const struct aes_ccm_label *a,
+			       const void *b, size_t blen)
+{
+	return wusb_prf(out, out_size, key, n, a, b, blen, 128);
+}
+
+static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
+			       const struct aes_ccm_nonce *n,
+			       const struct aes_ccm_label *a,
+			       const void *b, size_t blen)
+{
+	return wusb_prf(out, out_size, key, n, a, b, blen, 256);
+}
+
+/* Key derivation WUSB1.0[6.5.1] */
+static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
+				  const u8 key[16],
+				  const struct aes_ccm_nonce *n,
+				  const struct wusb_keydvt_in *keydvt_in)
+{
+	const struct aes_ccm_label a = { .data = "Pair-wise keys" };
+	return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
+			    keydvt_in, sizeof(*keydvt_in));
+}
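+
+/*
+ * Example (illustrative sketch): deriving the KCK and PTK during the
+ * 4-way handshake, assuming @ck holds the 16 byte connection key and
+ * @n and @dvt_in have been filled from the exchanged handshake data:
+ *
+ *	struct wusb_keydvt_out dvt_out;
+ *	int result;
+ *
+ *	result = wusb_key_derive(&dvt_out, ck, &n, &dvt_in);
+ *	if (result < 0)
+ *		return result;
+ *	...use dvt_out.kck to verify the handshake MIC and dvt_out.ptk
+ *	   as the pairwise temporal key...
+ */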
+
+/*
+ * Out-of-band MIC Generation WUSB1.0[6.5.2]
+ *
+ * Compute the MIC over @key, @n and @hs and place it in @mic_out.
+ *
+ * @mic_out:  Where to place the 8 byte MIC tag
+ * @key:      KCK from the derivation process
+ * @n:        CCM nonce, n->sfn == 0, TKID as established in the
+ *            process.
+ * @hs:       Handshake struct for phase 2 of the 4-way.
+ *            hs->bStatus and hs->bReserved are zero.
+ *            hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2]).
+ *            hs->dest_addr is the device's USB address padded with 0
+ *            hs->src_addr is the host's UWB device address
+ *            hs->mic is ignored (as we compute that value).
+ */
+static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
+			       const struct aes_ccm_nonce *n,
+			       const struct usb_handshake *hs)
+{
+	const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
+	return wusb_prf_64(mic_out, 8, key, n, &a,
+			   hs, sizeof(*hs) - sizeof(hs->MIC));
+}
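+
+/*
+ * Example (illustrative sketch): verifying the MIC of a received
+ * handshake message, assuming @dvt_out.kck was derived as above and
+ * @n and @hs are set up as described in the comment:
+ *
+ *	u8 mic[8];
+ *	int result;
+ *
+ *	result = wusb_oob_mic(mic, dvt_out.kck, &n, &hs);
+ *	if (result < 0 || memcmp(mic, hs.MIC, sizeof(mic)))
+ *		...handshake failed...
+ */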
+
+#endif /* #ifndef __WUSB_H__ */
diff --git a/include/linux/uwb.h b/include/linux/uwb.h
new file mode 100644
index 0000000..f9ccbd9
--- /dev/null
+++ b/include/linux/uwb.h
@@ -0,0 +1,765 @@
+/*
+ * Ultra Wide Band
+ * UWB API
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc: overview of the API, different parts and pointers
+ */
+
+#ifndef __LINUX__UWB_H__
+#define __LINUX__UWB_H__
+
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/uwb/spec.h>
+
+struct uwb_dev;
+struct uwb_beca_e;
+struct uwb_rc;
+struct uwb_rsv;
+struct uwb_dbg;
+
+/**
+ * struct uwb_dev - a UWB Device
+ * @rc: UWB Radio Controller that discovered the device (kind of its
+ *     parent).
+ * @bce: a beacon cache entry for this device; or NULL if the device
+ *     is a local radio controller.
+ * @mac_addr: the EUI-48 address of this device.
+ * @dev_addr: the current DevAddr used by this device.
+ * @beacon_slot: the slot number the beacon is using.
+ * @streams: bitmap of streams allocated to reservations targeted at
+ *     this device.  For an RC, this is the streams allocated for
+ *     reservations targeted at DevAddrs.
+ *
+ * A UWB device may either be a neighbor or part of a local radio
+ * controller.
+ */
+struct uwb_dev {
+	struct mutex mutex;
+	struct list_head list_node;
+	struct device dev;
+	struct uwb_rc *rc;		/* radio controller */
+	struct uwb_beca_e *bce;		/* Beacon Cache Entry */
+
+	struct uwb_mac_addr mac_addr;
+	struct uwb_dev_addr dev_addr;
+	int beacon_slot;
+	DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
+};
+#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
+
+/**
+ * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
+ *
+ * RC[CE]Bs have a 'context ID' field that matches the command with
+ * the event received to confirm it.
+ *
+ * Maximum number of context IDs
+ */
+enum { UWB_RC_CTX_MAX = 256 };
+
+
+/** Notification chain head for UWB generated events to listeners */
+struct uwb_notifs_chain {
+	struct list_head list;
+	struct mutex mutex;
+};
+
+/**
+ * struct uwb_mas_bm - a bitmap of all MAS in a superframe
+ * @bm: a bitmap of length #UWB_NUM_MAS
+ */
+struct uwb_mas_bm {
+	DECLARE_BITMAP(bm, UWB_NUM_MAS);
+};
+
+/**
+ * uwb_rsv_state - UWB Reservation state.
+ *
+ * NONE - reservation is not active (no DRP IE being transmitted).
+ *
+ * Owner reservation states:
+ *
+ * INITIATED - owner has sent an initial DRP request.
+ * PENDING - target responded with pending Reason Code.
+ * MODIFIED - reservation manager is modifying an established
+ * reservation with a different MAS allocation.
+ * ESTABLISHED - the reservation has been successfully negotiated.
+ *
+ * Target reservation states:
+ *
+ * DENIED - request is denied.
+ * ACCEPTED - request is accepted.
+ * PENDING - PAL has yet to make a decision on whether to accept or
+ * deny.
+ *
+ * FIXME: further target states TBD.
+ */
+enum uwb_rsv_state {
+	UWB_RSV_STATE_NONE,
+	UWB_RSV_STATE_O_INITIATED,
+	UWB_RSV_STATE_O_PENDING,
+	UWB_RSV_STATE_O_MODIFIED,
+	UWB_RSV_STATE_O_ESTABLISHED,
+	UWB_RSV_STATE_T_ACCEPTED,
+	UWB_RSV_STATE_T_DENIED,
+	UWB_RSV_STATE_T_PENDING,
+
+	UWB_RSV_STATE_LAST,
+};
+
+enum uwb_rsv_target_type {
+	UWB_RSV_TARGET_DEV,
+	UWB_RSV_TARGET_DEVADDR,
+};
+
+/**
+ * struct uwb_rsv_target - the target of a reservation.
+ *
+ * Reservations are either unicast and targeted at a single device
+ * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
+ * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
+ */
+struct uwb_rsv_target {
+	enum uwb_rsv_target_type type;
+	union {
+		struct uwb_dev *dev;
+		struct uwb_dev_addr devaddr;
+	};
+};
+
+/*
+ * Number of streams reserved for reservations targeted at DevAddrs.
+ */
+#define UWB_NUM_GLOBAL_STREAMS 1
+
+typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
+
+/**
+ * struct uwb_rsv - a DRP reservation
+ *
+ * Data structure management:
+ *
+ * @rc:             the radio controller this reservation is for
+ *                  (as target or owner)
+ * @rc_node:        a list node for the RC
+ * @pal_node:       a list node for the PAL
+ *
+ * Owner and target parameters:
+ *
+ * @owner:          the UWB device owning this reservation
+ * @target:         the target UWB device
+ * @type:           reservation type
+ *
+ * Owner parameters:
+ *
+ * @max_mas:        maximum number of MAS
+ * @min_mas:        minimum number of MAS
+ * @sparsity:       owner selected sparsity
+ * @is_multicast:   true iff multicast
+ *
+ * @callback:       callback function when the reservation completes
+ * @pal_priv:       private data for the PAL making the reservation
+ *
+ * Reservation status:
+ *
+ * @status:         negotiation status
+ * @stream:         stream index allocated for this reservation
+ * @mas:            reserved MAS
+ * @drp_ie:         the DRP IE
+ * @ie_valid:       true iff the DRP IE matches the reservation parameters
+ *
+ * DRP reservations are uniquely identified by the owner, target and
+ * stream index.  However, when using a DevAddr as a target (e.g., for
+ * a WUSB cluster reservation) the responses may be received from
+ * devices with different DevAddrs.  In this case, reservations are
+ * uniquely identified by just the stream index.  A number of stream
+ * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
+ */
+struct uwb_rsv {
+	struct uwb_rc *rc;
+	struct list_head rc_node;
+	struct list_head pal_node;
+
+	struct uwb_dev *owner;
+	struct uwb_rsv_target target;
+	enum uwb_drp_type type;
+	int max_mas;
+	int min_mas;
+	int sparsity;
+	bool is_multicast;
+
+	uwb_rsv_cb_f callback;
+	void *pal_priv;
+
+	enum uwb_rsv_state state;
+	u8 stream;
+	struct uwb_mas_bm mas;
+	struct uwb_ie_drp *drp_ie;
+	bool ie_valid;
+	struct timer_list timer;
+	bool expired;
+};
+
+static const
+struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
+
+static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
+{
+	bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
+}
+
+/**
+ * struct uwb_drp_avail - a radio controller's view of MAS usage
+ * @global:   MAS unused by neighbors (excluding reservations targeted
+ *            or owned by the local radio controller) or the beacon period
+ * @local:    MAS unused by local established reservations
+ * @pending:  MAS unused by local pending reservations
+ * @ie:       DRP Availability IE to be included in the beacon
+ * @ie_valid: true iff @ie is valid and does not need to be regenerated from
+ *            @global and @local
+ *
+ * Each radio controller maintains a view of MAS usage or
+ * availability. MAS available for a new reservation are determined
+ * from the intersection of @global, @local, and @pending.
+ *
+ * The radio controller must transmit a DRP Availability IE that's the
+ * intersection of @global and @local.
+ *
+ * A set bit indicates the MAS is unused and available.
+ *
+ * rc->rsvs_mutex should be held before accessing this data structure.
+ *
+ * [ECMA-368] section 17.4.3.
+ */
+struct uwb_drp_avail {
+	DECLARE_BITMAP(global, UWB_NUM_MAS);
+	DECLARE_BITMAP(local, UWB_NUM_MAS);
+	DECLARE_BITMAP(pending, UWB_NUM_MAS);
+	struct uwb_ie_drp_avail ie;
+	bool ie_valid;
+};
+
+
+const char *uwb_rsv_state_str(enum uwb_rsv_state state);
+const char *uwb_rsv_type_str(enum uwb_drp_type type);
+
+struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
+			       void *pal_priv);
+void uwb_rsv_destroy(struct uwb_rsv *rsv);
+
+int uwb_rsv_establish(struct uwb_rsv *rsv);
+int uwb_rsv_modify(struct uwb_rsv *rsv,
+		   int max_mas, int min_mas, int sparsity);
+void uwb_rsv_terminate(struct uwb_rsv *rsv);
+
+void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
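+
+/*
+ * Example (illustrative sketch): how a PAL could establish a hard
+ * reservation towards a peer device.  The callback, @peer_uwb_dev,
+ * @my_pal_priv and the parameter values are placeholders; the fields
+ * set before uwb_rsv_establish() are the "owner parameters"
+ * documented in struct uwb_rsv, and the NULL check on uwb_rsv_create()
+ * is omitted for brevity:
+ *
+ *	static void my_rsv_cb(struct uwb_rsv *rsv)
+ *	{
+ *		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
+ *			...use the MAS in rsv->mas...
+ *	}
+ *
+ *	rsv = uwb_rsv_create(rc, my_rsv_cb, my_pal_priv);
+ *	rsv->target.type = UWB_RSV_TARGET_DEV;
+ *	rsv->target.dev  = peer_uwb_dev;
+ *	rsv->type        = UWB_DRP_TYPE_HARD;
+ *	rsv->max_mas     = 256;
+ *	rsv->min_mas     = 16;
+ *	rsv->sparsity    = 16;
+ *	err = uwb_rsv_establish(rsv);
+ *
+ * The reservation is released with uwb_rsv_terminate() and freed with
+ * uwb_rsv_destroy().
+ */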
+
+/**
+ * Radio Control Interface instance
+ *
+ *
+ * Life cycle rules: those of the UWB Device.
+ *
+ * @index:    an index number for this radio controller, as used in the
+ *            device name.
+ * @version:  version of protocol supported by this device
+ * @priv:     Backend implementation; rw with uwb_dev.dev.sem taken.
+ * @cmd:      Backend implementation to execute commands; rw and call
+ *            only with uwb_dev.dev.sem taken.
+ * @reset:    Hardware reset of radio controller and any PAL controllers.
+ * @filter:   Backend implementation to manipulate data to and from the
+ *            device so that it complies with the specification assumed
+ *            by the driver (WHCI 0.95).
+ *
+ *            uwb_dev.dev.mutex is used to execute commands and update
+ *            the corresponding structures; can't use a spinlock
+ *            because rc->cmd() can sleep.
+ * @ies:         This is a dynamically allocated array caching the
+ *               IEs (settable by the host) that the beacon of this
+ *               radio controller is currently sending.
+ *
+ *               In reality, we store here the full command we set to
+ *               the radio controller (which is basically a command
+ *               prefix followed by all the IEs the beacon currently
+ *               contains). This way we don't have to realloc and
+ *               memcpy when setting it.
+ *
+ *               We set this up in uwb_rc_ie_setup(), where we alloc
+ *               this struct, call get_ie() [so we know which IEs are
+ *               currently being sent, if any].
+ *
+ * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
+ *               amount used is given by sizeof(*ies) plus ies->wIELength
+ *               (which is a little endian quantity all the time).
+ * @ies_mutex:   protect the IE cache
+ * @dbg:         information for the debug interface
+ */
+struct uwb_rc {
+	struct uwb_dev uwb_dev;
+	int index;
+	u16 version;
+
+	struct module *owner;
+	void *priv;
+	int (*start)(struct uwb_rc *rc);
+	void (*stop)(struct uwb_rc *rc);
+	int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
+	int (*reset)(struct uwb_rc *rc);
+	int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
+	int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
+			    size_t *, size_t *);
+
+	spinlock_t neh_lock;		/* protects neh_* and ctx_* */
+	struct list_head neh_list;	/* Open NE handles */
+	unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
+	u8 ctx_roll;
+
+	int beaconing;			/* Beaconing state [channel number] */
+	int scanning;
+	enum uwb_scan_type scan_type:3;
+	unsigned ready:1;
+	struct uwb_notifs_chain notifs_chain;
+
+	struct uwb_drp_avail drp_avail;
+	struct list_head reservations;
+	struct mutex rsvs_mutex;
+	struct workqueue_struct *rsv_workq;
+	struct work_struct rsv_update_work;
+
+	struct mutex ies_mutex;
+	struct uwb_rc_cmd_set_ie *ies;
+	size_t ies_capacity;
+
+	spinlock_t pal_lock;
+	struct list_head pals;
+
+	struct uwb_dbg *dbg;
+};
+
+
+/**
+ * struct uwb_pal - a UWB PAL
+ * @name:    descriptive name for this PAL (wusbhc, wlp, etc.).
+ * @device:  a device for the PAL.  Used to link the PAL and the radio
+ *           controller in sysfs.
+ * @new_rsv: called when a peer requests a reservation (may be NULL if
+ *           the PAL cannot accept reservation requests).
+ *
+ * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
+ * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
+ *
+ * The PALs using a radio controller must register themselves to
+ * permit the UWB stack to coordinate usage of the radio between the
+ * various PALs or to allow PALs to respond to certain requests from
+ * peers.
+ *
+ * A struct uwb_pal should be embedded in a containing structure
+ * belonging to the PAL and initialized with uwb_pal_init().  Fields
+ * should be set appropriately by the PAL before registering the PAL
+ * with uwb_pal_register().
+ */
+struct uwb_pal {
+	struct list_head node;
+	const char *name;
+	struct device *device;
+	void (*new_rsv)(struct uwb_rsv *rsv);
+};
+
+void uwb_pal_init(struct uwb_pal *pal);
+int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal);
+void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal);
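+
+/*
+ * Example (illustrative sketch): registering a PAL, assuming it is
+ * embedded in a hypothetical driver structure @mine with a struct
+ * device at @mine->dev and a callback my_new_rsv(); new_rsv may be
+ * left NULL if the PAL does not accept reservation requests:
+ *
+ *	uwb_pal_init(&mine->pal);
+ *	mine->pal.name    = "my-pal";
+ *	mine->pal.device  = &mine->dev;
+ *	mine->pal.new_rsv = my_new_rsv;
+ *	err = uwb_pal_register(rc, &mine->pal);
+ *	...
+ *	uwb_pal_unregister(rc, &mine->pal);
+ */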
+
+/*
+ * General public API
+ *
+ * This API can be used by UWB device drivers or by those implementing
+ * UWB Radio Controllers
+ */
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+				       const struct uwb_dev_addr *devaddr);
+struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
+static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
+{
+	get_device(&uwb_dev->dev);
+}
+static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
+{
+	put_device(&uwb_dev->dev);
+}
+struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
+
+/**
+ * Callback function for 'uwb_{dev,rc}_foreach()'.
+ *
+ * @dev:  Linux device instance
+ *        'uwb_dev = container_of(dev, struct uwb_dev, dev)'
+ * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
+ *
+ * @returns: 0 to continue the iterations, any other value to stop
+ *           iterating and return the value to the caller of
+ *           _foreach().
+ */
+typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
+int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
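+
+/*
+ * Example (illustrative sketch): iterating over all the devices seen
+ * by a radio controller; returning 0 from the callback keeps the
+ * iteration going:
+ *
+ *	static int print_one(struct device *dev, void *priv)
+ *	{
+ *		struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ *
+ *		dev_info(dev, "beacon slot %d\n", uwb_dev->beacon_slot);
+ *		return 0;
+ *	}
+ *
+ *	uwb_dev_for_each(rc, print_one, NULL);
+ */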
+
+struct uwb_rc *uwb_rc_alloc(void);
+struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
+struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
+void uwb_rc_put(struct uwb_rc *rc);
+
+typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
+                                struct uwb_rceb *reply, ssize_t reply_size);
+
+int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
+		     struct uwb_rccb *cmd, size_t cmd_size,
+		     u8 expected_type, u16 expected_event,
+		     uwb_rc_cmd_cb_f cb, void *arg);
+ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+		   struct uwb_rccb *cmd, size_t cmd_size,
+		   struct uwb_rceb *reply, size_t reply_size);
+ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
+		    struct uwb_rccb *cmd, size_t cmd_size,
+		    u8 expected_type, u16 expected_event,
+		    struct uwb_rceb **preply);
+ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **);
+int uwb_bg_joined(struct uwb_rc *rc);
+
+size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
+
+int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
+int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
+int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
+int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
+int __uwb_mac_addr_assigned_check(struct device *, void *);
+int __uwb_dev_addr_assigned_check(struct device *, void *);
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
+					const struct uwb_dev_addr *addr)
+{
+	return __uwb_addr_print(buf, buf_size, addr->data, 0);
+}
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
+					const struct uwb_mac_addr *addr)
+{
+	return __uwb_addr_print(buf, buf_size, addr->data, 1);
+}
+
+/* @returns 0 if device addresses @addr2 and @addr1 are equal */
+static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
+				   const struct uwb_dev_addr *addr2)
+{
+	return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
+static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
+				   const struct uwb_mac_addr *addr2)
+{
+	return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns !0 if a MAC @addr is a broadcast address */
+static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
+{
+	struct uwb_mac_addr bcast = {
+		.data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+	};
+	return !uwb_mac_addr_cmp(addr, &bcast);
+}
+
+/* @returns !0 if a MAC @addr is all zeroes */
+static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
+{
+	struct uwb_mac_addr unset = {
+		.data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+	};
+	return !uwb_mac_addr_cmp(addr, &unset);
+}
+
+/* @returns !0 if the address is in use. */
+static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
+					       struct uwb_dev_addr *addr)
+{
+	return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
+}
+
+/*
+ * UWB Radio Controller API
+ *
+ * This API is used (in addition to the general API) to implement UWB
+ * Radio Controllers.
+ */
+void uwb_rc_init(struct uwb_rc *);
+int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
+void uwb_rc_rm(struct uwb_rc *);
+void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
+void uwb_rc_neh_error(struct uwb_rc *, int);
+void uwb_rc_reset_all(struct uwb_rc *rc);
+
+/**
+ * uwb_rsv_is_owner - is the owner of this reservation the RC?
+ * @rsv: the reservation
+ */
+static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
+{
+	return rsv->owner == &rsv->rc->uwb_dev;
+}
+
+/**
+ * Events generated by UWB that can be passed to any listeners
+ *
+ * Higher layers can register callback functions with the radio
+ * controller using uwb_notifs_register(). The radio controller
+ * maintains a list of all registered handlers and will notify all
+ * nodes when an event occurs.
+ */
+enum uwb_notifs {
+	UWB_NOTIF_BG_JOIN = 0,	/* radio controller joined a beacon group */
+	UWB_NOTIF_BG_LEAVE = 1,	/* radio controller left a beacon group */
+	UWB_NOTIF_ONAIR,
+	UWB_NOTIF_OFFAIR,
+};
+
+/* Callback function registered with UWB */
+struct uwb_notifs_handler {
+	struct list_head list_node;
+	void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
+	void *data;
+};
+
+int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
+int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
+
+
+/**
+ * UWB radio controller Event Size Entry (for creating entry tables)
+ *
+ * WUSB and WHCI define events and notifications, and they might have
+ * fixed or variable size.
+ *
+ * Each event/notification has a size which is not necessarily known
+ * in advance based on the event code. Moreover, vendor-specific
+ * events/notifications have a size that is impossible to determine
+ * unless we know the device's specific details.
+ *
+ * It was way too smart of the spec writers not to include LENGTH
+ * fields in the HEADER of each message; without them, a generic
+ * driver has no way to skip over vendor-specific
+ * events/notifications, and the transaction size cannot be counted
+ * on either, as the spec does not forbid packing more than one event
+ * in a single transaction.
+ *
+ * Thus, we guess sizes with tables (or for events, when you know the
+ * size ahead of time you can use uwb_rc_neh_extra_size*()). We
+ * register tables with the known events and their sizes, and then we
+ * traverse those tables. For those with variable length, we provide a
+ * way to look up the size inside the event/notification's
+ * payload. This allows device-specific event size tables to be
+ * registered.
+ *
+ * @size:   Size of the payload
+ *
+ * @offset: if != 0, at offset @offset-1 starts a field with a length
+ *          that has to be added to @size. The format of the field is
+ *          given by @type.
+ *
+ * @type:   Type and length of the offset field. Most common is LE 16
+ *          bits (that's why that is zero); others are there mostly to
+ *          cover for bugs and weirdos.
+ */
+struct uwb_est_entry {
+	size_t size;
+	unsigned offset;
+	enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
+};
+
+int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
+		     const struct uwb_est_entry *, size_t entries);
+int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
+		       const struct uwb_est_entry *, size_t entries);
+ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+			  size_t len);
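+
+/*
+ * Example (illustrative sketch): an event size table; entry [0] is a
+ * fixed-size event, entry [1] adds the LE 16-bit length field found
+ * inside the payload.  The indices and the registration arguments
+ * (event code group, vendor and product IDs) are placeholders, not
+ * the real mapping:
+ *
+ *	static const struct uwb_est_entry my_est[] = {
+ *		[0] = { .size = sizeof(struct uwb_rc_evt_confirm) },
+ *		[1] = { .size = sizeof(struct uwb_rc_evt_beacon),
+ *			.offset = 1 + offsetof(struct uwb_rc_evt_beacon,
+ *					       wBeaconInfoLength),
+ *			.type = UWB_EST_16 },
+ *	};
+ *
+ *	err = uwb_est_register(UWB_RC_CET_GENERAL, 0x00, 0xffff, 0xffff,
+ *			       my_est, ARRAY_SIZE(my_est));
+ */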
+
+/* -- Misc */
+
+enum {
+	EDC_MAX_ERRORS = 10,
+	EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+	unsigned long timestart;
+	u16 errorcount;
+};
+
+static inline
+void edc_init(struct edc *edc)
+{
+	edc->timestart = jiffies;
+}
+
+/* Called when an error occurred.
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
+ */
+static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
+{
+	unsigned long now;
+
+	now = jiffies;
+	if (now - err_hist->timestart > timeframe) {
+		err_hist->errorcount = 1;
+		err_hist->timestart = now;
+	} else if (++err_hist->errorcount > max_err) {
+		err_hist->errorcount = 0;
+		err_hist->timestart = now;
+		return 1;
+	}
+	return 0;
+}
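+
+/*
+ * Example (illustrative sketch): using an error density counter in a
+ * URB completion path to decide when to give up and reset, assuming
+ * @err_hist was set up with edc_init() at probe time and @dev/@rc are
+ * the usual device and radio controller pointers:
+ *
+ *	if (urb->status < 0
+ *	    && edc_inc(&err_hist, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ *		dev_err(dev, "too many errors, resetting\n");
+ *		uwb_rc_reset_all(rc);
+ *		return;
+ *	}
+ */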
+
+
+/* Information Element handling */
+
+/* For representing the state of writing to a buffer when iterating */
+struct uwb_buf_ctx {
+	char *buf;
+	size_t bytes, size;
+};
+
+typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *,
+			size_t, void *);
+struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
+ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
+			const void *buf, size_t size);
+int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *,
+		    size_t, void *);
+int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *);
+
+
+/*
+ * Transmission statistics
+ *
+ * UWB uses LQI and RSSI (one byte values) for reporting radio signal
+ * strength and line quality indication. We do quick and dirty
+ * averages of those. They are signed values, btw.
+ *
+ * For 8 bit quantities, we keep the min, the max, an accumulator
+ * (@sigma) and a # of samples. When @samples gets to 255, we compute
+ * the average (@sigma / @samples), place it in @sigma and reset
+ * @samples to 1 (so we use it as the first sample).
+ *
+ * Now, statistically speaking, probably I am kicking the kidneys of
+ * some books on my shelves collecting dust, but I just want an
+ * approximation, not the Nobel.
+ *
+ * LOCKING: there is no locking per se, but we try to keep a lockless
+ * schema. Only _add_sample() modifies the values--as long as you
+ * have other locking on top that makes sure that no two calls of
+ * _add_sample() happen at the same time, then we are fine. Now, for
+ * resetting the values we just set @samples to 0 and that makes the
+ * next _add_sample() start with defaults. Reading the values in
+ * _show() currently can race, so you need to make sure the calls are
+ * under the same lock that protects calls to _add_sample(). FIXME:
+ * currently unlocked (It is not ultraprecise but does the trick. Bite
+ * me).
+ */
+struct stats {
+	s8 min, max;
+	s16 sigma;
+	atomic_t samples;
+};
+
+static inline
+void stats_init(struct stats *stats)
+{
+	atomic_set(&stats->samples, 0);
+	wmb();
+}
+
+static inline
+void stats_add_sample(struct stats *stats, s8 sample)
+{
+	s8 min, max;
+	s16 sigma;
+	unsigned samples = atomic_read(&stats->samples);
+	if (samples == 0) {	/* it was zero before, so we initialize */
+		min = 127;
+		max = -128;
+		sigma = 0;
+	} else {
+		min = stats->min;
+		max = stats->max;
+		sigma = stats->sigma;
+	}
+
+	if (sample < min)	/* compute new values */
+		min = sample;
+	else if (sample > max)
+		max = sample;
+	sigma += sample;
+
+	stats->min = min;	/* commit */
+	stats->max = max;
+	stats->sigma = sigma;
+	if (atomic_add_return(1, &stats->samples) > 255) {
+		/* wrapped around! reset */
+		stats->sigma = sigma / 256;
+		atomic_set(&stats->samples, 1);
+	}
+}
+
+static inline ssize_t stats_show(struct stats *stats, char *buf)
+{
+	int min, max, avg;
+	int samples = atomic_read(&stats->samples);
+	if (samples == 0)
+		min = max = avg = 0;
+	else {
+		min = stats->min;
+		max = stats->max;
+		avg = stats->sigma / samples;
+	}
+	return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
+}
+
+static inline ssize_t stats_store(struct stats *stats, const char *buf,
+				  size_t size)
+{
+	stats_init(stats);
+	return size;
+}
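+
+/*
+ * Example (illustrative sketch): keeping LQI statistics, assuming
+ * @lqi_stats was set up with stats_init() and @be points to a
+ * received struct uwb_rc_evt_beacon; stats_show()/stats_store() fit
+ * naturally in sysfs show()/store() handlers:
+ *
+ *	stats_add_sample(&lqi_stats, (s8)be->bLQI);
+ *	...
+ *	return stats_show(&lqi_stats, buf);
+ */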
+
+#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h
new file mode 100644
index 0000000..1141f41
--- /dev/null
+++ b/include/linux/uwb/debug-cmd.h
@@ -0,0 +1,57 @@
+/*
+ * Ultra Wide Band
+ * Debug interface commands
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX__UWB__DEBUG_CMD_H__
+#define __LINUX__UWB__DEBUG_CMD_H__
+
+#include <linux/types.h>
+
+/*
+ * Debug interface commands
+ *
+ * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
+ *
+ * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
+ */
+
+enum uwb_dbg_cmd_type {
+	UWB_DBG_CMD_RSV_ESTABLISH = 1,
+	UWB_DBG_CMD_RSV_TERMINATE = 2,
+};
+
+struct uwb_dbg_cmd_rsv_establish {
+	__u8  target[6];
+	__u8  type;
+	__u16 max_mas;
+	__u16 min_mas;
+	__u8  sparsity;
+};
+
+struct uwb_dbg_cmd_rsv_terminate {
+	int index;
+};
+
+struct uwb_dbg_cmd {
+	__u32 type;
+	union {
+		struct uwb_dbg_cmd_rsv_establish rsv_establish;
+		struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
+	};
+};
+
+#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h
new file mode 100644
index 0000000..a86a73f
--- /dev/null
+++ b/include/linux/uwb/debug.h
@@ -0,0 +1,82 @@
+/*
+ * Ultra Wide Band
+ * Debug Support
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc
+ * Invoke like:
+ *
+ * #define D_LOCAL 4
+ * #include <linux/uwb/debug.h>
+ *
+ * At the end of your include files.
+ */
+#include <linux/types.h>
+
+struct device;
+extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize);
+
+/* Master debug switch; !0 enables, 0 disables */
+#define D_MASTER (!0)
+
+/* Local (per-file) debug switch; #define before #including */
+#ifndef D_LOCAL
+#define D_LOCAL 0
+#endif
+
+#undef __d_printf
+#undef d_fnstart
+#undef d_fnend
+#undef d_printf
+#undef d_dump
+
+#define __d_printf(l, _tag, _dev, f, a...)				\
+do {									\
+	struct device *__dev = (_dev);					\
+	if (D_MASTER && D_LOCAL >= (l)) {				\
+		char __head[64] = "";					\
+		if (_dev != NULL) {					\
+			if ((unsigned long)__dev < 4096)		\
+				printk(KERN_ERR "E: Corrupt dev %p\n",	\
+					__dev);				\
+			else						\
+				snprintf(__head, sizeof(__head),	\
+					 "%s %s: ",			\
+					 dev_driver_string(__dev),	\
+					 __dev->bus_id);		\
+		}							\
+		printk(KERN_ERR "%s%s" _tag ": " f, __head,		\
+			__func__, ## a);				\
+	}								\
+} while (0 && _dev)
+
+#define d_fnstart(l, _dev, f, a...)	\
+	__d_printf(l, " FNSTART", _dev, f, ## a)
+#define d_fnend(l, _dev, f, a...)	\
+	__d_printf(l, " FNEND", _dev, f, ## a)
+#define d_printf(l, _dev, f, a...)	\
+	__d_printf(l, "", _dev, f, ## a)
+#define d_dump(l, _dev, ptr, size)		\
+do {						\
+	struct device *__dev = _dev;		\
+	if (D_MASTER && D_LOCAL >= (l))		\
+		dump_bytes(__dev, ptr, size);	\
+} while (0 && _dev)
+#define d_test(l) (D_MASTER && D_LOCAL >= (l))
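+
+/*
+ * Example (illustrative sketch): a driver file enabling verbose
+ * debugging for itself only; the function name and the buf/len
+ * variables are placeholders:
+ *
+ *	#define D_LOCAL 5
+ *	#include <linux/uwb/debug.h>
+ *
+ *	static int my_probe(struct device *dev)
+ *	{
+ *		d_fnstart(3, dev, "(dev %p)\n", dev);
+ *		d_printf(4, dev, "doing something\n");
+ *		if (d_test(5))
+ *			d_dump(5, dev, buf, len);
+ *		d_fnend(3, dev, "(dev %p) = 0\n", dev);
+ *		return 0;
+ *	}
+ */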
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
new file mode 100644
index 0000000..198c15f
--- /dev/null
+++ b/include/linux/uwb/spec.h
@@ -0,0 +1,727 @@
+/*
+ * Ultra Wide Band
+ * UWB Standard definitions
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * All these definitions are based on the ECMA-368 standard.
+ *
+ * Note all definitions are Little Endian in the wire, and we will
+ * convert them to host order before operating on the bitfields (that
+ * yes, we use extensively).
+ */
+
+#ifndef __LINUX__UWB_SPEC_H__
+#define __LINUX__UWB_SPEC_H__
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#define i1480_FW 0x00000303
+/* #define i1480_FW 0x00000302 */
+
+/**
+ * Number of Medium Access Slots in a superframe.
+ *
+ * UWB divides time in SuperFrames, each one divided in 256 pieces, or
+ * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
+ * basic bandwidth allocation unit in UWB.
+ */
+enum { UWB_NUM_MAS = 256 };
+
+/**
+ * Number of Zones in superframe.
+ *
+ * UWB divides the superframe into zones with numbering starting from BPST.
+ * See MBOA MAC[16.8.6]
+ */
+enum { UWB_NUM_ZONES = 16 };
+
+/*
+ * Number of MAS in a zone.
+ */
+#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
+
+/*
+ * Number of streams per DRP reservation between a pair of devices.
+ *
+ * [ECMA-368] section 16.8.6.
+ */
+enum { UWB_NUM_STREAMS = 8 };
+
+/*
+ * mMasLength
+ *
+ * The length of a MAS in microseconds.
+ *
+ * [ECMA-368] section 17.16.
+ */
+enum { UWB_MAS_LENGTH_US = 256 };
+
+/*
+ * mBeaconSlotLength
+ *
+ * The length of the beacon slot in microseconds.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
+
+/*
+ * mMaxLostBeacons
+ *
+ * The number of beacons missing in consecutive superframes before a
+ * device can be considered as unreachable.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_MAX_LOST_BEACONS = 3 };
+
+/*
+ * Length of a superframe in microseconds.
+ */
+#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
+
+/**
+ * UWB MAC address
+ *
+ * It is *imperative* that this struct is exactly 6 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_mac_addr {
+	u8 data[6];
+} __attribute__((packed));
+
+
+/**
+ * UWB device address
+ *
+ * It is *imperative* that this struct is exactly 2 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_dev_addr {
+	u8 data[2];
+} __attribute__((packed));
+
+
+/**
+ * Types of UWB addresses
+ *
+ * Order matters (by size).
+ */
+enum uwb_addr_type {
+	UWB_ADDR_DEV = 0,
+	UWB_ADDR_MAC = 1,
+};
+
+
+/** Size of a char buffer for printing a MAC/device address */
+enum { UWB_ADDR_STRSIZE = 32 };
+
+
+/** UWB WiMedia protocol IDs. */
+enum uwb_prid {
+	UWB_PRID_WLP_RESERVED   = 0x0000,
+	UWB_PRID_WLP		= 0x0001,
+	UWB_PRID_WUSB_BOT	= 0x0010,
+	UWB_PRID_WUSB		= 0x0010,
+	UWB_PRID_WUSB_TOP	= 0x001F,
+};
+
+
+/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
+enum uwb_phy_rate {
+	UWB_PHY_RATE_53 = 0,
+	UWB_PHY_RATE_80,
+	UWB_PHY_RATE_106,
+	UWB_PHY_RATE_160,
+	UWB_PHY_RATE_200,
+	UWB_PHY_RATE_320,
+	UWB_PHY_RATE_400,
+	UWB_PHY_RATE_480,
+	UWB_PHY_RATE_INVALID
+};
+
+
+/**
+ * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
+ */
+enum uwb_scan_type {
+	UWB_SCAN_ONLY = 0,
+	UWB_SCAN_OUTSIDE_BP,
+	UWB_SCAN_WHILE_INACTIVE,
+	UWB_SCAN_DISABLED,
+	UWB_SCAN_ONLY_STARTTIME,
+	UWB_SCAN_TOP
+};
+
+
+/** ACK Policy types (MBOA MAC[7.2.1.3]) */
+enum uwb_ack_pol {
+	UWB_ACK_NO = 0,
+	UWB_ACK_INM = 1,
+	UWB_ACK_B = 2,
+	UWB_ACK_B_REQ = 3,
+};
+
+
+/** DRP reservation types ([ECMA-368] table 106) */
+enum uwb_drp_type {
+	UWB_DRP_TYPE_ALIEN_BP = 0,
+	UWB_DRP_TYPE_HARD,
+	UWB_DRP_TYPE_SOFT,
+	UWB_DRP_TYPE_PRIVATE,
+	UWB_DRP_TYPE_PCA,
+};
+
+
+/** DRP Reason Codes ([ECMA-368] table 107) */
+enum uwb_drp_reason {
+	UWB_DRP_REASON_ACCEPTED = 0,
+	UWB_DRP_REASON_CONFLICT,
+	UWB_DRP_REASON_PENDING,
+	UWB_DRP_REASON_DENIED,
+	UWB_DRP_REASON_MODIFIED,
+};
+
+/**
+ *  DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
+ */
+enum uwb_drp_notif_reason {
+	UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
+	UWB_DRP_NOTIF_CONFLICT,
+	UWB_DRP_NOTIF_TERMINATE,
+};
+
+
+/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
+struct uwb_drp_alloc {
+	__le16 zone_bm;
+	__le16 mas_bm;
+} __attribute__((packed));
+
+
+/** General MAC Header format (ECMA-368[16.2]) */
+struct uwb_mac_frame_hdr {
+	__le16 Frame_Control;
+	struct uwb_dev_addr DestAddr;
+	struct uwb_dev_addr SrcAddr;
+	__le16 Sequence_Control;
+	__le16 Access_Information;
+} __attribute__((packed));
+
+
+/**
+ * uwb_beacon_frame - a beacon frame including MAC headers
+ *
+ * [ECMA-368] section 16.3.
+ */
+struct uwb_beacon_frame {
+	struct uwb_mac_frame_hdr hdr;
+	struct uwb_mac_addr Device_Identifier;	/* may be a NULL EUI-48 */
+	u8 Beacon_Slot_Number;
+	u8 Device_Control;
+	u8 IEData[];
+} __attribute__((packed));
+
+
+/** Information Element codes (MBOA MAC[T54]) */
+enum uwb_ie {
+	UWB_PCA_AVAILABILITY = 2,
+	UWB_IE_DRP_AVAILABILITY = 8,
+	UWB_IE_DRP = 9,
+	UWB_BP_SWITCH_IE = 11,
+	UWB_MAC_CAPABILITIES_IE = 12,
+	UWB_PHY_CAPABILITIES_IE = 13,
+	UWB_APP_SPEC_PROBE_IE = 15,
+	UWB_IDENTIFICATION_IE = 19,
+	UWB_MASTER_KEY_ID_IE = 20,
+	UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
+	UWB_APP_SPEC_IE = 255,
+};
+
+
+/**
+ * Header common to all Information Elements (IEs)
+ */
+struct uwb_ie_hdr {
+	u8 element_id;	/* enum uwb_ie */
+	u8 length;
+} __attribute__((packed));
+
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
+struct uwb_ie_drp {
+	struct uwb_ie_hdr	hdr;
+	__le16                  drp_control;
+	struct uwb_dev_addr	dev_addr;
+	struct uwb_drp_alloc	allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
+}
+
+static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
+}
+
+static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
+}
+
+static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
+}
+
+static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
+}
+
+static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
+}
+
+static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
+{
+	return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
+}
+
+static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
+				       enum uwb_drp_reason reason_code)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
	drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
+{
+	u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
+	ie->drp_control = cpu_to_le16(drp_control);
+}
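+
+/*
+ * Example (illustrative sketch): filling in the drp_control bitfield
+ * of a DRP IE on the reservation owner's side, assuming @drp_ie has
+ * been allocated with room for its allocs[] and @rsv is the owning
+ * struct uwb_rsv:
+ *
+ *	drp_ie->hdr.element_id = UWB_IE_DRP;
+ *	uwb_ie_drp_set_type(drp_ie, rsv->type);
+ *	uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
+ *	uwb_ie_drp_set_owner(drp_ie, 1);
+ *	uwb_ie_drp_set_status(drp_ie, 1);
+ *	uwb_ie_drp_set_reason_code(drp_ie, UWB_DRP_REASON_ACCEPTED);
+ */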
+
+/** Dynamic Reservation Protocol Availability IE (MBOA MAC[7.8.7]) */
+struct uwb_ie_drp_avail {
+	struct uwb_ie_hdr	hdr;
+	DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/**
+ * The Vendor ID is set to an OUI that indicates the vendor of the device.
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_vendor_id {
+	u8 data[3];
+} __attribute__((packed));
+
+/**
+ * The device type ID
+ * FIXME: clarify what this means
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_device_type_id {
+	u8 data[3];
+} __attribute__((packed));
+
+
+/**
+ * UWB device information types
+ * ECMA-368 [16.8.10]
+ */
+enum uwb_dev_info_type {
+	UWB_DEV_INFO_VENDOR_ID = 0,
+	UWB_DEV_INFO_VENDOR_TYPE,
+	UWB_DEV_INFO_NAME,
+};
+
+/**
+ * UWB device information found in Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_dev_info {
+	u8 type;	/* enum uwb_dev_info_type */
+	u8 length;
+	u8 data[];
+} __attribute__((packed));
+
+/**
+ * UWB Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_identification_ie {
+	struct uwb_ie_hdr hdr;
+	struct uwb_dev_info info[];
+} __attribute__((packed));
+
+/*
+ * UWB Radio Controller
+ *
+ * These definitions are common to the Radio Control layers as
+ * exported by the WUSB1.0 HWA and WHCI interfaces.
+ */
+
+/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
+struct uwb_rccb {
+	u8 bCommandType;		/* enum hwa_cet */
+	__le16 wCommand;		/* Command code */
+	u8 bCommandContext;		/* Context ID */
+} __attribute__((packed));
+
+
+/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
+struct uwb_rceb {
+	u8 bEventType;			/* enum hwa_cet */
+	__le16 wEvent;			/* Event code */
+	u8 bEventContext;		/* Context ID */
+} __attribute__((packed));
+
+
+enum {
+	UWB_RC_CET_GENERAL = 0,		/* General Command/Event type */
+	UWB_RC_CET_EX_TYPE_1 = 1,	/* Extended Type 1 Command/Event type */
+};
+
+/* Commands to the radio controller */
+enum uwb_rc_cmd {
+	UWB_RC_CMD_CHANNEL_CHANGE = 16,
+	UWB_RC_CMD_DEV_ADDR_MGMT = 17,	/* Device Address Management */
+	UWB_RC_CMD_GET_IE = 18,		/* GET Information Elements */
+	UWB_RC_CMD_RESET = 19,
+	UWB_RC_CMD_SCAN = 20,		/* Scan management  */
+	UWB_RC_CMD_SET_BEACON_FILTER = 21,
+	UWB_RC_CMD_SET_DRP_IE = 22,	/* Dynamic Reservation Protocol IEs */
+	UWB_RC_CMD_SET_IE = 23,		/* Information Element management */
+	UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
+	UWB_RC_CMD_SET_TX_POWER = 25,
+	UWB_RC_CMD_SLEEP = 26,
+	UWB_RC_CMD_START_BEACON = 27,
+	UWB_RC_CMD_STOP_BEACON = 28,
+	UWB_RC_CMD_BP_MERGE = 29,
+	UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
+	UWB_RC_CMD_SET_ASIE_NOTIF = 31,
+};
+
+/* Notifications from the radio controller */
+enum uwb_rc_evt {
+	UWB_RC_EVT_IE_RCV = 0,
+	UWB_RC_EVT_BEACON = 1,
+	UWB_RC_EVT_BEACON_SIZE = 2,
+	UWB_RC_EVT_BPOIE_CHANGE = 3,
+	UWB_RC_EVT_BP_SLOT_CHANGE = 4,
+	UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
+	UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
+	UWB_RC_EVT_DRP_AVAIL = 7,
+	UWB_RC_EVT_DRP = 8,
+	UWB_RC_EVT_BP_SWITCH_STATUS = 9,
+	UWB_RC_EVT_CMD_FRAME_RCV = 10,
+	UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
+	/* Events (command responses) use the same code as the command */
+	UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
+};
+
+enum uwb_rc_extended_type_1_cmd {
+	UWB_RC_SET_DAA_ENERGY_MASK = 32,
+	UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
+};
+
+enum uwb_rc_extended_type_1_evt {
+	UWB_RC_DAA_ENERGY_DETECTED = 0,
+};
+
+/* Radio Control Result Code. [WHCI] table 3-3. */
+enum {
+	UWB_RC_RES_SUCCESS = 0,
+	UWB_RC_RES_FAIL,
+	UWB_RC_RES_FAIL_HARDWARE,
+	UWB_RC_RES_FAIL_NO_SLOTS,
+	UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
+	UWB_RC_RES_FAIL_INVALID_PARAMETER,
+	UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
+	UWB_RC_RES_FAIL_INVALID_IE_DATA,
+	UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
+	UWB_RC_RES_FAIL_CANCELLED,
+	UWB_RC_RES_FAIL_INVALID_STATE,
+	UWB_RC_RES_FAIL_INVALID_SIZE,
+	UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
+	UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
+	UWB_RC_RES_FAIL_TIME_OUT = 255,
+};
+
+/* Confirm event. [WHCI] section 3.1.3.1 etc. */
+struct uwb_rc_evt_confirm {
+	struct uwb_rceb rceb;
+	u8 bResultCode;
+} __attribute__((packed));
+
+/* Device Address Management event. [WHCI] section 3.1.3.2. */
+struct uwb_rc_evt_dev_addr_mgmt {
+	struct uwb_rceb rceb;
+	u8 baAddr[6];
+	u8 bResultCode;
+} __attribute__((packed));
+
+
+/* Get IE Event. [WHCI] section 3.1.3.3. */
+struct uwb_rc_evt_get_ie {
+	struct uwb_rceb rceb;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
+struct uwb_rc_evt_set_drp_ie {
+	struct uwb_rceb rceb;
+	__le16 wRemainingSpace;
+	u8 bResultCode;
+} __attribute__((packed));
+
+/* Set IE Event. [WHCI] section 3.1.3.8. */
+struct uwb_rc_evt_set_ie {
+	struct uwb_rceb rceb;
+	__le16 RemainingSpace;
+	u8 bResultCode;
+} __attribute__((packed));
+
+/* Scan command. [WHCI] 3.1.3.5. */
+struct uwb_rc_cmd_scan {
+	struct uwb_rccb rccb;
+	u8 bChannelNumber;
+	u8 bScanState;
+	__le16 wStartTime;
+} __attribute__((packed));
+
+/* Set DRP IE command. [WHCI] section 3.1.3.7. */
+struct uwb_rc_cmd_set_drp_ie {
+	struct uwb_rccb rccb;
+	__le16 wIELength;
+	struct uwb_ie_drp IEData[];
+} __attribute__((packed));
+
+/* Set IE command. [WHCI] section 3.1.3.8. */
+struct uwb_rc_cmd_set_ie {
+	struct uwb_rccb rccb;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
+struct uwb_rc_evt_set_daa_energy_mask {
+	struct uwb_rceb rceb;
+	__le16 wLength;
+	u8 result;
+} __attribute__((packed));
+
+/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
+struct uwb_rc_evt_set_notification_filter_ex {
+	struct uwb_rceb rceb;
+	__le16 wLength;
+	u8 result;
+} __attribute__((packed));
+
+/* IE Received notification. [WHCI] section 3.1.4.1. */
+struct uwb_rc_evt_ie_rcv {
+	struct uwb_rceb rceb;
+	struct uwb_dev_addr SrcAddr;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/* Type of the received beacon. [WHCI] section 3.1.4.2. */
+enum uwb_rc_beacon_type {
+	UWB_RC_BEACON_TYPE_SCAN = 0,
+	UWB_RC_BEACON_TYPE_NEIGHBOR,
+	UWB_RC_BEACON_TYPE_OL_ALIEN,
+	UWB_RC_BEACON_TYPE_NOL_ALIEN,
+};
+
+/* Beacon received notification. [WHCI] 3.1.4.2. */
+struct uwb_rc_evt_beacon {
+	struct uwb_rceb rceb;
+	u8	bChannelNumber;
+	u8	bBeaconType;
+	__le16	wBPSTOffset;
+	u8	bLQI;
+	u8	bRSSI;
+	__le16	wBeaconInfoLength;
+	u8	BeaconInfo[];
+} __attribute__((packed));
+
+
+/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
+struct uwb_rc_evt_beacon_size {
+	struct uwb_rceb rceb;
+	__le16 wNewBeaconSize;
+} __attribute__((packed));
+
+
+/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
+struct uwb_rc_evt_bpoie_change {
+	struct uwb_rceb rceb;
+	__le16 wBPOIELength;
+	u8 BPOIE[];
+} __attribute__((packed));
+
+
+/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
+struct uwb_rc_evt_bp_slot_change {
+	struct uwb_rceb rceb;
+	u8 slot_info;
+} __attribute__((packed));
+
+static inline int uwb_rc_evt_bp_slot_change_slot_num(
+	const struct uwb_rc_evt_bp_slot_change *evt)
+{
+	return evt->slot_info & 0x7f;
+}
+
+static inline int uwb_rc_evt_bp_slot_change_no_slot(
+	const struct uwb_rc_evt_bp_slot_change *evt)
+{
+	return (evt->slot_info & 0x80) >> 7;
+}
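+
+/*
+ * Example (illustrative sketch): decoding the slot information from a
+ * received Beacon Slot Change notification @evt:
+ *
+ *	if (uwb_rc_evt_bp_slot_change_no_slot(evt))
+ *		...the radio controller could not get a beacon slot...
+ *	else
+ *		slot = uwb_rc_evt_bp_slot_change_slot_num(evt);
+ */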
+
+/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
+struct uwb_rc_evt_bp_switch_ie_rcv {
+	struct uwb_rceb rceb;
+	struct uwb_dev_addr wSrcAddr;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
+struct uwb_rc_evt_dev_addr_conflict {
+	struct uwb_rceb rceb;
+} __attribute__((packed));
+
+/* DRP notification. [WHCI] section 3.1.4.9. */
+struct uwb_rc_evt_drp {
+	struct uwb_rceb           rceb;
+	struct uwb_dev_addr       src_addr;
+	u8                        reason;
+	u8                        beacon_slot_number;
+	__le16                    ie_length;
+	u8                        ie_data[];
+} __attribute__((packed));
+
+static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
+{
+	return evt->reason & 0x0f;
+}
+
+
+/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
+struct uwb_rc_evt_drp_avail {
+	struct uwb_rceb rceb;
+	DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/* BP switch status notification. [WHCI] section 3.1.4.10. */
+struct uwb_rc_evt_bp_switch_status {
+	struct uwb_rceb rceb;
+	u8 status;
+	u8 slot_offset;
+	__le16 bpst_offset;
+	u8 move_countdown;
+} __attribute__((packed));
+
+/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
+struct uwb_rc_evt_cmd_frame_rcv {
+	struct uwb_rceb rceb;
+	__le16 receive_time;
+	struct uwb_dev_addr wSrcAddr;
+	struct uwb_dev_addr wDstAddr;
+	__le16 control;
+	__le16 reserved;
+	__le16 dataLength;
+	u8 data[];
+} __attribute__((packed));
+
+/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
+struct uwb_rc_evt_channel_change_ie_rcv {
+	struct uwb_rceb rceb;
+	struct uwb_dev_addr wSrcAddr;
+	__le16 wIELength;
+	u8 IEData[];
+} __attribute__((packed));
+
+/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
+struct uwb_rc_evt_daa_energy_detected {
+	struct uwb_rceb rceb;
+	__le16 wLength;
+	u8 bandID;
+	u8 reserved;
+	u8 toneBmp[16];
+} __attribute__((packed));
+
+
+/**
+ * Radio Control Interface Class Descriptor
+ *
+ *  WUSB 1.0 [8.6.1.2]
+ */
+struct uwb_rc_control_intf_class_desc {
+	u8 bLength;
+	u8 bDescriptorType;
+	__le16 bcdRCIVersion;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h
new file mode 100644
index 0000000..36a39e3
--- /dev/null
+++ b/include/linux/uwb/umc.h
@@ -0,0 +1,194 @@
+/*
+ * UWB Multi-interface Controller support.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
+ * controller, host controller) are presented as devices on the "umc"
+ * bus.
+ *
+ * The radio controller is not strictly a UMC capability but it's
+ * useful to present it as such.
+ *
+ * References:
+ *
+ *   [WHCI] Wireless Host Controller Interface Specification for
+ *          Certified Wireless Universal Serial Bus, revision 0.95.
+ *
+ * How this works is kind of convoluted but simple. The whci.ko driver
+ * loads when WHCI devices are detected. These WHCI devices expose
+ * many devices in the same PCI function (they couldn't have reused
+ * functions, no), so for each PCI function that exposes these many
+ * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
+ * with umc_device_create() and adds it to the bus with
+ * umc_device_register().
+ *
+ * umc_device_register() calls device_register(), which prompts the
+ * bus management code to call the something_probe() that your UMC
+ * driver has registered for that capability code.
+ *
+ * Now when the WHCI device is removed, whci_remove() will go over
+ * each umc_dev assigned to each of the PCI function's capabilities
+ * and through whci_del_cap() call umc_device_unregister() on each
+ * created umc_dev. Of course, if you are bound to the device, your
+ * driver's something_remove() will be called.
+ */
+
+#ifndef _LINUX_UWB_UMC_H_
+#define _LINUX_UWB_UMC_H_
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+/*
+ * UMC capability IDs.
+ *
+ * 0x00 is reserved so use it for the radio controller device.
+ *
+ * [WHCI] table 2-8
+ */
+#define UMC_CAP_ID_WHCI_RC      0x00 /* radio controller */
+#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
+
+/**
+ * struct umc_dev - UMC capability device
+ *
+ * @version:  version of the specification this capability conforms to.
+ * @cap_id:   capability ID.
+ * @bar:      PCI Bar (64 bit) where the resource lies
+ * @resource: register space resource.
+ * @irq:      interrupt line.
+ */
+struct umc_dev {
+	u16		version;
+	u8		cap_id;
+	u8		bar;
+	struct resource resource;
+	unsigned	irq;
+	struct device	dev;
+};
+
+#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
+
+/**
+ * struct umc_driver - UMC capability driver
+ * @cap_id: supported capability ID.
+ * @match: driver specific capability matching function.
+ * @match_data: driver specific data for match() (e.g., a
+ * table of pci_device_id's if umc_match_pci_id() is used).
+ */
+struct umc_driver {
+	char *name;
+	u8 cap_id;
+	int (*match)(struct umc_driver *, struct umc_dev *);
+	const void *match_data;
+
+	int  (*probe)(struct umc_dev *);
+	void (*remove)(struct umc_dev *);
+	int  (*suspend)(struct umc_dev *, pm_message_t state);
+	int  (*resume)(struct umc_dev *);
+
+	struct device_driver driver;
+};
+
+#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
+
+extern struct bus_type umc_bus_type;
+
+struct umc_dev *umc_device_create(struct device *parent, int n);
+int __must_check umc_device_register(struct umc_dev *umc);
+void umc_device_unregister(struct umc_dev *umc);
+
+int __must_check __umc_driver_register(struct umc_driver *umc_drv,
+				       struct module *mod,
+				       const char *mod_name);
+
+/**
+ * umc_driver_register - register a UMC capability driver.
+ * @umc_drv:  pointer to the driver.
+ */
+static inline int __must_check umc_driver_register(struct umc_driver *umc_drv)
+{
+	return __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME);
+}
+void umc_driver_unregister(struct umc_driver *umc_drv);
+
+/*
+ * Utility function you can use to match (umc_driver->match) against a
+ * null-terminated array of 'struct pci_device_id' in
+ * umc_driver->match_data.
+ */
+int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
+
+/**
+ * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
+ * @umc_dev: UMC device whose parent PCI device we are looking for
+ *
+ * DIRTY!!! DON'T RELY ON THIS
+ *
+ * FIXME: This is as dirty as it gets, but we need some way to check
+ * the correct type of umc_dev->parent (so that for example, we can
+ * cast to pci_dev). Casting to pci_dev is necessary because at some
+ * point we need to request resources from the device. Mapping is
+ * easily overcome (ioremap and stuff are bus agnostic), but hooking
+ * up to some error handlers (such as pci error handlers) might need
+ * this.
+ *
+ * THIS might (probably will) be removed in the future, so don't count
+ * on it.
+ */
+static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
+{
+	struct pci_dev *pci_dev = NULL;
+	if (umc_dev->dev.parent->bus == &pci_bus_type)
+		pci_dev = to_pci_dev(umc_dev->dev.parent);
+	return pci_dev;
+}
+
+/**
+ * umc_dev_get() - reference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ *
+ * NOTE: we are assuming in this whole scheme that the parent device
+ *       is referenced at _probe() time and unreferenced at _remove()
+ *       time by the parent's subsystem.
+ */
+static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
+{
+	get_device(&umc_dev->dev);
+	return umc_dev;
+}
+
+/**
+ * umc_dev_put() - unreference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void umc_dev_put(struct umc_dev *umc_dev)
+{
+	put_device(&umc_dev->dev);
+}
+
+/**
+ * umc_set_drvdata - set UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ * @data:    Data to set.
+ */
+static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
+{
+	dev_set_drvdata(&umc_dev->dev, data);
+}
+
+/**
+ * umc_get_drvdata - recover UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
+{
+	return dev_get_drvdata(&umc_dev->dev);
+}
+
+int umc_controller_reset(struct umc_dev *umc);
+
+#endif /* #ifndef _LINUX_UWB_UMC_H_ */
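To make the probe/remove flow described in the umc.h comment above concrete, here is a minimal, hypothetical capability driver written against this interface. The example_* names are invented for illustration; this sketch is not part of the patch.

#include <linux/module.h>
#include <linux/uwb/umc.h>

static int example_umc_probe(struct umc_dev *umc)
{
	/* umc->resource and umc->irq describe this capability's register
	 * space and interrupt; stash per-device state if needed. */
	umc_set_drvdata(umc, NULL);
	return 0;
}

static void example_umc_remove(struct umc_dev *umc)
{
	/* Undo whatever example_umc_probe() set up. */
}

static struct umc_driver example_umc_driver = {
	.name	= "example-umc",
	.cap_id	= UMC_CAP_ID_WHCI_RC,	/* capability code we bind to */
	.probe	= example_umc_probe,
	.remove	= example_umc_remove,
};

static int __init example_umc_init(void)
{
	/* umc_driver_register() fills in THIS_MODULE/KBUILD_MODNAME. */
	return umc_driver_register(&example_umc_driver);
}
module_init(example_umc_init);

static void __exit example_umc_exit(void)
{
	umc_driver_unregister(&example_umc_driver);
}
module_exit(example_umc_exit);

MODULE_LICENSE("GPL");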
diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h
new file mode 100644
index 0000000..915ec23
--- /dev/null
+++ b/include/linux/uwb/whci.h
@@ -0,0 +1,117 @@
+/*
+ * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ *
+ * References:
+ *   [WHCI] Wireless Host Controller Interface Specification for
+ *          Certified Wireless Universal Serial Bus, revision 0.95.
+ */
+#ifndef _LINUX_UWB_WHCI_H_
+#define _LINUX_UWB_WHCI_H_
+
+#include <linux/pci.h>
+
+/*
+ * UWB interface capability registers (offsets from UWBBASE)
+ *
+ * [WHCI] section 2.2
+ */
+#define UWBCAPINFO	0x00 /* == UWBCAPDATA(0) */
+#  define UWBCAPINFO_TO_N_CAPS(c)	(((c) >> 0)  & 0xFull)
+#define UWBCAPDATA(n)	(8*(n))
+#  define UWBCAPDATA_TO_VERSION(c)	(((c) >> 32) & 0xFFFFull)
+#  define UWBCAPDATA_TO_OFFSET(c)	(((c) >> 18) & 0x3FFFull)
+#  define UWBCAPDATA_TO_BAR(c)		(((c) >> 16) & 0x3ull)
+#  define UWBCAPDATA_TO_SIZE(c)		((((c) >> 8) & 0xFFull) * sizeof(u32))
+#  define UWBCAPDATA_TO_CAP_ID(c)	(((c) >> 0)  & 0xFFull)
+
+/* Size of the WHCI capability data (including the RC capability) for
+   a device with n capabilities. */
+#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
+
+
+/*
+ * URC registers (offsets from URCBASE)
+ *
+ * [WHCI] section 2.3
+ */
+#define URCCMD		0x00
+#  define URCCMD_RESET		(1 << 31)  /* UMC Hardware reset */
+#  define URCCMD_RS		(1 << 30)  /* Run/Stop */
+#  define URCCMD_EARV		(1 << 29)  /* Event Address Register Valid */
+#  define URCCMD_ACTIVE		(1 << 15)  /* Command is active */
+#  define URCCMD_IWR		(1 << 14)  /* Interrupt When Ready */
+#  define URCCMD_SIZE_MASK	0x00000fff /* Command size mask */
+#define URCSTS		0x04
+#  define URCSTS_EPS		(1 << 17)  /* Event Processing Status */
+#  define URCSTS_HALTED		(1 << 16)  /* RC halted */
+#  define URCSTS_HSE		(1 << 10)  /* Host System Error...fried */
+#  define URCSTS_ER		(1 <<  9)  /* Event Ready */
+#  define URCSTS_RCI		(1 <<  8)  /* Ready for Command Interrupt */
+#  define URCSTS_INT_MASK	0x00000700 /* URC interrupt sources */
+#  define URCSTS_ISI		0x000000ff /* Interrupt Source Identification */
+#define URCINTR		0x08
+#  define URCINTR_EN_ALL	0x000007ff /* Enable all interrupt sources */
+#define URCCMDADDR	0x10
+#define URCEVTADDR	0x18
+#  define URCEVTADDR_OFFSET_MASK 0xfff    /* Event pointer offset mask */
+
+
+/** Write 32 bit @value to little endian register at @addr */
+static inline
+void le_writel(u32 value, void __iomem *addr)
+{
+	iowrite32(value, addr);
+}
+
+
+/** Read from 32 bit little endian register at @addr */
+static inline
+u32 le_readl(void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+
+/** Write 64 bit @value to little endian register at @addr */
+static inline
+void le_writeq(u64 value, void __iomem *addr)
+{
+	iowrite32(value, addr);
+	iowrite32(value >> 32, addr + 4);
+}
+
+
+/** Read from 64 bit little endian register at @addr */
+static inline
+u64 le_readq(void __iomem *addr)
+{
+	u64 value;
+	value  = ioread32(addr);
+	value |= (u64)ioread32(addr + 4) << 32;
+	return value;
+}
+
+extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
+			 u32 mask, u32 result,
+			 unsigned long max_ms,  const char *tag);
+
+#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
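As a rough illustration of how the UWBCAPINFO/UWBCAPDATA macros and le_readq() above fit together, a hypothetical helper could enumerate the capabilities of an already-mapped UWBBASE region like this (sketch only, not part of the patch):

#include <linux/device.h>
#include <linux/uwb/whci.h>

static void example_dump_whci_caps(struct device *dev, void __iomem *uwbbase)
{
	u64 capinfo, capdata;
	int n_caps, i;

	/* UWBCAPINFO == UWBCAPDATA(0); its low nibble holds the number
	 * of capabilities exposed by this PCI function. */
	capinfo = le_readq(uwbbase + UWBCAPINFO);
	n_caps = UWBCAPINFO_TO_N_CAPS(capinfo);

	for (i = 1; i <= n_caps; i++) {
		capdata = le_readq(uwbbase + UWBCAPDATA(i));
		dev_info(dev, "cap %d: id 0x%02x bar %u offset 0x%04x\n",
			 i, (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
			 (unsigned)UWBCAPDATA_TO_BAR(capdata),
			 (unsigned)UWBCAPDATA_TO_OFFSET(capdata));
	}
}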
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index d4b0303..4669d7e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -315,6 +315,13 @@
 /* see http://www.siliconimaging.com/RGB%20Bayer.htm */
 #define V4L2_PIX_FMT_SBGGR8  v4l2_fourcc('B', 'A', '8', '1') /*  8  BGBG.. GRGR.. */
 #define V4L2_PIX_FMT_SGBRG8  v4l2_fourcc('G', 'B', 'R', 'G') /*  8  GBGB.. RGRG.. */
+/*
+ * 10bit raw bayer, expanded to 16 bits
+ * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
+ */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0')
+/* 10bit raw bayer DPCM compressed to 8 bits */
+#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
 #define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16  BGBG.. GRGR.. */
 
 /* compressed formats */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 328eb40..4c28c4d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -2,6 +2,7 @@
 #define _LINUX_VMALLOC_H
 
 #include <linux/spinlock.h>
+#include <linux/init.h>
 #include <asm/page.h>		/* pgprot_t */
 
 struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
@@ -23,7 +24,6 @@
 #endif
 
 struct vm_struct {
-	/* keep next,addr,size together to speedup lookups */
 	struct vm_struct	*next;
 	void			*addr;
 	unsigned long		size;
@@ -37,6 +37,19 @@
 /*
  *	Highlevel APIs for driver use
  */
+extern void vm_unmap_ram(const void *mem, unsigned int count);
+extern void *vm_map_ram(struct page **pages, unsigned int count,
+				int node, pgprot_t prot);
+extern void vm_unmap_aliases(void);
+
+#ifdef CONFIG_MMU
+extern void __init vmalloc_init(void);
+#else
+static inline void vmalloc_init(void)
+{
+}
+#endif
+
 extern void *vmalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
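The vm_map_ram()/vm_unmap_ram() pair declared above maps an array of pages much like vmap()/vunmap(). A minimal hypothetical wrapper, assuming PAGE_KERNEL protections and no NUMA node preference (node = -1), might look like this (not part of the patch):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_map_pages(struct page **pages, unsigned int count)
{
	/* -1 selects no particular NUMA node; PAGE_KERNEL gives a normal
	 * kernel read/write mapping. */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void example_unmap_pages(void *addr, unsigned int count)
{
	/* The count must match what was passed to vm_map_ram(). */
	vm_unmap_ram(addr, count);
}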
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 58334d4..9cd3ab0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,6 +41,16 @@
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
+		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
+		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
+		UNEVICTABLE_PGMLOCKED,
+		UNEVICTABLE_PGMUNLOCKED,
+		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
+		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
+		UNEVICTABLE_MLOCKFREED,
+#endif
 		NR_VM_EVENT_ITEMS
 };
 
@@ -159,6 +169,16 @@
 	return x;
 }
 
+extern unsigned long global_lru_pages(void);
+
+static inline unsigned long zone_lru_pages(struct zone *zone)
+{
+	return (zone_page_state(zone, NR_ACTIVE_ANON)
+		+ zone_page_state(zone, NR_ACTIVE_FILE)
+		+ zone_page_state(zone, NR_INACTIVE_ANON)
+		+ zone_page_state(zone, NR_INACTIVE_FILE));
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
new file mode 100644
index 0000000..033545e
--- /dev/null
+++ b/include/linux/wlp.h
@@ -0,0 +1,735 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * - Does not (yet) include support for WLP control frames
+ *   WLP Draft 0.99 [6.5].
+ *
+ *   A visual representation of the data structures.
+ *
+ *                              wssidB      wssidB
+ *                               ^           ^
+ *                               |           |
+ *                              wssidA      wssidA
+ *   wlp interface {             ^           ^
+ *       ...                     |           |
+ *       ...               ...  wssid      wssid ...
+ *       wlp --- ...             |           |
+ *   };          neighbors --> neighbA --> neighbB
+ *               ...
+ *               wss
+ *               ...
+ *               eda cache  --> neighborA --> neighborB --> neighborC ...
+ */
+
+#ifndef __LINUX__WLP_H_
+#define __LINUX__WLP_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/uwb.h>
+
+/**
+ * WLP Protocol ID
+ * WLP Draft 0.99 [6.2]
+ *
+ * The MUX header for all WLP frames
+ */
+#define WLP_PROTOCOL_ID 0x0100
+
+/**
+ * WLP Version
+ * WLP version placed in the association frames (WLP 0.99 [6.6])
+ */
+#define WLP_VERSION 0x10
+
+/**
+ * Bytes needed to print UUID as string
+ */
+#define WLP_WSS_UUID_STRSIZE 48
+
+/**
+ * Bytes needed to print nonce as string
+ */
+#define WLP_WSS_NONCE_STRSIZE 48
+
+
+/**
+ * Size used for WLP name size
+ *
+ * The WSS name is set to 65 bytes, 1 byte larger than the maximum
+ * allowed by the WLP spec. This is to have a null terminated string
+ * for display to the user. A maximum of 64 bytes will still be used
+ * when placing the WSS name field in association frames.
+ */
+#define WLP_WSS_NAME_SIZE 65
+
+/**
+ * Number of bytes added by WLP to data frame
+ *
+ * A data frame transmitted from a host will be placed in a Standard or
+ * Abbreviated WLP frame. These have an extra 4 bytes of header (struct
+ * wlp_frame_std_abbrv_hdr).
+ * When the stack sends this data frame for transmission it needs to ensure
+ * there is enough headroom for this header.
+ */
+#define WLP_DATA_HLEN 4
+
+/**
+ * State of device regarding WLP Service Set
+ *
+ * WLP_WSS_STATE_NONE: the host does not participate in any WSS
+ * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence
+ *                            ("Partial Enroll"). This state is used to
+ *                            indicate the first part of enrollment that is
+ *                            unsecure. If the WSS is unsecure then the
+ *                            state will promptly go to WLP_WSS_STATE_ENROLLED;
+ *                            if the WSS is secure then the enrollment
+ *                            procedure takes a few more steps before we are
+ *                            enrolled.
+ * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS
+ * WLP_WSS_STATE_ACTIVE: WSS is activated
+ * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS
+ *
+ */
+enum wlp_wss_state {
+	WLP_WSS_STATE_NONE = 0,
+	WLP_WSS_STATE_PART_ENROLLED,
+	WLP_WSS_STATE_ENROLLED,
+	WLP_WSS_STATE_ACTIVE,
+	WLP_WSS_STATE_CONNECTED,
+};
+
+/**
+ * WSS Secure status
+ * WLP 0.99 Table 6
+ *
+ * Set to one if the WSS is secure, zero if it is not secure
+ */
+enum wlp_wss_sec_status {
+	WLP_WSS_UNSECURE = 0,
+	WLP_WSS_SECURE,
+};
+
+/**
+ * WLP frame type
+ * WLP Draft 0.99 [6.2 Table 1]
+ */
+enum wlp_frame_type {
+	WLP_FRAME_STANDARD = 0,
+	WLP_FRAME_ABBREVIATED,
+	WLP_FRAME_CONTROL,
+	WLP_FRAME_ASSOCIATION,
+};
+
+/**
+ * WLP Association Message Type
+ * WLP Draft 0.99 [6.6.1.2 Table 8]
+ */
+enum wlp_assoc_type {
+	WLP_ASSOC_D1 = 2,
+	WLP_ASSOC_D2 = 3,
+	WLP_ASSOC_M1 = 4,
+	WLP_ASSOC_M2 = 5,
+	WLP_ASSOC_M3 = 7,
+	WLP_ASSOC_M4 = 8,
+	WLP_ASSOC_M5 = 9,
+	WLP_ASSOC_M6 = 10,
+	WLP_ASSOC_M7 = 11,
+	WLP_ASSOC_M8 = 12,
+	WLP_ASSOC_F0 = 14,
+	WLP_ASSOC_E1 = 32,
+	WLP_ASSOC_E2 = 33,
+	WLP_ASSOC_C1 = 34,
+	WLP_ASSOC_C2 = 35,
+	WLP_ASSOC_C3 = 36,
+	WLP_ASSOC_C4 = 37,
+};
+
+/**
+ * WLP Attribute Type
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ */
+enum wlp_attr_type {
+	WLP_ATTR_AUTH		= 0x1005, /* Authenticator */
+	WLP_ATTR_DEV_NAME 	= 0x1011, /* Device Name */
+	WLP_ATTR_DEV_PWD_ID 	= 0x1012, /* Device Password ID */
+	WLP_ATTR_E_HASH1	= 0x1014, /* E-Hash1 */
+	WLP_ATTR_E_HASH2	= 0x1015, /* E-Hash2 */
+	WLP_ATTR_E_SNONCE1	= 0x1016, /* E-SNonce1 */
+	WLP_ATTR_E_SNONCE2	= 0x1017, /* E-SNonce2 */
+	WLP_ATTR_ENCR_SET	= 0x1018, /* Encrypted Settings */
+	WLP_ATTR_ENRL_NONCE	= 0x101A, /* Enrollee Nonce */
+	WLP_ATTR_KEYWRAP_AUTH	= 0x101E, /* Key Wrap Authenticator */
+	WLP_ATTR_MANUF		= 0x1021, /* Manufacturer */
+	WLP_ATTR_MSG_TYPE	= 0x1022, /* Message Type */
+	WLP_ATTR_MODEL_NAME	= 0x1023, /* Model Name */
+	WLP_ATTR_MODEL_NR	= 0x1024, /* Model Number */
+	WLP_ATTR_PUB_KEY	= 0x1032, /* Public Key */
+	WLP_ATTR_REG_NONCE	= 0x1039, /* Registrar Nonce */
+	WLP_ATTR_R_HASH1	= 0x103D, /* R-Hash1 */
+	WLP_ATTR_R_HASH2	= 0x103E, /* R-Hash2 */
+	WLP_ATTR_R_SNONCE1	= 0x103F, /* R-SNonce1 */
+	WLP_ATTR_R_SNONCE2	= 0x1040, /* R-SNonce2 */
+	WLP_ATTR_SERIAL		= 0x1042, /* Serial number */
+	WLP_ATTR_UUID_E		= 0x1047, /* UUID-E */
+	WLP_ATTR_UUID_R		= 0x1048, /* UUID-R */
+	WLP_ATTR_PRI_DEV_TYPE	= 0x1054, /* Primary Device Type */
+	WLP_ATTR_SEC_DEV_TYPE	= 0x1055, /* Secondary Device Type */
+	WLP_ATTR_PORT_DEV	= 0x1056, /* Portable Device */
+	WLP_ATTR_APP_EXT	= 0x1058, /* Application Extension */
+	WLP_ATTR_WLP_VER	= 0x2000, /* WLP Version */
+	WLP_ATTR_WSSID		= 0x2001, /* WSSID */
+	WLP_ATTR_WSS_NAME	= 0x2002, /* WSS Name */
+	WLP_ATTR_WSS_SEC_STAT	= 0x2003, /* WSS Secure Status */
+	WLP_ATTR_WSS_BCAST	= 0x2004, /* WSS Broadcast Address */
+	WLP_ATTR_WSS_M_KEY	= 0x2005, /* WSS Master Key */
+	WLP_ATTR_ACC_ENRL	= 0x2006, /* Accepting Enrollment */
+	WLP_ATTR_WSS_INFO	= 0x2007, /* WSS Information */
+	WLP_ATTR_WSS_SEL_MTHD	= 0x2008, /* WSS Selection Method */
+	WLP_ATTR_ASSC_MTHD_LIST	= 0x2009, /* Association Methods List */
+	WLP_ATTR_SEL_ASSC_MTHD	= 0x200A, /* Selected Association Method */
+	WLP_ATTR_ENRL_HASH_COMM	= 0x200B, /* Enrollee Hash Commitment */
+	WLP_ATTR_WSS_TAG	= 0x200C, /* WSS Tag */
+	WLP_ATTR_WSS_VIRT	= 0x200D, /* WSS Virtual EUI-48 */
+	WLP_ATTR_WLP_ASSC_ERR	= 0x200E, /* WLP Association Error */
+	WLP_ATTR_VNDR_EXT	= 0x200F, /* Vendor Extension */
+};
+
+/**
+ * WLP Category ID of primary/secondary device
+ * WLP Draft 0.99 [6.6.1.8 Table 12]
+ */
+enum wlp_dev_category_id {
+	WLP_DEV_CAT_COMPUTER = 1,
+	WLP_DEV_CAT_INPUT,
+	WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER,
+	WLP_DEV_CAT_CAMERA,
+	WLP_DEV_CAT_STORAGE,
+	WLP_DEV_CAT_INFRASTRUCTURE,
+	WLP_DEV_CAT_DISPLAY,
+	WLP_DEV_CAT_MULTIM,
+	WLP_DEV_CAT_GAMING,
+	WLP_DEV_CAT_TELEPHONE,
+	WLP_DEV_CAT_OTHER = 65535,
+};
+
+/**
+ * WLP WSS selection method
+ * WLP Draft 0.99 [6.6.1.6 Table 10]
+ */
+enum wlp_wss_sel_mthd {
+	WLP_WSS_ENRL_SELECT = 1,	/* Enrollee selects */
+	WLP_WSS_REG_SELECT,		/* Registrar selects */
+};
+
+/**
+ * WLP association error values
+ * WLP Draft 0.99 [6.6.1.5 Table 9]
+ */
+enum wlp_assc_error {
+	WLP_ASSOC_ERROR_NONE,
+	WLP_ASSOC_ERROR_AUTH,		/* Authenticator Failure */
+	WLP_ASSOC_ERROR_ROGUE,		/* Rogue activity suspected */
+	WLP_ASSOC_ERROR_BUSY,		/* Device busy */
+	WLP_ASSOC_ERROR_LOCK,		/* Setup Locked */
+	WLP_ASSOC_ERROR_NOT_READY,	/* Registrar not ready */
+	WLP_ASSOC_ERROR_INV,		/* Invalid WSS selection */
+	WLP_ASSOC_ERROR_MSG_TIME,	/* Message timeout */
+	WLP_ASSOC_ERROR_ENR_TIME,	/* Enrollment session timeout */
+	WLP_ASSOC_ERROR_PW,		/* Device password invalid */
+	WLP_ASSOC_ERROR_VER,		/* Unsupported version */
+	WLP_ASSOC_ERROR_INT,		/* Internal error */
+	WLP_ASSOC_ERROR_UNDEF,		/* Undefined error */
+	WLP_ASSOC_ERROR_NUM,		/* Numeric comparison failure */
+	WLP_ASSOC_ERROR_WAIT,		/* Waiting for user input */
+};
+
+/**
+ * WLP Parameters
+ * WLP 0.99 [7.7]
+ */
+enum wlp_parameters {
+	WLP_PER_MSG_TIMEOUT = 15,	/* Seconds to wait for response to
+					   association message. */
+};
+
+/**
+ * WLP IE
+ *
+ * The WLP IE should be included in beacons by all devices.
+ *
+ * The driver can set only a few of the fields in this information element;
+ * most fields are managed by the device itself. When the driver needs to set
+ * a field it will only provide values for the fields of interest; the rest
+ * will be filled with zeroes. The fields of interest are:
+ *
+ * Element ID
+ * Length
+ * Capabilities (only to include WSSID Hash list length)
+ * WSSID Hash List fields
+ *
+ * WLP 0.99 [6.7]
+ *
+ * Only the fields that will be used are detailed in this structure; the rest
+ * are either not detailed or marked as "notused".
+ */
+struct wlp_ie {
+	struct uwb_ie_hdr hdr;
+	__le16 capabilities;
+	__le16 cycle_param;
+	__le16 acw_anchor_addr;
+	u8 wssid_hash_list[];
+} __attribute__((packed));
+
+static inline int wlp_ie_hash_length(struct wlp_ie *ie)
+{
+	return (le16_to_cpu(ie->capabilities) >> 12) & 0xf;
+}
+
+static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length)
+{
+	u16 caps = le16_to_cpu(ie->capabilities);
+	caps = (caps & ~(0xf << 12)) | (hash_length << 12);
+	ie->capabilities = cpu_to_le16(caps);
+}
+
+/**
+ * WLP nonce
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ *
+ * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee
+ * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2). It is passed to HW so
+ * it is packed.
+ */
+struct wlp_nonce {
+	u8 data[16];
+} __attribute__((packed));
+
+/**
+ * WLP UUID
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ *
+ * Universally Unique Identifier (UUID) encoded as an octet string in the
+ * order the octets are shown in string representation in RFC4122. A UUID
+ * is often used (UUID-E, UUID-R, WSSID). It is passed to HW so it is packed.
+ */
+struct wlp_uuid {
+	u8 data[16];
+} __attribute__((packed));
+
+
+/**
+ * Primary and secondary device type attributes
+ * WLP Draft 0.99 [6.6.1.8]
+ */
+struct wlp_dev_type {
+	enum wlp_dev_category_id category:16;
+	u8 OUI[3];
+	u8 OUIsubdiv;
+	__le16 subID;
+} __attribute__((packed));
+
+/**
+ * WLP frame header
+ * WLP Draft 0.99 [6.2]
+ */
+struct wlp_frame_hdr {
+	__le16 mux_hdr;			/* WLP_PROTOCOL_ID */
+	enum wlp_frame_type type:8;
+} __attribute__((packed));
+
+/**
+ * WLP attribute field header
+ * WLP Draft 0.99 [6.6.1]
+ *
+ * Header of each attribute found in an association frame
+ */
+struct wlp_attr_hdr {
+	__le16 type;
+	__le16 length;
+} __attribute__((packed));
+
+/**
+ * Device information commonly used together
+ *
+ * Each of these device information elements has a specified range in which it
+ * should fit (WLP 0.99 [Table 6]). The range provided in the spec does not
+ * include the terminating null '\0' character (when used in the
+ * association protocol the attribute fields are accompanied
+ * by a "length" field so the full range from the spec can be used for
+ * the value). We thus allocate an extra byte to be able to store a string
+ * of max length with a terminating '\0'.
+ */
+struct wlp_device_info {
+	char name[33];
+	char model_name[33];
+	char manufacturer[65];
+	char model_nr[33];
+	char serial[33];
+	struct wlp_dev_type prim_dev_type;
+};
+
+/**
+ * Macros for the WLP attributes
+ *
+ * There are quite a few attributes (total is 43). The attribute layout can be
+ * in one of three categories: one value, an array, an enum forced to 8 bits.
+ * These macros help with their definitions.
+ */
+#define wlp_attr(type, name)						\
+struct wlp_attr_##name {						\
+	struct wlp_attr_hdr hdr;					\
+	type name;							\
+} __attribute__((packed));
+
+#define wlp_attr_array(type, name)					\
+struct wlp_attr_##name {						\
+	struct wlp_attr_hdr hdr;					\
+	type name[];							\
+} __attribute__((packed));
+
+/**
+ * WLP association attribute fields
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ *
+ * Attributes appear in the same order as the table in the spec
+ * FIXME Does not define all attributes yet
+ */
+
+/* Device name: Friendly name of sending device */
+wlp_attr_array(u8, dev_name)
+
+/* Enrollee Nonce: Random number generated by enrollee for an enrollment
+ * session */
+wlp_attr(struct wlp_nonce, enonce)
+
+/* Manufacturer name: Name of manufacturer of the sending device */
+wlp_attr_array(u8, manufacturer)
+
+/* WLP Message Type */
+wlp_attr(u8, msg_type)
+
+/* WLP Model name: Model name of sending device */
+wlp_attr_array(u8, model_name)
+
+/* WLP Model number: Model number of sending device */
+wlp_attr_array(u8, model_nr)
+
+/* Registrar Nonce: Random number generated by registrar for an enrollment
+ * session */
+wlp_attr(struct wlp_nonce, rnonce)
+
+/* Serial number of device */
+wlp_attr_array(u8, serial)
+
+/* UUID of enrollee */
+wlp_attr(struct wlp_uuid, uuid_e)
+
+/* UUID of registrar */
+wlp_attr(struct wlp_uuid, uuid_r)
+
+/* WLP Primary device type */
+wlp_attr(struct wlp_dev_type, prim_dev_type)
+
+/* WLP Secondary device type */
+wlp_attr(struct wlp_dev_type, sec_dev_type)
+
+/* WLP protocol version */
+wlp_attr(u8, version)
+
+/* WLP service set identifier */
+wlp_attr(struct wlp_uuid, wssid)
+
+/* WLP WSS name */
+wlp_attr_array(u8, wss_name)
+
+/* WLP WSS Secure Status */
+wlp_attr(u8, wss_sec_status)
+
+/* WSS Broadcast Address */
+wlp_attr(struct uwb_mac_addr, wss_bcast)
+
+/* WLP Accepting Enrollment */
+wlp_attr(u8, accept_enrl)
+
+/**
+ * WSS information attributes
+ * WLP Draft 0.99 [6.6.3 Table 15]
+ */
+struct wlp_wss_info {
+	struct wlp_attr_wssid wssid;
+	struct wlp_attr_wss_name name;
+	struct wlp_attr_accept_enrl accept;
+	struct wlp_attr_wss_sec_status sec_stat;
+	struct wlp_attr_wss_bcast bcast;
+} __attribute__((packed));
+
+/* WLP WSS Information */
+wlp_attr_array(struct wlp_wss_info, wss_info)
+
+/* WLP WSS Selection method */
+wlp_attr(u8, wss_sel_mthd)
+
+/* WLP WSS tag */
+wlp_attr(u8, wss_tag)
+
+/* WSS Virtual Address */
+wlp_attr(struct uwb_mac_addr, wss_virt)
+
+/* WLP association error */
+wlp_attr(u8, wlp_assc_err)
+
+/**
+ * WLP standard and abbreviated frames
+ *
+ * WLP Draft 0.99 [6.3] and [6.4]
+ *
+ * The difference between the WLP standard frame and the WLP
+ * abbreviated frame is that the standard frame includes the src
+ * and dest addresses from the Ethernet header, while the abbreviated frame
+ * does not.
+ * Since the src/dest (as well as the type/length and client data) are already
+ * defined as part of the Ethernet header, we do not repeat them here.
+ * From this perspective the standard and abbreviated frames appear the
+ * same - they will be treated differently though.
+ *
+ * The size of this header is also captured in WLP_DATA_HLEN to enable
+ * interfaces to prepare their headroom.
+ */
+struct wlp_frame_std_abbrv_hdr {
+	struct wlp_frame_hdr hdr;
+	u8 tag;
+} __attribute__((packed));
+
+/**
+ * WLP association frames
+ *
+ * WLP Draft 0.99 [6.6]
+ */
+struct wlp_frame_assoc {
+	struct wlp_frame_hdr hdr;
+	enum wlp_assoc_type type:8;
+	struct wlp_attr_version version;
+	struct wlp_attr_msg_type msg_type;
+	u8 attr[];
+} __attribute__((packed));
+
+/* Ethernet to dev address mapping */
+struct wlp_eda {
+	spinlock_t lock;
+	struct list_head cache;	/* Eth<->Dev Addr cache */
+};
+
+/**
+ * WSS information temporary storage
+ *
+ * This information is only stored temporarily during discovery. It should
+ * not be stored unless the device is enrolled in the advertised WSS. This
+ * is done mainly because we follow the letter of the spec in this regard.
+ * See WLP 0.99 [7.2.3].
+ * When the device does become enrolled in a WSS the WSS information will
+ * be stored as part of the more comprehensive struct wlp_wss.
+ */
+struct wlp_wss_tmp_info {
+	char name[WLP_WSS_NAME_SIZE];
+	u8 accept_enroll;
+	u8 sec_status;
+	struct uwb_mac_addr bcast;
+};
+
+struct wlp_wssid_e {
+	struct list_head node;
+	struct wlp_uuid wssid;
+	struct wlp_wss_tmp_info *info;
+};
+
+/**
+ * A cache entry of WLP neighborhood
+ *
+ * @node: head of list is wlp->neighbors
+ * @wssid: list of wssids of this neighbor, element is wlp_wssid_e
+ * @info:  temporary storage for information learned during discovery. This
+ *         storage is used together with the wssid_e temporary storage
+ *         during discovery.
+ */
+struct wlp_neighbor_e {
+	struct list_head node;
+	struct wlp_uuid uuid;
+	struct uwb_dev *uwb_dev;
+	struct list_head wssid; /* Elements are wlp_wssid_e */
+	struct wlp_device_info *info;
+};
+
+struct wlp;
+/**
+ * Information for an association session in progress.
+ *
+ * @exp_message: The type of the expected message. Both this message and a
+ *               F0 message (which can be sent in response to any
+ *               association frame) will be accepted as a valid message for
+ *               this session.
+ * @cb:          The function that will be called upon receipt of this
+ *               message.
+ * @cb_priv:     Private data of callback
+ * @data:        Data used in association process (always a sk_buff?)
+ * @neighbor:    Address of neighbor with which association session is in
+ *               progress.
+ */
+struct wlp_session {
+	enum wlp_assoc_type exp_message;
+	void (*cb)(struct wlp *);
+	void *cb_priv;
+	void *data;
+	struct uwb_dev_addr neighbor_addr;
+};
+
+/**
+ * WLP Service Set
+ *
+ * @mutex: used to protect entire WSS structure.
+ *
+ * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum
+ *        allowed by the WLP spec. This is to have a null terminated string
+ *        for display to the user. A maximum of 64 bytes will still be used
+ *        when placing the WSS name field in association frames.
+ *
+ * @accept_enroll: Accepting enrollment: Set to one if registrar is
+ *                 accepting enrollment in WSS, or zero otherwise.
+ *
+ * Global and local information for each WSS in which we are enrolled.
+ * WLP 0.99 Section 7.2.1 and Section 7.2.2
+ */
+struct wlp_wss {
+	struct mutex mutex;
+	struct kobject kobj;
+	/* Global properties. */
+	struct wlp_uuid wssid;
+	u8 hash;
+	char name[WLP_WSS_NAME_SIZE];
+	struct uwb_mac_addr bcast;
+	u8 secure_status:1;
+	u8 master_key[16];
+	/* Local properties. */
+	u8 tag;
+	struct uwb_mac_addr virtual_addr;
+	/* Extra */
+	u8 accept_enroll:1;
+	enum wlp_wss_state state;
+};
+
+/**
+ * WLP main structure
+ * @mutex: protect changes to WLP structure. We only allow changes to the
+ *         uuid, so currently this mutex only protects this field.
+ */
+struct wlp {
+	struct mutex mutex;
+	struct uwb_rc *rc;		/* UWB radio controller */
+	struct uwb_pal pal;
+	struct wlp_eda eda;
+	struct wlp_uuid uuid;
+	struct wlp_session *session;
+	struct wlp_wss wss;
+	struct mutex nbmutex; /* Neighbor mutex protects neighbors list */
+	struct list_head neighbors; /* Elements are wlp_neighbor_e */
+	struct uwb_notifs_handler uwb_notifs_handler;
+	struct wlp_device_info *dev_info;
+	void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info);
+	int (*xmit_frame)(struct wlp *, struct sk_buff *,
+			  struct uwb_dev_addr *);
+	void (*stop_queue)(struct wlp *);
+	void (*start_queue)(struct wlp *);
+};
+
+/* sysfs */
+
+
+struct wlp_wss_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct wlp_wss *wss, char *buf);
+	ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count);
+};
+
+#define WSS_ATTR(_name, _mode, _show, _store) \
+static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode,	\
+							  _show, _store)
+
+extern int wlp_setup(struct wlp *, struct uwb_rc *);
+extern void wlp_remove(struct wlp *);
+extern ssize_t wlp_neighborhood_show(struct wlp *, char *);
+extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
+extern void wlp_wss_remove(struct wlp_wss *);
+extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *);
+extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t);
+extern ssize_t wlp_eda_show(struct wlp *, char *);
+extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_uuid_show(struct wlp *, char *);
+extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_name_show(struct wlp *, char *);
+extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *);
+extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_model_name_show(struct wlp *, char *);
+extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *);
+extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_serial_show(struct wlp *, char *);
+extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *,
+					   size_t);
+extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *,
+					  size_t);
+extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *,
+					 size_t);
+extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *,
+			     struct uwb_dev_addr *);
+extern int wlp_prepare_tx_frame(struct device *, struct wlp *,
+			       struct sk_buff *, struct uwb_dev_addr *);
+void wlp_reset_all(struct wlp *wlp);
+
+/**
+ * Initialize WSS
+ */
+static inline
+void wlp_wss_init(struct wlp_wss *wss)
+{
+	mutex_init(&wss->mutex);
+}
+
+static inline
+void wlp_init(struct wlp *wlp)
+{
+	INIT_LIST_HEAD(&wlp->neighbors);
+	mutex_init(&wlp->mutex);
+	mutex_init(&wlp->nbmutex);
+	wlp_wss_init(&wlp->wss);
+}
+
+
+#endif /* #ifndef __LINUX__WLP_H_ */
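To illustrate the WLP_DATA_HLEN headroom requirement and the struct wlp_frame_std_abbrv_hdr layout documented above, a hypothetical transmit-path helper might prepend the 4-byte WLP header as follows (sketch only; example_wlp_push_hdr() and its callers are invented for illustration):

#include <linux/wlp.h>

static int example_wlp_push_hdr(struct wlp *wlp, struct sk_buff *skb, u8 tag)
{
	struct wlp_frame_std_abbrv_hdr *hdr;

	/* Honour the WLP_DATA_HLEN headroom requirement described above. */
	if (skb_headroom(skb) < WLP_DATA_HLEN &&
	    pskb_expand_head(skb, WLP_DATA_HLEN, 0, GFP_ATOMIC))
		return -ENOMEM;

	hdr = (void *)__skb_push(skb, WLP_DATA_HLEN);
	hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
	hdr->hdr.type = WLP_FRAME_STANDARD;
	hdr->tag = tag;		/* WSS tag of the destination WSS */
	return 0;
}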
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 5c158c4..89a5a12 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -149,11 +149,11 @@
 
 extern struct workqueue_struct *
 __create_workqueue_key(const char *name, int singlethread,
-		       int freezeable, struct lock_class_key *key,
+		       int freezeable, int rt, struct lock_class_key *key,
 		       const char *lock_name);
 
 #ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, singlethread, freezeable)	\
+#define __create_workqueue(name, singlethread, freezeable, rt)	\
 ({								\
 	static struct lock_class_key __key;			\
 	const char *__lock_name;				\
@@ -164,17 +164,19 @@
 		__lock_name = #name;				\
 								\
 	__create_workqueue_key((name), (singlethread),		\
-			       (freezeable), &__key,		\
+			       (freezeable), (rt), &__key,	\
 			       __lock_name);			\
 })
 #else
-#define __create_workqueue(name, singlethread, freezeable)	\
-	__create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)
+#define __create_workqueue(name, singlethread, freezeable, rt)	\
+	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
+			       NULL, NULL)
 #endif
 
-#define create_workqueue(name) __create_workqueue((name), 0, 0)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
+#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
+#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
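The new rt argument is threaded through the __create_workqueue() macros above, with create_rt_workqueue() being the only variant that sets it. From a caller's point of view nothing else changes; a hypothetical user (example_* names invented) looks like any other workqueue user:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* Runs in the workqueue's dedicated thread. */
}
static DECLARE_WORK(example_work, example_work_fn);

static int example_setup(void)
{
	/* Equivalent to __create_workqueue("example_rt", 0, 0, 1). */
	example_wq = create_rt_workqueue("example_rt");
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void example_teardown(void)
{
	destroy_workqueue(example_wq);
}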
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 12b15c5..e585657 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -63,7 +63,15 @@
 	unsigned for_writepages:1;	/* This is a writepages() call */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
 	unsigned more_io:1;		/* more io to be dispatched */
-	unsigned range_cont:1;
+	/*
+	 * write_cache_pages() won't update wbc->nr_to_write and
+	 * mapping->writeback_index if no_nrwrite_index_update
+	 * is set.  write_cache_pages() may write more than we
+	 * requested, and we want to make sure nr_to_write and
+	 * writeback_index are updated in a consistent manner,
+	 * so we use a single control to update them.
+	 */
+	unsigned no_nrwrite_index_update:1;
 };
 
 /*
diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h
index 851f182..1d092b4 100644
--- a/include/media/soc_camera_platform.h
+++ b/include/media/soc_camera_platform.h
@@ -1,3 +1,13 @@
+/*
+ * Generic Platform Camera Driver Header
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
 #ifndef __SOC_CAMERA_H__
 #define __SOC_CAMERA_H__
 
@@ -9,6 +19,7 @@
 	unsigned long format_depth;
 	struct v4l2_pix_format format;
 	unsigned long bus_param;
+	void (*power)(int);
 	int (*set_capture)(struct soc_camera_platform_info *info, int enable);
 };
 
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 67c1f51..7d4e2db 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -123,6 +123,7 @@
 #define TUNER_TEA5761			75	/* Only FM Radio Tuner */
 #define TUNER_XC5000			76	/* Xceive Silicon Tuner */
 #define TUNER_TCL_MF02GIP_5N		77	/* TCL MF02GIP_5N */
+#define TUNER_PHILIPS_FMD1216MEX_MK3	78
 
 /* tv card specific */
 #define TDA9887_PRESENT 		(1<<0)
diff --git a/include/media/v4l2-i2c-drv-legacy.h b/include/media/v4l2-i2c-drv-legacy.h
index 975ffbf..e65dd9d 100644
--- a/include/media/v4l2-i2c-drv-legacy.h
+++ b/include/media/v4l2-i2c-drv-legacy.h
@@ -21,6 +21,17 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+/* NOTE: the full version of this header is in the v4l-dvb repository
+ * and allows v4l i2c drivers to be compiled on older kernels as well.
+ * The version of this header as it appears in the kernel is a stripped
+ * version (without all the backwards compatibility stuff) and so it
+ * looks a bit odd.
+ *
+ * If you look at the full version then you will understand the reason
+ * for introducing this header since you really don't want to have all
+ * the tricky backwards compatibility code in each and every i2c driver.
+ */
+
 struct v4l2_i2c_driver_data {
 	const char * const name;
 	int driverid;
diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h
index 40ecef2..efdc8bf 100644
--- a/include/media/v4l2-i2c-drv.h
+++ b/include/media/v4l2-i2c-drv.h
@@ -21,6 +21,17 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+/* NOTE: the full version of this header is in the v4l-dvb repository
+ * and allows v4l i2c drivers to be compiled on older kernels as well.
+ * The version of this header as it appears in the kernel is a stripped
+ * version (without all the backwards compatibility stuff) and so it
+ * looks a bit odd.
+ *
+ * If you look at the full version then you will understand the reason
+ * for introducing this header since you really don't want to have all
+ * the tricky backwards compatibility code in each and every i2c driver.
+ */
+
 #ifndef __V4L2_I2C_DRV_H__
 #define __V4L2_I2C_DRV_H__
 
diff --git a/include/media/v4l2-int-device.h b/include/media/v4l2-int-device.h
index c8b80e0..9c2df41 100644
--- a/include/media/v4l2-int-device.h
+++ b/include/media/v4l2-int-device.h
@@ -84,6 +84,8 @@
 	void *priv;
 };
 
+void v4l2_int_device_try_attach_all(void);
+
 int v4l2_int_device_register(struct v4l2_int_device *d);
 void v4l2_int_device_unregister(struct v4l2_int_device *d);
 
@@ -96,6 +98,12 @@
  *
  */
 
+enum v4l2_power {
+	V4L2_POWER_OFF = 0,
+	V4L2_POWER_ON,
+	V4L2_POWER_STANDBY,
+};
+
 /* Slave interface type. */
 enum v4l2_if_type {
 	/*
@@ -170,6 +178,9 @@
 	vidioc_int_queryctrl_num,
 	vidioc_int_g_ctrl_num,
 	vidioc_int_s_ctrl_num,
+	vidioc_int_cropcap_num,
+	vidioc_int_g_crop_num,
+	vidioc_int_s_crop_num,
 	vidioc_int_g_parm_num,
 	vidioc_int_s_parm_num,
 
@@ -182,12 +193,19 @@
 	vidioc_int_dev_init_num = 1000,
 	/* Deinitialise the device at slave detach. */
 	vidioc_int_dev_exit_num,
-	/* Set device power state: 0 is off, non-zero is on. */
+	/* Set device power state. */
 	vidioc_int_s_power_num,
+	/*
+	* Get slave private data, e.g. platform-specific slave
+	* configuration used by the master.
+	*/
+	vidioc_int_g_priv_num,
 	/* Get slave interface parameters. */
 	vidioc_int_g_ifparm_num,
 	/* Does the slave need to be reset after VIDIOC_DQBUF? */
 	vidioc_int_g_needs_reset_num,
+	vidioc_int_enum_framesizes_num,
+	vidioc_int_enum_frameintervals_num,
 
 	/*
 	 *
@@ -261,14 +279,20 @@
 V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
 V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
 V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
+V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
+V4L2_INT_WRAPPER_1(g_crop, struct v4l2_crop, *);
+V4L2_INT_WRAPPER_1(s_crop, struct v4l2_crop, *);
 V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *);
 V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *);
 
 V4L2_INT_WRAPPER_0(dev_init);
 V4L2_INT_WRAPPER_0(dev_exit);
-V4L2_INT_WRAPPER_1(s_power, int, );
+V4L2_INT_WRAPPER_1(s_power, enum v4l2_power, );
+V4L2_INT_WRAPPER_1(g_priv, void, *);
 V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *);
 V4L2_INT_WRAPPER_1(g_needs_reset, void, *);
+V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *);
+V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *);
 
 V4L2_INT_WRAPPER_0(reset);
 V4L2_INT_WRAPPER_0(init);
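With s_power now taking an enum v4l2_power instead of an int, a master driver would switch its slave's power state roughly as follows (hypothetical sketch; vidioc_int_s_power() is the wrapper the V4L2_INT_WRAPPER_1() line above is expected to generate):

static int example_sensor_set_power(struct v4l2_int_device *s, int on)
{
	/* V4L2_POWER_STANDBY is also available for a low-power idle state. */
	return vidioc_int_s_power(s, on ? V4L2_POWER_ON : V4L2_POWER_OFF);
}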
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 0bef03a..e6ba25b 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -271,26 +271,38 @@
 extern const char *v4l2_type_names[];
 
 /*  Compatibility layer interface  --  v4l1-compat module */
-typedef int (*v4l2_kioctl)(struct inode *inode, struct file *file,
+typedef int (*v4l2_kioctl)(struct file *file,
 			   unsigned int cmd, void *arg);
 #ifdef CONFIG_VIDEO_V4L1_COMPAT
-int v4l_compat_translate_ioctl(struct inode *inode, struct file *file,
+int v4l_compat_translate_ioctl(struct file *file,
 			       int cmd, void *arg, v4l2_kioctl driver_ioctl);
 #else
-#define v4l_compat_translate_ioctl(inode, file, cmd, arg, ioctl) (-EINVAL)
+#define v4l_compat_translate_ioctl(file, cmd, arg, ioctl) (-EINVAL)
 #endif
 
 /* 32 Bits compatibility layer for 64 bits processors */
 extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd,
 				unsigned long arg);
 
-extern int video_ioctl2(struct inode *inode, struct file *file,
-			  unsigned int cmd, unsigned long arg);
-
 /* Include support for obsoleted stuff */
 extern int video_usercopy(struct inode *inode, struct file *file,
 			  unsigned int cmd, unsigned long arg,
 			  int (*func)(struct inode *inode, struct file *file,
 				      unsigned int cmd, void *arg));
 
+/* Standard handlers for V4L ioctl's */
+
+/* This prototype is used on fops.unlocked_ioctl */
+extern int __video_ioctl2(struct file *file,
+			unsigned int cmd, unsigned long arg);
+
+/* This prototype is used on fops.ioctl
+ * Since fops.ioctl takes the Big Kernel Lock, it is preferred
+ * to use __video_ioctl2 instead.
+ * Note that there is no locking code inside
+ * video_ioctl2().
+ */
+extern int video_ioctl2(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg);
+
 #endif /* _V4L2_IOCTL_H */
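A driver that wants to avoid the Big Kernel Lock, as the comment above recommends, can route fops.unlocked_ioctl through __video_ioctl2(). A hypothetical wiring follows (example_* names invented; the thin wrapper only adapts the int return value to the long expected by unlocked_ioctl):

#include <linux/fs.h>
#include <linux/module.h>
#include <media/v4l2-ioctl.h>

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	/* Dispatch without taking the Big Kernel Lock. */
	return __video_ioctl2(file, cmd, arg);
}

static const struct file_operations example_v4l_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
	/* .open, .release, .mmap, etc. omitted for brevity */
};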
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index b777486..6ba4f12 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -16,7 +16,6 @@
 	int                        nfeeds;
 
 	/* videobuf_dvb_(un)register manages this */
-	struct dvb_adapter         adapter;
 	struct dvb_demux           demux;
 	struct dmxdev              dmxdev;
 	struct dmx_frontend        fe_hw;
@@ -24,12 +23,35 @@
 	struct dvb_net             net;
 };
 
-int videobuf_dvb_register(struct videobuf_dvb *dvb,
+struct videobuf_dvb_frontend {
+	struct list_head felist;
+	int id;
+	struct videobuf_dvb dvb;
+};
+
+struct videobuf_dvb_frontends {
+	struct list_head felist;
+	struct mutex lock;
+	struct dvb_adapter adapter;
+	int active_fe_id; /* Indicates which frontend in the felist is in use */
+	int gate; /* Frontend with gate control 0=!MFE,1=fe0,2=fe1 etc */
+};
+
+int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
 			  struct module *module,
 			  void *adapter_priv,
 			  struct device *device,
-			  short *adapter_nr);
-void videobuf_dvb_unregister(struct videobuf_dvb *dvb);
+			  short *adapter_nr,
+			  int mfe_shared);
+
+void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f);
+
+struct videobuf_dvb_frontend * videobuf_dvb_alloc_frontend(struct videobuf_dvb_frontends *f, int id);
+void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
+
+struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
+int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
+
 
 /*
  * Local variables:
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index fb163e2..b77c147 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -27,8 +27,6 @@
 #ifndef NET_9P_H
 #define NET_9P_H
 
-#ifdef CONFIG_NET_9P_DEBUG
-
 /**
  * enum p9_debug_flags - bits for mount time debug parameter
  * @P9_DEBUG_ERROR: more verbose error messages including original error string
@@ -39,6 +37,7 @@
  * @P9_DEBUG_TRANS: transport tracing
  * @P9_DEBUG_SLABS: memory management tracing
  * @P9_DEBUG_FCALL: verbose dump of protocol messages
+ * @P9_DEBUG_FID: fid allocation/deallocation tracking
  *
  * These flags are passed at mount time to turn on various levels of
  * verbosity and tracing which will be output to the system logs.
@@ -53,24 +52,27 @@
 	P9_DEBUG_TRANS =	(1<<6),
 	P9_DEBUG_SLABS =      	(1<<7),
 	P9_DEBUG_FCALL =	(1<<8),
+	P9_DEBUG_FID =		(1<<9),
+	P9_DEBUG_PKT =		(1<<10),
 };
 
+#ifdef CONFIG_NET_9P_DEBUG
 extern unsigned int p9_debug_level;
 
 #define P9_DPRINTK(level, format, arg...) \
 do {  \
-	if ((p9_debug_level & level) == level) \
-		printk(KERN_NOTICE "-- %s (%d): " \
-		format , __func__, task_pid_nr(current) , ## arg); \
+	if ((p9_debug_level & level) == level) {\
+		if (level == P9_DEBUG_9P) \
+			printk(KERN_NOTICE "(%8.8d) " \
+			format , task_pid_nr(current) , ## arg); \
+		else \
+			printk(KERN_NOTICE "-- %s (%d): " \
+			format , __func__, task_pid_nr(current) , ## arg); \
+	} \
 } while (0)
 
-#define PRINT_FCALL_ERROR(s, fcall) P9_DPRINTK(P9_DEBUG_ERROR,   \
-	"%s: %.*s\n", s, fcall?fcall->params.rerror.error.len:0, \
-	fcall?fcall->params.rerror.error.str:"");
-
 #else
 #define P9_DPRINTK(level, format, arg...)  do { } while (0)
-#define PRINT_FCALL_ERROR(s, fcall) do { } while (0)
 #endif
 
 #define P9_EPRINTK(level, format, arg...) \
@@ -325,33 +327,6 @@
  * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
  */
 
-struct p9_stat {
-	u16 size;
-	u16 type;
-	u32 dev;
-	struct p9_qid qid;
-	u32 mode;
-	u32 atime;
-	u32 mtime;
-	u64 length;
-	struct p9_str name;
-	struct p9_str uid;
-	struct p9_str gid;
-	struct p9_str muid;
-	struct p9_str extension;	/* 9p2000.u extensions */
-	u32 n_uid;			/* 9p2000.u extensions */
-	u32 n_gid;			/* 9p2000.u extensions */
-	u32 n_muid;			/* 9p2000.u extensions */
-};
-
-/*
- * file metadata (stat) structure used to create Twstat message
- * The is identical to &p9_stat, but the strings don't point to
- * the same memory block and should be freed separately
- *
- * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
- */
-
 struct p9_wstat {
 	u16 size;
 	u16 type;
@@ -493,12 +468,12 @@
 };
 
 struct p9_rstat {
-	struct p9_stat stat;
+	struct p9_wstat stat;
 };
 
 struct p9_twstat {
 	u32 fid;
-	struct p9_stat stat;
+	struct p9_wstat stat;
 };
 
 struct p9_rwstat {
@@ -509,8 +484,9 @@
  * @size: prefixed length of the structure
  * @id: protocol operating identifier of type &p9_msg_t
  * @tag: transaction id of the request
+ * @offset: used by marshalling routines to track current position in buffer
+ * @capacity: used by marshalling routines to track total capacity
  * @sdata: payload
- * @params: per-operation parameters
  *
  * &p9_fcall represents the structure for all 9P RPC
 * transactions.  Requests are packaged into fcalls, and responses
@@ -523,68 +499,15 @@
 	u32 size;
 	u8 id;
 	u16 tag;
-	void *sdata;
 
-	union {
-		struct p9_tversion tversion;
-		struct p9_rversion rversion;
-		struct p9_tauth tauth;
-		struct p9_rauth rauth;
-		struct p9_rerror rerror;
-		struct p9_tflush tflush;
-		struct p9_rflush rflush;
-		struct p9_tattach tattach;
-		struct p9_rattach rattach;
-		struct p9_twalk twalk;
-		struct p9_rwalk rwalk;
-		struct p9_topen topen;
-		struct p9_ropen ropen;
-		struct p9_tcreate tcreate;
-		struct p9_rcreate rcreate;
-		struct p9_tread tread;
-		struct p9_rread rread;
-		struct p9_twrite twrite;
-		struct p9_rwrite rwrite;
-		struct p9_tclunk tclunk;
-		struct p9_rclunk rclunk;
-		struct p9_tremove tremove;
-		struct p9_rremove rremove;
-		struct p9_tstat tstat;
-		struct p9_rstat rstat;
-		struct p9_twstat twstat;
-		struct p9_rwstat rwstat;
-	} params;
+	size_t offset;
+	size_t capacity;
+
+	uint8_t *sdata;
 };
 
 struct p9_idpool;
 
-int p9_deserialize_stat(void *buf, u32 buflen, struct p9_stat *stat,
-	int dotu);
-int p9_deserialize_fcall(void *buf, u32 buflen, struct p9_fcall *fc, int dotu);
-void p9_set_tag(struct p9_fcall *fc, u16 tag);
-struct p9_fcall *p9_create_tversion(u32 msize, char *version);
-struct p9_fcall *p9_create_tattach(u32 fid, u32 afid, char *uname,
-	char *aname, u32 n_uname, int dotu);
-struct p9_fcall *p9_create_tauth(u32 afid, char *uname, char *aname,
-	u32 n_uname, int dotu);
-struct p9_fcall *p9_create_tflush(u16 oldtag);
-struct p9_fcall *p9_create_twalk(u32 fid, u32 newfid, u16 nwname,
-	char **wnames);
-struct p9_fcall *p9_create_topen(u32 fid, u8 mode);
-struct p9_fcall *p9_create_tcreate(u32 fid, char *name, u32 perm, u8 mode,
-	char *extension, int dotu);
-struct p9_fcall *p9_create_tread(u32 fid, u64 offset, u32 count);
-struct p9_fcall *p9_create_twrite(u32 fid, u64 offset, u32 count,
-	const char *data);
-struct p9_fcall *p9_create_twrite_u(u32 fid, u64 offset, u32 count,
-	const char __user *data);
-struct p9_fcall *p9_create_tclunk(u32 fid);
-struct p9_fcall *p9_create_tremove(u32 fid);
-struct p9_fcall *p9_create_tstat(u32 fid);
-struct p9_fcall *p9_create_twstat(u32 fid, struct p9_wstat *wstat,
-	int dotu);
-
-int p9_printfcall(char *buf, int buflen, struct p9_fcall *fc, int dotu);
 int p9_errstr2errno(char *errstr, int len);
 
 struct p9_idpool *p9_idpool_create(void);
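With the P9_DPRINTK() change above, messages at the P9_DEBUG_9P level get a compact zero-padded "(pid)" prefix while all other levels keep the "-- func (pid):" form. A hypothetical call site, using the struct p9_fcall fields defined in this header:

static void example_trace_fcall(struct p9_fcall *fc)
{
	/* Printed as "(00001234) >>> size=... id=... tag=..." for pid 1234. */
	P9_DPRINTK(P9_DEBUG_9P, ">>> size=%u id=%d tag=%d\n",
		   fc->size, fc->id, fc->tag);

	/* Printed with the usual "-- example_trace_fcall (1234):" prefix. */
	P9_DPRINTK(P9_DEBUG_FID, "tracking fid activity\n");
}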
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index c936dd1..4012e071 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -26,6 +26,87 @@
 #ifndef NET_9P_CLIENT_H
 #define NET_9P_CLIENT_H
 
+/* Number of requests per row */
+#define P9_ROW_MAXTAG 255
+
+/**
+ * enum p9_trans_status - different states of underlying transports
+ * @Connected: transport is connected and healthy
+ * @Disconnected: transport has been disconnected
+ * @Hung: transport is connected but wedged
+ *
+ * This enumeration details the various states a transport
+ * instantiation can be in.
+ */
+
+enum p9_trans_status {
+	Connected,
+	Disconnected,
+	Hung,
+};
+
+/**
+ * enum p9_req_status_t - virtio request status
+ * @REQ_STATUS_IDLE: request slot unused
+ * @REQ_STATUS_ALLOC: request has been allocated but not sent
+ * @REQ_STATUS_UNSENT: request waiting to be sent
+ * @REQ_STATUS_SENT: request sent to server
+ * @REQ_STATUS_FLSH: a flush has been sent for this request
+ * @REQ_STATUS_RCVD: response received from server
+ * @REQ_STATUS_FLSHD: request has been flushed
+ * @REQ_STATUS_ERROR: request encountered an error on the client side
+ *
+ * The @REQ_STATUS_IDLE state is used to mark a request slot as unused
+ * but its use is actually tracked by the idpool structure, which handles tag
+ * id allocation.
+ *
+ */
+
+enum p9_req_status_t {
+	REQ_STATUS_IDLE,
+	REQ_STATUS_ALLOC,
+	REQ_STATUS_UNSENT,
+	REQ_STATUS_SENT,
+	REQ_STATUS_FLSH,
+	REQ_STATUS_RCVD,
+	REQ_STATUS_FLSHD,
+	REQ_STATUS_ERROR,
+};
+
+/**
+ * struct p9_req_t - request slots
+ * @status: status of this request slot
+ * @t_err: transport error
+ * @flush_tag: tag of request being flushed (for flush requests)
+ * @wq: wait_queue for the client to block on for this request
+ * @tc: the request fcall structure
+ * @rc: the response fcall structure
+ * @aux: transport specific data (provided for trans_fd migration)
+ * @req_list: link for higher level objects to chain requests
+ *
+ * Transports use an array to track outstanding requests
+ * instead of a list.  While this may incur overhead during initial
+ * allocation or expansion, it makes request lookup much easier as the
+ * tag id is an index into an array.  (We use tag+1 so that we can accommodate
+ * the -1 tag for the T_VERSION request).
+ * This also has the nice effect of only having to allocate wait_queues
+ * once, instead of constantly allocating and freeing them.  It's possible
+ * other resources could benefit from this scheme as well.
+ *
+ */
+
+struct p9_req_t {
+	int status;
+	int t_err;
+	u16 flush_tag;
+	wait_queue_head_t *wq;
+	struct p9_fcall *tc;
+	struct p9_fcall *rc;
+	void *aux;
+
+	struct list_head req_list;
+};
+
 /**
  * struct p9_client - per client instance state
  * @lock: protect @fidlist
@@ -36,9 +117,20 @@
  * @conn: connection state information used by trans_fd
  * @fidpool: fid handle accounting for session
  * @fidlist: List of active fid handles
+ * @tagpool: transaction id accounting for session
+ * @reqs: 2D array of requests
+ * @max_tag: current maximum tag id allocated
  *
  * The client structure is used to keep track of various per-client
  * state that has been instantiated.
+ * In order to minimize per-transaction overhead we use a
+ * simple array to look up requests instead of a hash table
+ * or linked list.  In order to support a larger number of
+ * transactions, we make this a 2D array, allocating new rows
+ * when we need to grow the total number of transactions.
+ *
+ * Each row is 256 requests and we'll support up to 256 rows for
+ * a total of 64k concurrent requests per session.
  *
  * Bugs: duplicated data and potentially unnecessary elements.
  */
@@ -48,11 +140,16 @@
 	int msize;
 	unsigned char dotu;
 	struct p9_trans_module *trans_mod;
-	struct p9_trans *trans;
+	enum p9_trans_status status;
+	void *trans;
 	struct p9_conn *conn;
 
 	struct p9_idpool *fidpool;
 	struct list_head fidlist;
+
+	struct p9_idpool *tagpool;
+	struct p9_req_t *reqs[P9_ROW_MAXTAG];
+	int max_tag;
 };
 
 /**
@@ -65,8 +162,6 @@
  * @uid: the numeric uid of the local user who owns this handle
  * @aux: transport specific information (unused?)
  * @rdir_fpos: tracks offset of file position when reading directory contents
- * @rdir_pos: (unused?)
- * @rdir_fcall: holds response of last directory read request
  * @flist: per-client-instance fid tracking
  * @dlist: per-dentry fid tracking
  *
@@ -83,12 +178,11 @@
 	void *aux;
 
 	int rdir_fpos;
-	int rdir_pos;
-	struct p9_fcall *rdir_fcall;
 	struct list_head flist;
 	struct list_head dlist;	/* list of all fids attached to a dentry */
 };
 
+int p9_client_version(struct p9_client *);
 struct p9_client *p9_client_create(const char *dev_name, char *options);
 void p9_client_destroy(struct p9_client *clnt);
 void p9_client_disconnect(struct p9_client *clnt);
@@ -103,15 +197,19 @@
 							char *extension);
 int p9_client_clunk(struct p9_fid *fid);
 int p9_client_remove(struct p9_fid *fid);
-int p9_client_read(struct p9_fid *fid, char *data, u64 offset, u32 count);
-int p9_client_readn(struct p9_fid *fid, char *data, u64 offset, u32 count);
-int p9_client_write(struct p9_fid *fid, char *data, u64 offset, u32 count);
-int p9_client_uread(struct p9_fid *fid, char __user *data, u64 offset,
-								u32 count);
-int p9_client_uwrite(struct p9_fid *fid, const char __user *data, u64 offset,
-								u32 count);
-struct p9_stat *p9_client_stat(struct p9_fid *fid);
+int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
+							u64 offset, u32 count);
+int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
+							u64 offset, u32 count);
+struct p9_wstat *p9_client_stat(struct p9_fid *fid);
 int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
-struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset);
+
+struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
+
+int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
+int p9stat_read(char *, int, struct p9_wstat *, int);
+void p9stat_free(struct p9_wstat *);
+
 
 #endif /* NET_9P_CLIENT_H */
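For illustration only: a minimal sketch of how a tag could be resolved to a request
slot under the two-dimensional reqs[] table documented above, assuming a row/column
split on tag+1.  This is an approximation, not the in-tree net/9p/client.c code.

	static struct p9_req_t *tag_to_slot(struct p9_client *c, u16 tag)
	{
		/* tag+1 so the T_VERSION request's tag of -1 lands in slot 0 */
		int row = (tag + 1) / P9_ROW_MAXTAG;
		int col = (tag + 1) % P9_ROW_MAXTAG;

		if (row >= P9_ROW_MAXTAG || !c->reqs[row])
			return NULL;	/* row not allocated yet */
		return &c->reqs[row][col];
	}

A lookup by tag is then just two array dereferences, which is why the comment
above prefers this layout over a hash table or linked list.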
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 3ca7371..6d5886e 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,52 +26,6 @@
 #ifndef NET_9P_TRANSPORT_H
 #define NET_9P_TRANSPORT_H
 
-#include <linux/module.h>
-
-/**
- * enum p9_trans_status - different states of underlying transports
- * @Connected: transport is connected and healthy
- * @Disconnected: transport has been disconnected
- * @Hung: transport is connected by wedged
- *
- * This enumeration details the various states a transport
- * instatiation can be in.
- */
-
-enum p9_trans_status {
-	Connected,
-	Disconnected,
-	Hung,
-};
-
-/**
- * struct p9_trans - per-transport state and API
- * @status: transport &p9_trans_status
- * @msize: negotiated maximum packet size (duplicate from client)
- * @extended: negotiated protocol extensions (duplicate from client)
- * @priv: transport private data
- * @close: member function to disconnect and close the transport
- * @rpc: member function to issue a request to the transport
- *
- * This is the basic API for a transport instance.  It is used as
- * a handle by the client to issue requests.  This interface is currently
- * in flux during reorganization.
- *
- * Bugs: there is lots of duplicated data here and its not clear that
- * the member functions need to be per-instance versus per transport
- * module.
- */
-
-struct p9_trans {
-	enum p9_trans_status status;
-	int msize;
-	unsigned char extended;
-	void *priv;
-	void (*close) (struct p9_trans *);
-	int (*rpc) (struct p9_trans *t, struct p9_fcall *tc,
-							struct p9_fcall **rc);
-};
-
 /**
  * struct p9_trans_module - transport module interface
  * @list: used to maintain a list of currently available transports
@@ -79,12 +33,14 @@
  * @maxsize: transport provided maximum packet size
  * @def: set if this transport should be considered the default
  * @create: member function to create a new connection on this transport
+ * @request: member function to issue a request to the transport
+ * @cancel: member function to cancel a request (if it hasn't been sent)
  *
  * This is the basic API for a transport module which is registered by the
  * transport module with the 9P core network module and used by the client
  * to instantiate a new connection on a transport.
  *
- * Bugs: the transport module list isn't protected.
+ * BUGS: the transport module list isn't protected.
  */
 
 struct p9_trans_module {
@@ -92,8 +48,11 @@
 	char *name;		/* name of transport */
 	int maxsize;		/* max message size of transport */
 	int def;		/* this transport should be default */
-	struct p9_trans * (*create)(const char *, char *, int, unsigned char);
 	struct module *owner;
+	int (*create)(struct p9_client *, const char *, char *);
+	void (*close) (struct p9_client *);
+	int (*request) (struct p9_client *, struct p9_req_t *req);
+	int (*cancel) (struct p9_client *, struct p9_req_t *req);
 };
 
 void v9fs_register_trans(struct p9_trans_module *m);
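As a rough sketch of what a transport now provides under this reworked interface
(the "dummy" names and bodies are hypothetical placeholders, not an in-tree transport):

	static int p9_dummy_create(struct p9_client *client, const char *addr, char *args)
	{
		client->status = Connected;	/* a real transport opens its connection here */
		return 0;
	}

	static void p9_dummy_close(struct p9_client *client)
	{
		client->status = Disconnected;
	}

	static int p9_dummy_request(struct p9_client *client, struct p9_req_t *req)
	{
		/* a real transport queues req->tc for transmission and later
		 * calls p9_client_cb() once req->rc has been filled in */
		return -EIO;
	}

	static int p9_dummy_cancel(struct p9_client *client, struct p9_req_t *req)
	{
		/* a real transport would try to pull an unsent request back here */
		return 1;
	}

	static struct p9_trans_module p9_dummy_trans = {
		.name		= "dummy",
		.maxsize	= 4096,
		.def		= 0,
		.owner		= THIS_MODULE,
		.create		= p9_dummy_create,
		.close		= p9_dummy_close,
		.request	= p9_dummy_request,
		.cancel		= p9_dummy_cancel,
	};

Such a module would register itself with v9fs_register_trans(&p9_dummy_trans) at
module init time.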
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index 0cb63ed..b809397 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -2,9 +2,9 @@
 #define __NETNS_X_TABLES_H
 
 #include <linux/list.h>
-#include <linux/net.h>
+#include <linux/netfilter.h>
 
 struct netns_xt {
-	struct list_head tables[NPROTO];
+	struct list_head tables[NFPROTO_NUMPROTO];
 };
 #endif
diff --git a/include/trace/sched.h b/include/trace/sched.h
new file mode 100644
index 0000000..ad47369
--- /dev/null
+++ b/include/trace/sched.h
@@ -0,0 +1,56 @@
+#ifndef _TRACE_SCHED_H
+#define _TRACE_SCHED_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+DEFINE_TRACE(sched_kthread_stop,
+	TPPROTO(struct task_struct *t),
+		TPARGS(t));
+
+DEFINE_TRACE(sched_kthread_stop_ret,
+	TPPROTO(int ret),
+		TPARGS(ret));
+
+DEFINE_TRACE(sched_wait_task,
+	TPPROTO(struct rq *rq, struct task_struct *p),
+		TPARGS(rq, p));
+
+DEFINE_TRACE(sched_wakeup,
+	TPPROTO(struct rq *rq, struct task_struct *p),
+		TPARGS(rq, p));
+
+DEFINE_TRACE(sched_wakeup_new,
+	TPPROTO(struct rq *rq, struct task_struct *p),
+		TPARGS(rq, p));
+
+DEFINE_TRACE(sched_switch,
+	TPPROTO(struct rq *rq, struct task_struct *prev,
+		struct task_struct *next),
+		TPARGS(rq, prev, next));
+
+DEFINE_TRACE(sched_migrate_task,
+	TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu),
+		TPARGS(rq, p, dest_cpu));
+
+DEFINE_TRACE(sched_process_free,
+	TPPROTO(struct task_struct *p),
+		TPARGS(p));
+
+DEFINE_TRACE(sched_process_exit,
+	TPPROTO(struct task_struct *p),
+		TPARGS(p));
+
+DEFINE_TRACE(sched_process_wait,
+	TPPROTO(struct pid *pid),
+		TPARGS(pid));
+
+DEFINE_TRACE(sched_process_fork,
+	TPPROTO(struct task_struct *parent, struct task_struct *child),
+		TPARGS(parent, child));
+
+DEFINE_TRACE(sched_signal_send,
+	TPPROTO(int sig, struct task_struct *p),
+		TPARGS(sig, p));
+
+#endif
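A hypothetical probe module (illustrative names) showing how these tracepoints are
meant to be consumed, assuming the register_trace_<name>()/unregister_trace_<name>()
helpers generated by DEFINE_TRACE():

	/* probe signature must match the TPPROTO() of the tracepoint; only
	 * prev/next are touched, so struct rq stays an opaque pointer here */
	static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
				       struct task_struct *next)
	{
		printk(KERN_DEBUG "switch %d -> %d\n", prev->pid, next->pid);
	}

	static int __init sched_probe_init(void)
	{
		return register_trace_sched_switch(probe_sched_switch);
	}

	static void __exit sched_probe_exit(void)
	{
		unregister_trace_sched_switch(probe_sched_switch);
	}

	module_init(sched_probe_init);
	module_exit(sched_probe_exit);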
diff --git a/arch/sh/include/asm/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
similarity index 82%
rename from arch/sh/include/asm/sh_mobile_lcdc.h
rename to include/video/sh_mobile_lcdc.h
index 130102f..1a4bc6a 100644
--- a/arch/sh/include/asm/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -28,6 +28,12 @@
 
 enum { LCDC_CLK_BUS, LCDC_CLK_PERIPHERAL, LCDC_CLK_EXTERNAL };
 
+#define LCDC_FLAGS_DWPOL (1 << 0) /* Rising edge dot clock data latch */
+#define LCDC_FLAGS_DIPOL (1 << 1) /* Active low display enable polarity */
+#define LCDC_FLAGS_DAPOL (1 << 2) /* Active low display data polarity */
+#define LCDC_FLAGS_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
+#define LCDC_FLAGS_DWCNT (1 << 4) /* Disable dotclock during blanking */
+
 struct sh_mobile_lcdc_sys_bus_cfg {
 	unsigned long ldmt2r;
 	unsigned long ldmt3r;
@@ -57,6 +63,7 @@
 	int bpp;
 	int interface_type; /* selects RGBn or SYSn I/F, see above */
 	int clock_divider;
+	unsigned long flags; /* LCDC_FLAGS_... */
 	struct fb_videomode lcd_cfg;
 	struct sh_mobile_lcdc_lcd_size_cfg lcd_size_cfg;
 	struct sh_mobile_lcdc_board_cfg board_cfg;
@@ -64,7 +71,6 @@
 };
 
 struct sh_mobile_lcdc_info {
-	unsigned long lddckr;
 	int clock_source;
 	struct sh_mobile_lcdc_chan_cfg ch[2];
 };
diff --git a/init/Kconfig b/init/Kconfig
index 5ceff32..44e9208 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -299,6 +299,13 @@
           for instance virtual servers and checkpoint/restart
           jobs.
 
+config CGROUP_FREEZER
+        bool "control group freezer subsystem"
+        depends on CGROUPS
+        help
+          Provides a way to freeze and unfreeze all tasks in a
+	  cgroup.
+
 config CGROUP_DEVICE
 	bool "Device controller for cgroups"
 	depends on CGROUPS && EXPERIMENTAL
@@ -730,6 +737,15 @@
 	  on EMBEDDED systems.  /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
+config PCI_QUIRKS
+	default y
+	bool "Enable PCI quirk workarounds" if EMBEDDED
+	depends on PCI
+	help
+	  This enables workarounds for various PCI chipset
+	  bugs/quirks. Disable this only if your target machine is
+	  unaffected by PCI quirks.
+
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EMBEDDED
@@ -779,6 +795,13 @@
 	  Say Y here to enable the extended profiling support mechanisms used
 	  by profilers such as OProfile.
 
+#
+# Place an empty function call at each tracepoint site. Can be
+# dynamically changed for a probe function.
+#
+config TRACEPOINTS
+	bool
+
 config MARKERS
 	bool "Activate markers"
 	help
diff --git a/init/main.c b/init/main.c
index 27f6bf6..b038fa1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -27,6 +27,7 @@
 #include <linux/gfp.h>
 #include <linux/percpu.h>
 #include <linux/kmod.h>
+#include <linux/vmalloc.h>
 #include <linux/kernel_stat.h>
 #include <linux/start_kernel.h>
 #include <linux/security.h>
@@ -51,6 +52,7 @@
 #include <linux/key.h>
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
+#include <linux/page_cgroup.h>
 #include <linux/debug_locks.h>
 #include <linux/debugobjects.h>
 #include <linux/lockdep.h>
@@ -60,6 +62,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/idr.h>
+#include <linux/ftrace.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -642,8 +645,10 @@
 		initrd_start = 0;
 	}
 #endif
+	vmalloc_init();
 	vfs_caches_init_early();
 	cpuset_init_early();
+	page_cgroup_init();
 	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
@@ -687,46 +692,43 @@
 
 	acpi_early_init(); /* before LAPIC and SMP init */
 
+	ftrace_init();
+
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
 
 static int initcall_debug;
-
-static int __init initcall_debug_setup(char *str)
-{
-	initcall_debug = 1;
-	return 1;
-}
-__setup("initcall_debug", initcall_debug_setup);
+core_param(initcall_debug, initcall_debug, bool, 0644);
 
 int do_one_initcall(initcall_t fn)
 {
 	int count = preempt_count();
-	ktime_t t0, t1, delta;
+	ktime_t delta;
 	char msgbuf[64];
-	int result;
+	struct boot_trace it;
 
 	if (initcall_debug) {
-		printk("calling  %pF @ %i\n", fn, task_pid_nr(current));
-		t0 = ktime_get();
+		it.caller = task_pid_nr(current);
+		printk("calling  %pF @ %i\n", fn, it.caller);
+		it.calltime = ktime_get();
 	}
 
-	result = fn();
+	it.result = fn();
 
 	if (initcall_debug) {
-		t1 = ktime_get();
-		delta = ktime_sub(t1, t0);
-
-		printk("initcall %pF returned %d after %Ld msecs\n",
-			fn, result,
-			(unsigned long long) delta.tv64 >> 20);
+		it.rettime = ktime_get();
+		delta = ktime_sub(it.rettime, it.calltime);
+		it.duration = (unsigned long long) delta.tv64 >> 10;
+		printk("initcall %pF returned %d after %Ld usecs\n", fn,
+			it.result, it.duration);
+		trace_boot(&it, fn);
 	}
 
 	msgbuf[0] = 0;
 
-	if (result && result != -ENODEV && initcall_debug)
-		sprintf(msgbuf, "error code %d ", result);
+	if (it.result && it.result != -ENODEV && initcall_debug)
+		sprintf(msgbuf, "error code %d ", it.result);
 
 	if (preempt_count() != count) {
 		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -740,7 +742,7 @@
 		printk("initcall %pF returned with %s\n", fn, msgbuf);
 	}
 
-	return result;
+	return it.result;
 }
 
 
@@ -767,8 +769,6 @@
 static void __init do_basic_setup(void)
 {
 	rcu_init_sched(); /* needed by module_init stage. */
-	/* drivers will send hotplug events */
-	init_workqueues();
 	usermodehelper_init();
 	driver_init();
 	init_irq_proc();
@@ -852,9 +852,12 @@
 
 	cad_pid = task_pid(current);
 
+	init_workqueues();
+
 	smp_prepare_cpus(setup_max_cpus);
 
 	do_pre_smp_initcalls();
+	start_boot_trace();
 
 	smp_init();
 	sched_init_smp();
@@ -881,6 +884,7 @@
 	 * we're essentially up and running. Get rid of the
 	 * initmem segments and start the user-mode stuff..
 	 */
+	stop_boot_trace();
 	init_post();
 	return 0;
 }
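For reference, the core_param() form used above makes initcall_debug both a boot
parameter and a runtime-writable knob.  Declaring another such flag follows the same
pattern; "my_core_debug" below is an illustrative example, not an existing parameter:

	static int my_core_debug;
	core_param(my_core_debug, my_core_debug, bool, 0644);

Booting with my_core_debug=1 (no module prefix) sets it, and the 0644 mode leaves it
adjustable through sysfs at runtime.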
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 96fb36c..68eb857 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -52,6 +52,14 @@
 #define HARD_MSGMAX 	(131072/sizeof(void*))
 #define DFLT_MSGSIZEMAX 8192	/* max message size */
 
+/*
+ * Define the ranges various user-specified maximum values can
+ * be set to.
+ */
+#define MIN_MSGMAX	1		/* min value for msg_max */
+#define MAX_MSGMAX	HARD_MSGMAX	/* max value for msg_max */
+#define MIN_MSGSIZEMAX	128		/* min value for msgsize_max */
+#define MAX_MSGSIZEMAX	(8192*128)	/* max value for msgsize_max */
 
 struct ext_wait_queue {		/* queue of sleeping tasks */
 	struct task_struct *task;
@@ -134,8 +142,8 @@
 			info->qsize = 0;
 			info->user = NULL;	/* set when all is ok */
 			memset(&info->attr, 0, sizeof(info->attr));
-			info->attr.mq_maxmsg = DFLT_MSGMAX;
-			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
+			info->attr.mq_maxmsg = msg_max;
+			info->attr.mq_msgsize = msgsize_max;
 			if (attr) {
 				info->attr.mq_maxmsg = attr->mq_maxmsg;
 				info->attr.mq_msgsize = attr->mq_msgsize;
@@ -1191,11 +1199,11 @@
 	.kill_sb = kill_litter_super,
 };
 
-static int msg_max_limit_min = DFLT_MSGMAX;
-static int msg_max_limit_max = HARD_MSGMAX;
+static int msg_max_limit_min = MIN_MSGMAX;
+static int msg_max_limit_max = MAX_MSGMAX;
 
-static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
-static int msg_maxsize_limit_max = INT_MAX;
+static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
+static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;
 
 static ctl_table mq_sysctls[] = {
 	{
diff --git a/ipc/shm.c b/ipc/shm.c
index e77ec698..0add3fa 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -737,6 +737,10 @@
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
+		struct file *uninitialized_var(shm_file);
+
+		lru_add_drain_all();  /* drain pagevecs to lru lists */
+
 		shp = shm_lock_check(ns, shmid);
 		if (IS_ERR(shp)) {
 			err = PTR_ERR(shp);
diff --git a/kernel/Kconfig.freezer b/kernel/Kconfig.freezer
new file mode 100644
index 0000000..a3bb4cb
--- /dev/null
+++ b/kernel/Kconfig.freezer
@@ -0,0 +1,2 @@
+config FREEZER
+	def_bool PM_SLEEP || CGROUP_FREEZER
diff --git a/kernel/Makefile b/kernel/Makefile
index 4e1d7df..305f11d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -24,6 +24,7 @@
 CFLAGS_REMOVE_sched.o = -mno-spe -pg
 endif
 
+obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -55,6 +56,7 @@
 obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
 obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
+obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
 obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
 obj-$(CONFIG_UTS_NS) += utsname.o
@@ -83,6 +85,7 @@
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
+obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FTRACE) += trace/
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8c6e1c1..046c160 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -241,7 +241,6 @@
 	struct cg_cgroup_link *link;
 	struct cg_cgroup_link *saved_link;
 
-	write_lock(&css_set_lock);
 	hlist_del(&cg->hlist);
 	css_set_count--;
 
@@ -251,16 +250,25 @@
 		list_del(&link->cgrp_link_list);
 		kfree(link);
 	}
-
-	write_unlock(&css_set_lock);
 }
 
-static void __release_css_set(struct kref *k, int taskexit)
+static void __put_css_set(struct css_set *cg, int taskexit)
 {
 	int i;
-	struct css_set *cg = container_of(k, struct css_set, ref);
-
+	/*
+	 * Ensure that the refcount doesn't hit zero while any readers
+	 * can see it. Similar to atomic_dec_and_lock(), but for an
+	 * rwlock.
+	 */
+	if (atomic_add_unless(&cg->refcount, -1, 1))
+		return;
+	write_lock(&css_set_lock);
+	if (!atomic_dec_and_test(&cg->refcount)) {
+		write_unlock(&css_set_lock);
+		return;
+	}
 	unlink_css_set(cg);
+	write_unlock(&css_set_lock);
 
 	rcu_read_lock();
 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
@@ -276,32 +284,22 @@
 	kfree(cg);
 }
 
-static void release_css_set(struct kref *k)
-{
-	__release_css_set(k, 0);
-}
-
-static void release_css_set_taskexit(struct kref *k)
-{
-	__release_css_set(k, 1);
-}
-
 /*
  * refcounted get/put for css_set objects
  */
 static inline void get_css_set(struct css_set *cg)
 {
-	kref_get(&cg->ref);
+	atomic_inc(&cg->refcount);
 }
 
 static inline void put_css_set(struct css_set *cg)
 {
-	kref_put(&cg->ref, release_css_set);
+	__put_css_set(cg, 0);
 }
 
 static inline void put_css_set_taskexit(struct css_set *cg)
 {
-	kref_put(&cg->ref, release_css_set_taskexit);
+	__put_css_set(cg, 1);
 }
 
 /*
@@ -427,7 +425,7 @@
 		return NULL;
 	}
 
-	kref_init(&res->ref);
+	atomic_set(&res->refcount, 1);
 	INIT_LIST_HEAD(&res->cg_links);
 	INIT_LIST_HEAD(&res->tasks);
 	INIT_HLIST_NODE(&res->hlist);
@@ -870,6 +868,14 @@
 	.remount_fs = cgroup_remount,
 };
 
+static void init_cgroup_housekeeping(struct cgroup *cgrp)
+{
+	INIT_LIST_HEAD(&cgrp->sibling);
+	INIT_LIST_HEAD(&cgrp->children);
+	INIT_LIST_HEAD(&cgrp->css_sets);
+	INIT_LIST_HEAD(&cgrp->release_list);
+	init_rwsem(&cgrp->pids_mutex);
+}
 static void init_cgroup_root(struct cgroupfs_root *root)
 {
 	struct cgroup *cgrp = &root->top_cgroup;
@@ -878,10 +884,7 @@
 	root->number_of_cgroups = 1;
 	cgrp->root = root;
 	cgrp->top_cgroup = cgrp;
-	INIT_LIST_HEAD(&cgrp->sibling);
-	INIT_LIST_HEAD(&cgrp->children);
-	INIT_LIST_HEAD(&cgrp->css_sets);
-	INIT_LIST_HEAD(&cgrp->release_list);
+	init_cgroup_housekeeping(cgrp);
 }
 
 static int cgroup_test_super(struct super_block *sb, void *data)
@@ -1728,7 +1731,7 @@
 
 	read_lock(&css_set_lock);
 	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
-		count += atomic_read(&link->cg->ref.refcount);
+		count += atomic_read(&link->cg->refcount);
 	}
 	read_unlock(&css_set_lock);
 	return count;
@@ -1997,16 +2000,7 @@
  * but we cannot guarantee that the information we produce is correct
  * unless we produce it entirely atomically.
  *
- * Upon tasks file open(), a struct ctr_struct is allocated, that
- * will have a pointer to an array (also allocated here).  The struct
- * ctr_struct * is stored in file->private_data.  Its resources will
- * be freed by release() when the file is closed.  The array is used
- * to sprintf the PIDs and then used by read().
  */
-struct ctr_struct {
-	char *buf;
-	int bufsz;
-};
 
 /*
  * Load into 'pidarray' up to 'npids' of the tasks using cgroup
@@ -2088,41 +2082,131 @@
 	return *(pid_t *)a - *(pid_t *)b;
 }
 
-/*
- * Convert array 'a' of 'npids' pid_t's to a string of newline separated
- * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
- * count 'cnt' of how many chars would be written if buf were large enough.
- */
-static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
-{
-	int cnt = 0;
-	int i;
 
-	for (i = 0; i < npids; i++)
-		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
-	return cnt;
+/*
+ * seq_file methods for the "tasks" file. The seq_file position is the
+ * next pid to display; the seq_file iterator is a pointer to the pid
+ * in the cgroup->tasks_pids array.
+ */
+
+static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
+{
+	/*
+	 * Initially we receive a position value that corresponds to
+	 * one more than the last pid shown (or 0 on the first call or
+	 * after a seek to the start). Use a binary search to find the
+	 * next pid to display, if any.
+	 */
+	struct cgroup *cgrp = s->private;
+	int index = 0, pid = *pos;
+	int *iter;
+
+	down_read(&cgrp->pids_mutex);
+	if (pid) {
+		int end = cgrp->pids_length;
+		int i;
+		while (index < end) {
+			int mid = (index + end) / 2;
+			if (cgrp->tasks_pids[mid] == pid) {
+				index = mid;
+				break;
+			} else if (cgrp->tasks_pids[mid] <= pid)
+				index = mid + 1;
+			else
+				end = mid;
+		}
+	}
+	/* If we're off the end of the array, we're done */
+	if (index >= cgrp->pids_length)
+		return NULL;
+	/* Update the abstract position to be the actual pid that we found */
+	iter = cgrp->tasks_pids + index;
+	*pos = *iter;
+	return iter;
 }
 
-/*
- * Handle an open on 'tasks' file.  Prepare a buffer listing the
- * process id's of tasks currently attached to the cgroup being opened.
- *
- * Does not require any specific cgroup mutexes, and does not take any.
- */
-static int cgroup_tasks_open(struct inode *unused, struct file *file)
+static void cgroup_tasks_stop(struct seq_file *s, void *v)
+{
+	struct cgroup *cgrp = s->private;
+	up_read(&cgrp->pids_mutex);
+}
+
+static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct cgroup *cgrp = s->private;
+	int *p = v;
+	int *end = cgrp->tasks_pids + cgrp->pids_length;
+
+	/*
+	 * Advance to the next pid in the array. If this goes off the
+	 * end, we're done
+	 */
+	p++;
+	if (p >= end) {
+		return NULL;
+	} else {
+		*pos = *p;
+		return p;
+	}
+}
+
+static int cgroup_tasks_show(struct seq_file *s, void *v)
+{
+	return seq_printf(s, "%d\n", *(int *)v);
+}
+
+static struct seq_operations cgroup_tasks_seq_operations = {
+	.start = cgroup_tasks_start,
+	.stop = cgroup_tasks_stop,
+	.next = cgroup_tasks_next,
+	.show = cgroup_tasks_show,
+};
+
+static void release_cgroup_pid_array(struct cgroup *cgrp)
+{
+	down_write(&cgrp->pids_mutex);
+	BUG_ON(!cgrp->pids_use_count);
+	if (!--cgrp->pids_use_count) {
+		kfree(cgrp->tasks_pids);
+		cgrp->tasks_pids = NULL;
+		cgrp->pids_length = 0;
+	}
+	up_write(&cgrp->pids_mutex);
+}
+
+static int cgroup_tasks_release(struct inode *inode, struct file *file)
 {
 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
-	struct ctr_struct *ctr;
-	pid_t *pidarray;
-	int npids;
-	char c;
 
 	if (!(file->f_mode & FMODE_READ))
 		return 0;
 
-	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
-	if (!ctr)
-		goto err0;
+	release_cgroup_pid_array(cgrp);
+	return seq_release(inode, file);
+}
+
+static struct file_operations cgroup_tasks_operations = {
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = cgroup_file_write,
+	.release = cgroup_tasks_release,
+};
+
+/*
+ * Handle an open on 'tasks' file.  Prepare an array containing the
+ * process id's of tasks currently attached to the cgroup being opened.
+ */
+
+static int cgroup_tasks_open(struct inode *unused, struct file *file)
+{
+	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+	pid_t *pidarray;
+	int npids;
+	int retval;
+
+	/* Nothing to do for write-only files */
+	if (!(file->f_mode & FMODE_READ))
+		return 0;
 
 	/*
 	 * If cgroup gets more users after we read count, we won't have
@@ -2131,57 +2215,31 @@
 	 * show up until sometime later on.
 	 */
 	npids = cgroup_task_count(cgrp);
-	if (npids) {
-		pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
-		if (!pidarray)
-			goto err1;
+	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
+	if (!pidarray)
+		return -ENOMEM;
+	npids = pid_array_load(pidarray, npids, cgrp);
+	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
 
-		npids = pid_array_load(pidarray, npids, cgrp);
-		sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
+	/*
+	 * Store the array in the cgroup, freeing the old
+	 * array if necessary
+	 */
+	down_write(&cgrp->pids_mutex);
+	kfree(cgrp->tasks_pids);
+	cgrp->tasks_pids = pidarray;
+	cgrp->pids_length = npids;
+	cgrp->pids_use_count++;
+	up_write(&cgrp->pids_mutex);
 
-		/* Call pid_array_to_buf() twice, first just to get bufsz */
-		ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
-		ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
-		if (!ctr->buf)
-			goto err2;
-		ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
+	file->f_op = &cgroup_tasks_operations;
 
-		kfree(pidarray);
-	} else {
-		ctr->buf = NULL;
-		ctr->bufsz = 0;
+	retval = seq_open(file, &cgroup_tasks_seq_operations);
+	if (retval) {
+		release_cgroup_pid_array(cgrp);
+		return retval;
 	}
-	file->private_data = ctr;
-	return 0;
-
-err2:
-	kfree(pidarray);
-err1:
-	kfree(ctr);
-err0:
-	return -ENOMEM;
-}
-
-static ssize_t cgroup_tasks_read(struct cgroup *cgrp,
-				    struct cftype *cft,
-				    struct file *file, char __user *buf,
-				    size_t nbytes, loff_t *ppos)
-{
-	struct ctr_struct *ctr = file->private_data;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
-}
-
-static int cgroup_tasks_release(struct inode *unused_inode,
-					struct file *file)
-{
-	struct ctr_struct *ctr;
-
-	if (file->f_mode & FMODE_READ) {
-		ctr = file->private_data;
-		kfree(ctr->buf);
-		kfree(ctr);
-	}
+	((struct seq_file *)file->private_data)->private = cgrp;
 	return 0;
 }
 
@@ -2210,7 +2268,6 @@
 	{
 		.name = "tasks",
 		.open = cgroup_tasks_open,
-		.read = cgroup_tasks_read,
 		.write_u64 = cgroup_tasks_write,
 		.release = cgroup_tasks_release,
 		.private = FILE_TASKLIST,
@@ -2300,10 +2357,7 @@
 
 	mutex_lock(&cgroup_mutex);
 
-	INIT_LIST_HEAD(&cgrp->sibling);
-	INIT_LIST_HEAD(&cgrp->children);
-	INIT_LIST_HEAD(&cgrp->css_sets);
-	INIT_LIST_HEAD(&cgrp->release_list);
+	init_cgroup_housekeeping(cgrp);
 
 	cgrp->parent = parent;
 	cgrp->root = parent->root;
@@ -2495,8 +2549,7 @@
 int __init cgroup_init_early(void)
 {
 	int i;
-	kref_init(&init_css_set.ref);
-	kref_get(&init_css_set.ref);
+	atomic_set(&init_css_set.refcount, 1);
 	INIT_LIST_HEAD(&init_css_set.cg_links);
 	INIT_LIST_HEAD(&init_css_set.tasks);
 	INIT_HLIST_NODE(&init_css_set.hlist);
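The refcount handling in __put_css_set() above is the rwlock analogue of
atomic_dec_and_lock(); a generic sketch of the pattern, with illustrative names:

	static void put_object(struct my_object *obj)
	{
		/* fast path: drop the count unless this put could reach zero */
		if (atomic_add_unless(&obj->refcount, -1, 1))
			return;

		/* slow path: take the write lock, then re-check under it so
		 * readers never observe a zero refcount */
		write_lock(&object_list_lock);
		if (!atomic_dec_and_test(&obj->refcount)) {
			write_unlock(&object_list_lock);
			return;
		}
		unlink_object(obj);	/* remove from reader-visible lists */
		write_unlock(&object_list_lock);
		kfree(obj);
	}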
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index c3dc3ab..daca620 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -57,7 +57,7 @@
 	u64 count;
 
 	rcu_read_lock();
-	count = atomic_read(&current->cgroups->ref.refcount);
+	count = atomic_read(&current->cgroups->refcount);
 	rcu_read_unlock();
 	return count;
 }
@@ -90,7 +90,7 @@
 	{
 		.name = "releasable",
 		.read_u64 = releasable_read,
-	}
+	},
 };
 
 static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
new file mode 100644
index 0000000..e950569
--- /dev/null
+++ b/kernel/cgroup_freezer.c
@@ -0,0 +1,379 @@
+/*
+ * cgroup_freezer.c -  control group freezer subsystem
+ *
+ * Copyright IBM Corporation, 2007
+ *
+ * Author : Cedric Le Goater <clg@fr.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/freezer.h>
+#include <linux/seq_file.h>
+
+enum freezer_state {
+	CGROUP_THAWED = 0,
+	CGROUP_FREEZING,
+	CGROUP_FROZEN,
+};
+
+struct freezer {
+	struct cgroup_subsys_state css;
+	enum freezer_state state;
+	spinlock_t lock; /* protects _writes_ to state */
+};
+
+static inline struct freezer *cgroup_freezer(
+		struct cgroup *cgroup)
+{
+	return container_of(
+		cgroup_subsys_state(cgroup, freezer_subsys_id),
+		struct freezer, css);
+}
+
+static inline struct freezer *task_freezer(struct task_struct *task)
+{
+	return container_of(task_subsys_state(task, freezer_subsys_id),
+			    struct freezer, css);
+}
+
+int cgroup_frozen(struct task_struct *task)
+{
+	struct freezer *freezer;
+	enum freezer_state state;
+
+	task_lock(task);
+	freezer = task_freezer(task);
+	state = freezer->state;
+	task_unlock(task);
+
+	return state == CGROUP_FROZEN;
+}
+
+/*
+ * cgroups_write_string() limits the size of freezer state strings to
+ * CGROUP_LOCAL_BUFFER_SIZE
+ */
+static const char *freezer_state_strs[] = {
+	"THAWED",
+	"FREEZING",
+	"FROZEN",
+};
+
+/*
+ * State diagram
+ * Transitions are caused by userspace writes to the freezer.state file.
+ * The values in parentheses are state labels. The rest are edge labels.
+ *
+ * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
+ *    ^ ^                    |                     |
+ *    | \_______THAWED_______/                     |
+ *    \__________________________THAWED____________/
+ */
+
+struct cgroup_subsys freezer_subsys;
+
+/* Locks taken and their ordering
+ * ------------------------------
+ * css_set_lock
+ * cgroup_mutex (AKA cgroup_lock)
+ * task->alloc_lock (AKA task_lock)
+ * freezer->lock
+ * task->sighand->siglock
+ *
+ * cgroup code forces css_set_lock to be taken before task->alloc_lock
+ *
+ * freezer_create(), freezer_destroy():
+ * cgroup_mutex [ by cgroup core ]
+ *
+ * can_attach():
+ * cgroup_mutex
+ *
+ * cgroup_frozen():
+ * task->alloc_lock (to get task's cgroup)
+ *
+ * freezer_fork() (preserving fork() performance means we can't take cgroup_mutex):
+ * task->alloc_lock (to get task's cgroup)
+ * freezer->lock
+ *  sighand->siglock (if the cgroup is freezing)
+ *
+ * freezer_read():
+ * cgroup_mutex
+ *  freezer->lock
+ *   read_lock css_set_lock (cgroup iterator start)
+ *
+ * freezer_write() (freeze):
+ * cgroup_mutex
+ *  freezer->lock
+ *   read_lock css_set_lock (cgroup iterator start)
+ *    sighand->siglock
+ *
+ * freezer_write() (unfreeze):
+ * cgroup_mutex
+ *  freezer->lock
+ *   read_lock css_set_lock (cgroup iterator start)
+ *    task->alloc_lock (to prevent races with freeze_task())
+ *     sighand->siglock
+ */
+static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
+						  struct cgroup *cgroup)
+{
+	struct freezer *freezer;
+
+	freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
+	if (!freezer)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&freezer->lock);
+	freezer->state = CGROUP_THAWED;
+	return &freezer->css;
+}
+
+static void freezer_destroy(struct cgroup_subsys *ss,
+			    struct cgroup *cgroup)
+{
+	kfree(cgroup_freezer(cgroup));
+}
+
+/* Task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+	return frozen(task) ||
+		(task_is_stopped_or_traced(task) && freezing(task));
+}
+
+/*
+ * The call to cgroup_lock() in the freezer.state write method prevents
+ * a write to that file racing against an attach, and hence the
+ * can_attach() result will remain valid until the attach completes.
+ */
+static int freezer_can_attach(struct cgroup_subsys *ss,
+			      struct cgroup *new_cgroup,
+			      struct task_struct *task)
+{
+	struct freezer *freezer;
+	int retval;
+
+	/* Anything frozen can't move or be moved to/from */
+
+	if (is_task_frozen_enough(task))
+		return -EBUSY;
+
+	freezer = cgroup_freezer(new_cgroup);
+	if (freezer->state == CGROUP_FROZEN)
+		return -EBUSY;
+
+	retval = 0;
+	task_lock(task);
+	freezer = task_freezer(task);
+	if (freezer->state == CGROUP_FROZEN)
+		retval = -EBUSY;
+	task_unlock(task);
+	return retval;
+}
+
+static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	struct freezer *freezer;
+
+	task_lock(task);
+	freezer = task_freezer(task);
+	task_unlock(task);
+
+	BUG_ON(freezer->state == CGROUP_FROZEN);
+	spin_lock_irq(&freezer->lock);
+	/* Locking avoids race with FREEZING -> THAWED transitions. */
+	if (freezer->state == CGROUP_FREEZING)
+		freeze_task(task, true);
+	spin_unlock_irq(&freezer->lock);
+}
+
+/*
+ * caller must hold freezer->lock
+ */
+static void update_freezer_state(struct cgroup *cgroup,
+				 struct freezer *freezer)
+{
+	struct cgroup_iter it;
+	struct task_struct *task;
+	unsigned int nfrozen = 0, ntotal = 0;
+
+	cgroup_iter_start(cgroup, &it);
+	while ((task = cgroup_iter_next(cgroup, &it))) {
+		ntotal++;
+		if (is_task_frozen_enough(task))
+			nfrozen++;
+	}
+
+	/*
+	 * Transitioning to FROZEN only when no new tasks can be added
+	 * ensures that the cgroup is never in the FROZEN state while
+	 * there are unfrozen tasks.
+	 */
+	if (nfrozen == ntotal)
+		freezer->state = CGROUP_FROZEN;
+	else if (nfrozen > 0)
+		freezer->state = CGROUP_FREEZING;
+	else
+		freezer->state = CGROUP_THAWED;
+	cgroup_iter_end(cgroup, &it);
+}
+
+static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
+			struct seq_file *m)
+{
+	struct freezer *freezer;
+	enum freezer_state state;
+
+	if (!cgroup_lock_live_group(cgroup))
+		return -ENODEV;
+
+	freezer = cgroup_freezer(cgroup);
+	spin_lock_irq(&freezer->lock);
+	state = freezer->state;
+	if (state == CGROUP_FREEZING) {
+		/* We change from FREEZING to FROZEN lazily if the cgroup was
+		 * only partially frozen when we exited write. */
+		update_freezer_state(cgroup, freezer);
+		state = freezer->state;
+	}
+	spin_unlock_irq(&freezer->lock);
+	cgroup_unlock();
+
+	seq_puts(m, freezer_state_strs[state]);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+{
+	struct cgroup_iter it;
+	struct task_struct *task;
+	unsigned int num_cant_freeze_now = 0;
+
+	freezer->state = CGROUP_FREEZING;
+	cgroup_iter_start(cgroup, &it);
+	while ((task = cgroup_iter_next(cgroup, &it))) {
+		if (!freeze_task(task, true))
+			continue;
+		if (is_task_frozen_enough(task))
+			continue;
+		if (!freezing(task) && !freezer_should_skip(task))
+			num_cant_freeze_now++;
+	}
+	cgroup_iter_end(cgroup, &it);
+
+	return num_cant_freeze_now ? -EBUSY : 0;
+}
+
+static int unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+{
+	struct cgroup_iter it;
+	struct task_struct *task;
+
+	cgroup_iter_start(cgroup, &it);
+	while ((task = cgroup_iter_next(cgroup, &it))) {
+		int do_wake;
+
+		task_lock(task);
+		do_wake = __thaw_process(task);
+		task_unlock(task);
+		if (do_wake)
+			wake_up_process(task);
+	}
+	cgroup_iter_end(cgroup, &it);
+	freezer->state = CGROUP_THAWED;
+
+	return 0;
+}
+
+static int freezer_change_state(struct cgroup *cgroup,
+				enum freezer_state goal_state)
+{
+	struct freezer *freezer;
+	int retval = 0;
+
+	freezer = cgroup_freezer(cgroup);
+	spin_lock_irq(&freezer->lock);
+	update_freezer_state(cgroup, freezer);
+	if (goal_state == freezer->state)
+		goto out;
+	switch (freezer->state) {
+	case CGROUP_THAWED:
+		retval = try_to_freeze_cgroup(cgroup, freezer);
+		break;
+	case CGROUP_FREEZING:
+		if (goal_state == CGROUP_FROZEN) {
+			/* Userspace is retrying after
+			 * "/bin/echo FROZEN > freezer.state" returned -EBUSY */
+			retval = try_to_freeze_cgroup(cgroup, freezer);
+			break;
+		}
+		/* state == FREEZING and goal_state == THAWED, so unfreeze */
+	case CGROUP_FROZEN:
+		retval = unfreeze_cgroup(cgroup, freezer);
+		break;
+	default:
+		break;
+	}
+out:
+	spin_unlock_irq(&freezer->lock);
+
+	return retval;
+}
+
+static int freezer_write(struct cgroup *cgroup,
+			 struct cftype *cft,
+			 const char *buffer)
+{
+	int retval;
+	enum freezer_state goal_state;
+
+	if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
+		goal_state = CGROUP_THAWED;
+	else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
+		goal_state = CGROUP_FROZEN;
+	else
+		return -EIO;
+
+	if (!cgroup_lock_live_group(cgroup))
+		return -ENODEV;
+	retval = freezer_change_state(cgroup, goal_state);
+	cgroup_unlock();
+	return retval;
+}
+
+static struct cftype files[] = {
+	{
+		.name = "state",
+		.read_seq_string = freezer_read,
+		.write_string = freezer_write,
+	},
+};
+
+static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
+{
+	return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
+}
+
+struct cgroup_subsys freezer_subsys = {
+	.name		= "freezer",
+	.create		= freezer_create,
+	.destroy	= freezer_destroy,
+	.populate	= freezer_populate,
+	.subsys_id	= freezer_subsys_id,
+	.can_attach	= freezer_can_attach,
+	.attach		= NULL,
+	.fork		= freezer_fork,
+	.exit		= NULL,
+};
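From userspace the subsystem is driven entirely through the freezer.state file.  A
sketch in C of the freeze/thaw sequence the comments above describe; the mount point
and group name are assumptions:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	#define STATE_FILE "/cgroups/frozen_group/freezer.state"

	static int write_state(const char *state)
	{
		int fd = open(STATE_FILE, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, state, strlen(state));	/* may fail with EBUSY */
		close(fd);
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		char buf[16];
		int fd;

		while (write_state("FROZEN") != 0)
			usleep(10000);		/* retry on -EBUSY, as the code expects */

		/* poll until the lazy FREEZING -> FROZEN transition completes */
		do {
			usleep(10000);
			fd = open(STATE_FILE, O_RDONLY);
			if (fd < 0)
				return 1;
			memset(buf, 0, sizeof(buf));
			read(fd, buf, sizeof(buf) - 1);
			close(fd);
		} while (strncmp(buf, "FROZEN", 6) != 0);

		/* ... all tasks in the group are now frozen ... */
		return write_state("THAWED");
	}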
diff --git a/kernel/compat.c b/kernel/compat.c
index 143990e..8eafe3e 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -23,6 +23,7 @@
 #include <linux/timex.h>
 #include <linux/migrate.h>
 #include <linux/posix-timers.h>
+#include <linux/times.h>
 
 #include <asm/uaccess.h>
 
@@ -208,49 +209,23 @@
 	return 0;
 }
 
+static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
+{
+	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
+}
+
 asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
 {
-	/*
-	 *	In the SMP world we might just be unlucky and have one of
-	 *	the times increment as we use it. Since the value is an
-	 *	atomically safe type this is just fine. Conceptually its
-	 *	as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
+		struct tms tms;
 		struct compat_tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;
 
-		read_lock(&tasklist_lock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-
-		/*
-		 * While we have tasklist_lock read-locked, no dying thread
-		 * can be updating current->signal->[us]time.  Instead,
-		 * we got their counts included in the live thread loop.
-		 * However, another thread can come in right now and
-		 * do a wait call that updates current->signal->c[us]time.
-		 * To make sure we always see that pair updated atomically,
-		 * we take the siglock around fetching them.
-		 */
-		spin_lock_irq(&tsk->sighand->siglock);
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
-
-		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
-		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
-		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
-		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
+		do_sys_times(&tms);
+		/* Convert our struct tms to the compat version. */
+		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
+		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
+		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
+		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
 		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
 			return -EFAULT;
 	}
diff --git a/kernel/configs.c b/kernel/configs.c
index 4c34521..abaee68 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -54,9 +54,6 @@
 
 #ifdef CONFIG_IKCONFIG_PROC
 
-/**************************************************/
-/* globals and useful constants                   */
-
 static ssize_t
 ikconfig_read_current(struct file *file, char __user *buf,
 		      size_t len, loff_t * offset)
@@ -71,9 +68,6 @@
 	.read = ikconfig_read_current,
 };
 
-/***************************************************/
-/* ikconfig_init: start up everything we need to */
-
 static int __init ikconfig_init(void)
 {
 	struct proc_dir_entry *entry;
@@ -89,9 +83,6 @@
 	return 0;
 }
 
-/***************************************************/
-/* ikconfig_cleanup: clean up our mess           */
-
 static void __exit ikconfig_cleanup(void)
 {
 	remove_proc_entry("config.gz", NULL);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index eab7bd6..3e00526 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1172,7 +1172,7 @@
 {
 	struct cpuset trialcs;
 	int err;
-	int cpus_nonempty, balance_flag_changed;
+	int balance_flag_changed;
 
 	trialcs = *cs;
 	if (turning_on)
@@ -1184,7 +1184,6 @@
 	if (err < 0)
 		return err;
 
-	cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
 	balance_flag_changed = (is_sched_load_balance(cs) !=
 		 			is_sched_load_balance(&trialcs));
 
@@ -1192,7 +1191,7 @@
 	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
-	if (cpus_nonempty && balance_flag_changed)
+	if (!cpus_empty(trialcs.cpus_allowed) && balance_flag_changed)
 		async_rebuild_sched_domains();
 
 	return 0;
@@ -2437,19 +2436,15 @@
 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t");
-	m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
-					task->cpus_allowed);
+	seq_cpumask(m, &task->cpus_allowed);
 	seq_printf(m, "\n");
 	seq_printf(m, "Cpus_allowed_list:\t");
-	m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
-					task->cpus_allowed);
+	seq_cpumask_list(m, &task->cpus_allowed);
 	seq_printf(m, "\n");
 	seq_printf(m, "Mems_allowed:\t");
-	m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
-					task->mems_allowed);
+	seq_nodemask(m, &task->mems_allowed);
 	seq_printf(m, "\n");
 	seq_printf(m, "Mems_allowed_list:\t");
-	m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
-					task->mems_allowed);
+	seq_nodemask_list(m, &task->mems_allowed);
 	seq_printf(m, "\n");
 }
diff --git a/kernel/exit.c b/kernel/exit.c
index 0ef4673..80137a5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,6 +47,7 @@
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
+#include <trace/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -112,8 +113,6 @@
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, task_utime(tsk));
-		sig->stime = cputime_add(sig->stime, task_stime(tsk));
 		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
@@ -122,7 +121,6 @@
 		sig->inblock += task_io_get_inblock(tsk);
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
-		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 		sig = NULL; /* Marker for below. */
 	}
 
@@ -149,7 +147,10 @@
 
 static void delayed_put_task_struct(struct rcu_head *rhp)
 {
-	put_task_struct(container_of(rhp, struct task_struct, rcu));
+	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
+	trace_sched_process_free(tsk);
+	put_task_struct(tsk);
 }
 
 
@@ -1073,6 +1074,8 @@
 
 	if (group_dead)
 		acct_process();
+	trace_sched_process_exit(tsk);
+
 	exit_sem(tsk);
 	exit_files(tsk);
 	exit_fs(tsk);
@@ -1301,6 +1304,7 @@
 	if (likely(!traced)) {
 		struct signal_struct *psig;
 		struct signal_struct *sig;
+		struct task_cputime cputime;
 
 		/*
 		 * The resource counters for the group leader are in its
@@ -1316,20 +1320,23 @@
 		 * need to protect the access to p->parent->signal fields,
 		 * as other threads in the parent group can be right
 		 * here reaping other children at the same time.
+		 *
+		 * We use thread_group_cputime() to get times for the thread
+		 * group, which consolidates times for all threads in the
+		 * group including the group leader.
 		 */
 		spin_lock_irq(&p->parent->sighand->siglock);
 		psig = p->parent->signal;
 		sig = p->signal;
+		thread_group_cputime(p, &cputime);
 		psig->cutime =
 			cputime_add(psig->cutime,
-			cputime_add(p->utime,
-			cputime_add(sig->utime,
-				    sig->cutime)));
+			cputime_add(cputime.utime,
+				    sig->cutime));
 		psig->cstime =
 			cputime_add(psig->cstime,
-			cputime_add(p->stime,
-			cputime_add(sig->stime,
-				    sig->cstime)));
+			cputime_add(cputime.stime,
+				    sig->cstime));
 		psig->cgtime =
 			cputime_add(psig->cgtime,
 			cputime_add(p->gtime,
@@ -1674,6 +1681,8 @@
 	struct task_struct *tsk;
 	int retval;
 
+	trace_sched_process_wait(pid);
+
 	add_wait_queue(&current->signal->wait_chldexit,&wait);
 repeat:
 	/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 30de644..4d09355 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -58,6 +58,7 @@
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
+#include <trace/sched.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -759,15 +760,44 @@
 		kmem_cache_free(sighand_cachep, sighand);
 }
 
+
+/*
+ * Initialize POSIX timer handling for a thread group.
+ */
+static void posix_cpu_timers_init_group(struct signal_struct *sig)
+{
+	/* Thread group counters. */
+	thread_group_cputime_init(sig);
+
+	/* Expiration times and increments. */
+	sig->it_virt_expires = cputime_zero;
+	sig->it_virt_incr = cputime_zero;
+	sig->it_prof_expires = cputime_zero;
+	sig->it_prof_incr = cputime_zero;
+
+	/* Cached expiration times. */
+	sig->cputime_expires.prof_exp = cputime_zero;
+	sig->cputime_expires.virt_exp = cputime_zero;
+	sig->cputime_expires.sched_exp = 0;
+
+	/* The timer lists. */
+	INIT_LIST_HEAD(&sig->cpu_timers[0]);
+	INIT_LIST_HEAD(&sig->cpu_timers[1]);
+	INIT_LIST_HEAD(&sig->cpu_timers[2]);
+}
+
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 	int ret;
 
 	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
-		atomic_inc(&current->signal->live);
-		return 0;
+		ret = thread_group_cputime_clone_thread(current);
+		if (likely(!ret)) {
+			atomic_inc(&current->signal->count);
+			atomic_inc(&current->signal->live);
+		}
+		return ret;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -795,40 +825,25 @@
 	sig->it_real_incr.tv64 = 0;
 	sig->real_timer.function = it_real_fn;
 
-	sig->it_virt_expires = cputime_zero;
-	sig->it_virt_incr = cputime_zero;
-	sig->it_prof_expires = cputime_zero;
-	sig->it_prof_incr = cputime_zero;
-
 	sig->leader = 0;	/* session leadership doesn't inherit */
 	sig->tty_old_pgrp = NULL;
 	sig->tty = NULL;
 
-	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
+	sig->cutime = sig->cstime = cputime_zero;
 	sig->gtime = cputime_zero;
 	sig->cgtime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
 	task_io_accounting_init(&sig->ioac);
-	sig->sum_sched_runtime = 0;
-	INIT_LIST_HEAD(&sig->cpu_timers[0]);
-	INIT_LIST_HEAD(&sig->cpu_timers[1]);
-	INIT_LIST_HEAD(&sig->cpu_timers[2]);
 	taskstats_tgid_init(sig);
 
 	task_lock(current->group_leader);
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-		/*
-		 * New sole thread in the process gets an expiry time
-		 * of the whole CPU time limit.
-		 */
-		tsk->it_prof_expires =
-			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
-	}
+	posix_cpu_timers_init_group(sig);
+
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
@@ -838,6 +853,7 @@
 
 void __cleanup_signal(struct signal_struct *sig)
 {
+	thread_group_cputime_free(sig);
 	exit_thread_group_keys(sig);
 	tty_kref_put(sig->tty);
 	kmem_cache_free(signal_cachep, sig);
@@ -888,6 +904,19 @@
 #endif /* CONFIG_MM_OWNER */
 
 /*
+ * Initialize POSIX timer handling for a single task.
+ */
+static void posix_cpu_timers_init(struct task_struct *tsk)
+{
+	tsk->cputime_expires.prof_exp = cputime_zero;
+	tsk->cputime_expires.virt_exp = cputime_zero;
+	tsk->cputime_expires.sched_exp = 0;
+	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
+	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
+	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
+}
+
+/*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
  *
@@ -997,12 +1026,7 @@
 	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);
 
-	p->it_virt_expires = cputime_zero;
-	p->it_prof_expires = cputime_zero;
-	p->it_sched_expires = 0;
-	INIT_LIST_HEAD(&p->cpu_timers[0]);
-	INIT_LIST_HEAD(&p->cpu_timers[1]);
-	INIT_LIST_HEAD(&p->cpu_timers[2]);
+	posix_cpu_timers_init(p);
 
 	p->lock_depth = -1;		/* -1 = no lock */
 	do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1203,21 +1227,6 @@
 	if (clone_flags & CLONE_THREAD) {
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-
-		if (!cputime_eq(current->signal->it_virt_expires,
-				cputime_zero) ||
-		    !cputime_eq(current->signal->it_prof_expires,
-				cputime_zero) ||
-		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
-		    !list_empty(&current->signal->cpu_timers[0]) ||
-		    !list_empty(&current->signal->cpu_timers[1]) ||
-		    !list_empty(&current->signal->cpu_timers[2])) {
-			/*
-			 * Have child wake up on its first tick to check
-			 * for process CPU timers.
-			 */
-			p->it_prof_expires = jiffies_to_cputime(1);
-		}
 	}
 
 	if (likely(p->pid)) {
@@ -1364,6 +1373,8 @@
 	if (!IS_ERR(p)) {
 		struct completion vfork;
 
+		trace_sched_process_fork(current, p);
+
 		nr = task_pid_vnr(p);
 
 		if (clone_flags & CLONE_PARENT_SETTID)
diff --git a/kernel/freezer.c b/kernel/freezer.c
new file mode 100644
index 0000000..ba6248b
--- /dev/null
+++ b/kernel/freezer.c
@@ -0,0 +1,154 @@
+/*
+ * kernel/freezer.c - Function to freeze a process
+ *
+ * Originally from kernel/power/process.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+#include <linux/freezer.h>
+
+/*
+ * freezing is complete, mark current process as frozen
+ */
+static inline void frozen_process(void)
+{
+	if (!unlikely(current->flags & PF_NOFREEZE)) {
+		current->flags |= PF_FROZEN;
+		wmb();
+	}
+	clear_freeze_flag(current);
+}
+
+/* The refrigerator is the place where frozen processes are stored :-). */
+void refrigerator(void)
+{
+	/* Hmm, should we be allowed to suspend when there are realtime
+	   processes around? */
+	long save;
+
+	task_lock(current);
+	if (freezing(current)) {
+		frozen_process();
+		task_unlock(current);
+	} else {
+		task_unlock(current);
+		return;
+	}
+	save = current->state;
+	pr_debug("%s entered refrigerator\n", current->comm);
+
+	spin_lock_irq(&current->sighand->siglock);
+	recalc_sigpending(); /* We sent fake signal, clean it up */
+	spin_unlock_irq(&current->sighand->siglock);
+
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (!frozen(current))
+			break;
+		schedule();
+	}
+	pr_debug("%s left refrigerator\n", current->comm);
+	__set_current_state(save);
+}
+EXPORT_SYMBOL(refrigerator);
+
+static void fake_signal_wake_up(struct task_struct *p)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&p->sighand->siglock, flags);
+	signal_wake_up(p, 0);
+	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+}
+
+/**
+ *	freeze_task - send a freeze request to given task
+ *	@p: task to send the request to
+ *	@sig_only: if set, the request will only be sent if the task has the
+ *		PF_FREEZER_NOSIG flag unset
+ *	Return value: 'false', if @sig_only is set and the task has
+ *		PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ *
+ *	The freeze request is sent by setting the task's TIF_FREEZE flag and
+ *	either sending a fake signal to it or waking it up, depending on whether
+ *	or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
+ *	has PF_FREEZER_NOSIG set (i.e. it is a typical kernel thread), its
+ *	TIF_FREEZE flag will not be set.
+ */
+bool freeze_task(struct task_struct *p, bool sig_only)
+{
+	/*
+	 * We first check if the task is freezing and next if it has already
+	 * been frozen to avoid the race with frozen_process() which first marks
+	 * the task as frozen and next clears its TIF_FREEZE.
+	 */
+	if (!freezing(p)) {
+		rmb();
+		if (frozen(p))
+			return false;
+
+		if (!sig_only || should_send_signal(p))
+			set_freeze_flag(p);
+		else
+			return false;
+	}
+
+	if (should_send_signal(p)) {
+		if (!signal_pending(p))
+			fake_signal_wake_up(p);
+	} else if (sig_only) {
+		return false;
+	} else {
+		wake_up_state(p, TASK_INTERRUPTIBLE);
+	}
+
+	return true;
+}
+
+void cancel_freezing(struct task_struct *p)
+{
+	unsigned long flags;
+
+	if (freezing(p)) {
+		pr_debug("  clean up: %s\n", p->comm);
+		clear_freeze_flag(p);
+		spin_lock_irqsave(&p->sighand->siglock, flags);
+		recalc_sigpending_and_wake(p);
+		spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	}
+}
+
+/*
+ * Wake up a frozen process
+ *
+ * task_lock() is needed to prevent the race with refrigerator() which may
+ * occur if the freezing of tasks fails.  Namely, without the lock, if the
+ * freezing of tasks failed, thaw_tasks() might have run before a task in
+ * refrigerator() could call frozen_process(), in which case the task would be
+ * frozen and no one would thaw it.
+ */
+int __thaw_process(struct task_struct *p)
+{
+	if (frozen(p)) {
+		p->flags &= ~PF_FROZEN;
+		return 1;
+	}
+	clear_freeze_flag(p);
+	return 0;
+}
+
+int thaw_process(struct task_struct *p)
+{
+	task_lock(p);
+	if (__thaw_process(p) == 1) {
+		task_unlock(p);
+		wake_up_process(p);
+		return 1;
+	}
+	task_unlock(p);
+	return 0;
+}
+EXPORT_SYMBOL(thaw_process);
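A kernel thread that wants to be freezable cooperates with the code above through the
helpers in linux/freezer.h; a minimal sketch (the worker itself is hypothetical):

	static int my_worker(void *unused)
	{
		set_freezable();	/* clear PF_NOFREEZE so freeze requests reach us */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* parks in refrigerator() while freezing(current) */
			/* ... do one unit of work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}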
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e..95978f4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1403,9 +1403,7 @@
 		if (!base->first)
 			continue;
 
-		if (base->get_softirq_time)
-			base->softirq_time = base->get_softirq_time();
-		else if (gettime) {
+		if (gettime) {
 			hrtimer_get_softirq_time(cpu_base);
 			gettime = 0;
 		}
@@ -1688,9 +1686,11 @@
 	new_base = &get_cpu_var(hrtimer_bases);
 
 	tick_cancel_sched_timer(cpu);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, so deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1703,8 +1703,7 @@
 		raise = 1;
 
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 
 	if (raise)
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 533068c..cc0f732 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -30,17 +30,16 @@
 unsigned long probe_irq_on(void)
 {
 	struct irq_desc *desc;
-	unsigned long mask;
-	unsigned int i;
+	unsigned long mask = 0;
+	unsigned int status;
+	int i;
 
 	mutex_lock(&probing_active);
 	/*
 	 * something may have generated an irq long ago and we want to
 	 * flush such a longstanding irq before considering it as spurious.
 	 */
-	for (i = NR_IRQS-1; i > 0; i--) {
-		desc = irq_desc + i;
-
+	for_each_irq_desc_reverse(i, desc) {
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			/*
@@ -68,9 +67,7 @@
 	 * (we must startup again here because if a longstanding irq
 	 * happened in the previous stage, it may have masked itself)
 	 */
-	for (i = NR_IRQS-1; i > 0; i--) {
-		desc = irq_desc + i;
-
+	for_each_irq_desc_reverse(i, desc) {
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -88,11 +85,7 @@
 	/*
 	 * Now filter out any obviously spurious interrupts
 	 */
-	mask = 0;
-	for (i = 0; i < NR_IRQS; i++) {
-		unsigned int status;
-
-		desc = irq_desc + i;
+	for_each_irq_desc(i, desc) {
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
@@ -126,14 +119,11 @@
  */
 unsigned int probe_irq_mask(unsigned long val)
 {
-	unsigned int mask;
+	unsigned int status, mask = 0;
+	struct irq_desc *desc;
 	int i;
 
-	mask = 0;
-	for (i = 0; i < NR_IRQS; i++) {
-		struct irq_desc *desc = irq_desc + i;
-		unsigned int status;
-
+	for_each_irq_desc(i, desc) {
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
@@ -171,20 +161,19 @@
  */
 int probe_irq_off(unsigned long val)
 {
-	int i, irq_found = 0, nr_irqs = 0;
+	int i, irq_found = 0, nr_of_irqs = 0;
+	struct irq_desc *desc;
+	unsigned int status;
 
-	for (i = 0; i < NR_IRQS; i++) {
-		struct irq_desc *desc = irq_desc + i;
-		unsigned int status;
-
+	for_each_irq_desc(i, desc) {
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
 			if (!(status & IRQ_WAITING)) {
-				if (!nr_irqs)
+				if (!nr_of_irqs)
 					irq_found = i;
-				nr_irqs++;
+				nr_of_irqs++;
 			}
 			desc->status = status & ~IRQ_AUTODETECT;
 			desc->chip->shutdown(i);
@@ -193,7 +182,7 @@
 	}
 	mutex_unlock(&probing_active);
 
-	if (nr_irqs > 1)
+	if (nr_of_irqs > 1)
 		irq_found = -irq_found;
 
 	return irq_found;
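
For context, the classic driver-side sequence that these probe loops serve; it is unchanged by the conversion to for_each_irq_desc(). The hardware trigger helper is hypothetical, the rest is the long-standing autoprobe API.

	unsigned long mask;
	int irq;

	mask = probe_irq_on();			/* arm every unused, probeable line */
	example_hw_raise_irq(dev);		/* hypothetical: make the device interrupt */
	mdelay(10);
	irq = probe_irq_off(mask);		/* >0: line found, 0: none, <0: several fired */
	if (irq <= 0)
		printk(KERN_WARNING "example: IRQ autoprobe failed (%d)\n", irq);
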
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3cd441e..10b5092 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -24,16 +24,15 @@
  */
 void dynamic_irq_init(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
 		return;
 	}
 
 	/* Ensure we don't have left over values from a previous use of this irq */
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status = IRQ_DISABLED;
 	desc->chip = &no_irq_chip;
@@ -57,15 +56,14 @@
  */
 void dynamic_irq_cleanup(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	if (desc->action) {
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -78,6 +76,7 @@
 	desc->chip_data = NULL;
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
+	desc->name = NULL;
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
@@ -89,10 +88,10 @@
  */
 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
 		return -EINVAL;
 	}
@@ -100,7 +99,6 @@
 	if (!chip)
 		chip = &no_irq_chip;
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	irq_chip_set_defaults(chip);
 	desc->chip = chip;
@@ -111,27 +109,27 @@
 EXPORT_SYMBOL(set_irq_chip);
 
 /**
- *	set_irq_type - set the irq type for an irq
+ *	set_irq_type - set the irq trigger type for an irq
  *	@irq:	irq number
- *	@type:	interrupt type - see include/linux/interrupt.h
+ *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  */
 int set_irq_type(unsigned int irq, unsigned int type)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret = -ENXIO;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
 		return -ENODEV;
 	}
 
-	desc = irq_desc + irq;
-	if (desc->chip->set_type) {
-		spin_lock_irqsave(&desc->lock, flags);
-		ret = desc->chip->set_type(irq, type);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	}
+	if (type == IRQ_TYPE_NONE)
+		return 0;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = __irq_set_trigger(desc, irq, type);
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_type);
@@ -145,16 +143,15 @@
  */
 int set_irq_data(unsigned int irq, void *data)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR
 		       "Trying to install controller data for IRQ%d\n", irq);
 		return -EINVAL;
 	}
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->handler_data = data;
 	spin_unlock_irqrestore(&desc->lock, flags);
@@ -171,15 +168,15 @@
  */
 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR
 		       "Trying to install msi data for IRQ%d\n", irq);
 		return -EINVAL;
 	}
-	desc = irq_desc + irq;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->msi_desc = entry;
 	if (entry)
@@ -197,10 +194,16 @@
  */
 int set_irq_chip_data(unsigned int irq, void *data)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS || !desc->chip) {
+	if (!desc) {
+		printk(KERN_ERR
+		       "Trying to install chip data for IRQ%d\n", irq);
+		return -EINVAL;
+	}
+
+	if (!desc->chip) {
 		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
 		return -EINVAL;
 	}
@@ -218,7 +221,7 @@
  */
 static void default_enable(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	desc->chip->unmask(irq);
 	desc->status &= ~IRQ_MASKED;
@@ -236,8 +239,9 @@
  */
 static unsigned int default_startup(unsigned int irq)
 {
-	irq_desc[irq].chip->enable(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
 
+	desc->chip->enable(irq);
 	return 0;
 }
 
@@ -246,7 +250,7 @@
  */
 static void default_shutdown(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	desc->chip->mask(irq);
 	desc->status |= IRQ_MASKED;
@@ -305,14 +309,13 @@
 {
 	struct irqaction *action;
 	irqreturn_t action_ret;
-	const unsigned int cpu = smp_processor_id();
 
 	spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -344,7 +347,6 @@
 void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
@@ -354,7 +356,7 @@
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -392,7 +394,6 @@
 void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
@@ -402,7 +403,7 @@
 		goto out;
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -451,8 +452,6 @@
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	const unsigned int cpu = smp_processor_id();
-
 	spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -468,8 +467,7 @@
 		mask_ack_irq(desc, irq);
 		goto out_unlock;
 	}
-
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
@@ -524,7 +522,7 @@
 {
 	irqreturn_t action_ret;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
@@ -541,17 +539,15 @@
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR
 		       "Trying to install type control for IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_desc + irq;
-
 	if (!handle)
 		handle = handle_bad_irq;
 	else if (desc->chip == &no_irq_chip) {
@@ -583,7 +579,7 @@
 		desc->status &= ~IRQ_DISABLED;
 		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 		desc->depth = 0;
-		desc->chip->unmask(irq);
+		desc->chip->startup(irq);
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -606,17 +602,14 @@
 
 void __init set_irq_noprobe(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
-
 		return;
 	}
 
-	desc = irq_desc + irq;
-
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOPROBE;
 	spin_unlock_irqrestore(&desc->lock, flags);
@@ -624,17 +617,14 @@
 
 void __init set_irq_probe(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
-
 		return;
 	}
 
-	desc = irq_desc + irq;
-
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status &= ~IRQ_NOPROBE;
 	spin_unlock_irqrestore(&desc->lock, flags);
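
A sketch of how platform code typically consumes these setters when wiring up one interrupt line; EXAMPLE_IRQ, example_irq_chip and example_chip_priv are placeholders, while handle_level_irq() and IRQ_TYPE_LEVEL_HIGH are the real flow handler and trigger constant. Note that set_irq_type() now funnels through __irq_set_trigger() instead of calling chip->set_type() directly.

	set_irq_chip_and_handler(EXAMPLE_IRQ, &example_irq_chip, handle_level_irq);
	set_irq_chip_data(EXAMPLE_IRQ, example_chip_priv);
	set_irq_type(EXAMPLE_IRQ, IRQ_TYPE_LEVEL_HIGH);
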
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 5fa6198..c815b42 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -25,11 +25,10 @@
  *
  * Handles spurious and unhandled IRQs. It also prints a debug message.
  */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -47,6 +46,9 @@
  *
  * Controller mappings for all interrupt sources:
  */
+int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -66,7 +68,9 @@
  */
 static void ack_bad(unsigned int irq)
 {
-	print_irq_desc(irq, irq_desc + irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	print_irq_desc(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -131,8 +135,6 @@
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	handle_dynamic_tick(action);
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -165,11 +167,12 @@
  */
 unsigned int __do_IRQ(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 	unsigned int status;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
+
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
 
@@ -256,8 +259,8 @@
 }
 #endif
 
-#ifdef CONFIG_TRACE_IRQFLAGS
 
+#ifdef CONFIG_TRACE_IRQFLAGS
 /*
  * lockdep: we want to handle all irq_desc locks as a single lock-class:
  */
@@ -265,10 +268,10 @@
 
 void early_init_irq_lock_class(void)
 {
+	struct irq_desc *desc;
 	int i;
 
-	for (i = 0; i < NR_IRQS; i++)
-		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+	for_each_irq_desc(i, desc)
+		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 }
-
 #endif
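
The pattern introduced throughout this series, shown once in isolation: irq_to_desc() replaces direct irq_desc[] indexing so that a later sparse-irq implementation can hand back NULL for numbers that were never allocated. A minimal, hypothetical user looks like this.

static int example_touch_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)			/* out of range or not allocated */
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);
	/* ... operate on desc->status, desc->chip, desc->action ... */
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
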
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 08a849a..c9767e6 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -10,12 +10,15 @@
 /* Set default handler: */
 extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
 
+extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+		unsigned long flags);
+
 #ifdef CONFIG_PROC_FS
-extern void register_irq_proc(unsigned int irq);
+extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void register_handler_proc(unsigned int irq, struct irqaction *action);
 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
 #else
-static inline void register_irq_proc(unsigned int irq) { }
+static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
 static inline void register_handler_proc(unsigned int irq,
 					 struct irqaction *action) { }
 static inline void unregister_handler_proc(unsigned int irq,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 60c49e3..c498a1b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -31,10 +31,10 @@
  */
 void synchronize_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned int status;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	do {
@@ -64,7 +64,7 @@
  */
 int irq_can_set_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
 	    !desc->chip->set_affinity)
@@ -81,18 +81,17 @@
  */
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
-	set_balance_irq_affinity(irq, cpumask);
-
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT) {
+	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&desc->lock, flags);
+		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	} else
@@ -111,16 +110,17 @@
 int irq_select_affinity(unsigned int irq)
 {
 	cpumask_t mask;
+	struct irq_desc *desc;
 
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	irq_desc[irq].affinity = mask;
-	irq_desc[irq].chip->set_affinity(irq, mask);
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
+	desc->chip->set_affinity(irq, mask);
 
-	set_balance_irq_affinity(irq, mask);
 	return 0;
 }
 #endif
@@ -140,10 +140,10 @@
  */
 void disable_irq_nosync(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
@@ -169,9 +169,9 @@
  */
 void disable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	disable_irq_nosync(irq);
@@ -211,10 +211,10 @@
  */
 void enable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
@@ -223,9 +223,9 @@
 }
 EXPORT_SYMBOL(enable_irq);
 
-int set_irq_wake_real(unsigned int irq, unsigned int on)
+static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;
 
 	if (desc->chip->set_wake)
@@ -248,7 +248,7 @@
  */
 int set_irq_wake(unsigned int irq, unsigned int on)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret = 0;
 
@@ -288,12 +288,16 @@
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 
-	if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
+	if (!desc)
 		return 0;
 
-	action = irq_desc[irq].action;
+	if (desc->status & IRQ_NOREQUEST)
+		return 0;
+
+	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
@@ -312,10 +316,11 @@
 		desc->handle_irq = NULL;
 }
 
-static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
+int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags)
 {
 	int ret;
+	struct irq_chip *chip = desc->chip;
 
 	if (!chip || !chip->set_type) {
 		/*
@@ -333,6 +338,11 @@
 		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
 				(int)(flags & IRQF_TRIGGER_MASK),
 				irq, chip->set_type);
+	else {
+		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+		desc->status &= ~IRQ_TYPE_SENSE_MASK;
+		desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+	}
 
 	return ret;
 }
@@ -341,16 +351,16 @@
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
-int setup_irq(unsigned int irq, struct irqaction *new)
+static int
+__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 {
-	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *old, **p;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
 	int ret;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return -EINVAL;
 
 	if (desc->chip == &no_irq_chip)
@@ -411,7 +421,7 @@
 
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
-			ret = __irq_set_trigger(desc->chip, irq, new->flags);
+			ret = __irq_set_trigger(desc, irq, new->flags);
 
 			if (ret) {
 				spin_unlock_irqrestore(&desc->lock, flags);
@@ -430,16 +440,21 @@
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
 			desc->status &= ~IRQ_DISABLED;
-			if (desc->chip->startup)
-				desc->chip->startup(irq);
-			else
-				desc->chip->enable(irq);
+			desc->chip->startup(irq);
 		} else
 			/* Undo nested disables: */
 			desc->depth = 1;
 
 		/* Set default affinity mask once everything is setup */
 		irq_select_affinity(irq);
+
+	} else if ((new->flags & IRQF_TRIGGER_MASK)
+			&& (new->flags & IRQF_TRIGGER_MASK)
+				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
+		/* hope the handler works with the actual trigger mode... */
+		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
+				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
+				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
 	*p = new;
@@ -464,7 +479,7 @@
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	new->irq = irq;
-	register_irq_proc(irq);
+	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
 
@@ -484,6 +499,20 @@
 }
 
 /**
+ *	setup_irq - setup an interrupt
+ *	@irq: Interrupt line to setup
+ *	@act: irqaction for the interrupt
+ *
+ * Used to statically set up interrupts in the early boot process.
+ */
+int setup_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __setup_irq(irq, desc, act);
+}
+
+/**
  *	free_irq - free an interrupt
  *	@irq: Interrupt line to free
  *	@dev_id: Device identity to free
@@ -499,15 +528,15 @@
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction **p;
 	unsigned long flags;
 
 	WARN_ON(in_interrupt());
-	if (irq >= NR_IRQS)
+
+	if (!desc)
 		return;
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	p = &desc->action;
 	for (;;) {
@@ -596,12 +625,14 @@
  *	IRQF_SHARED		Interrupt is shared
  *	IRQF_DISABLED	Disable local interrupts while processing
  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
+ *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
  */
 int request_irq(unsigned int irq, irq_handler_t handler,
 		unsigned long irqflags, const char *devname, void *dev_id)
 {
 	struct irqaction *action;
+	struct irq_desc *desc;
 	int retval;
 
 #ifdef CONFIG_LOCKDEP
@@ -618,9 +649,12 @@
 	 */
 	if ((irqflags & IRQF_SHARED) && !dev_id)
 		return -EINVAL;
-	if (irq >= NR_IRQS)
+
+	desc = irq_to_desc(irq);
+	if (!desc)
 		return -EINVAL;
-	if (irq_desc[irq].status & IRQ_NOREQUEST)
+
+	if (desc->status & IRQ_NOREQUEST)
 		return -EINVAL;
 	if (!handler)
 		return -EINVAL;
@@ -636,26 +670,29 @@
 	action->next = NULL;
 	action->dev_id = dev_id;
 
+	retval = __setup_irq(irq, desc, action);
+	if (retval)
+		kfree(action);
+
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (irqflags & IRQF_SHARED) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
 		 * to happen immediately, so let's make sure....
-		 * We do this before actually registering it, to make sure that
-		 * a 'real' IRQ doesn't run in parallel with our fake
+		 * We disable the irq to make sure that a 'real' IRQ doesn't
+		 * run in parallel with our fake.
 		 */
 		unsigned long flags;
 
+		disable_irq(irq);
 		local_irq_save(flags);
+
 		handler(irq, dev_id);
+
 		local_irq_restore(flags);
+		enable_irq(irq);
 	}
 #endif
-
-	retval = setup_irq(irq, action);
-	if (retval)
-		kfree(action);
-
 	return retval;
 }
 EXPORT_SYMBOL(request_irq);
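
For reference, the driver-facing side of the code above; a hedged sketch in which struct example_dev and example_irq_pending() are made up, while the flags and return values are the real contract that __setup_irq() now validates (shared handlers must tolerate being called early, and trigger flags are applied on first setup and only compared afterwards).

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	if (!example_irq_pending(ed))
		return IRQ_NONE;	/* shared line, this interrupt is not ours */

	/* ... acknowledge and service the device ... */
	return IRQ_HANDLED;
}

/* probe: */
err = request_irq(ed->irq, example_isr,
		  IRQF_SHARED | IRQF_TRIGGER_RISING, "example", ed);
if (err)
	return err;

/* remove: */
free_irq(ed->irq, ed);
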
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 77b7acc..90b920d 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -3,18 +3,18 @@
 
 void set_pending_irq(unsigned int irq, cpumask_t mask)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_MOVE_PENDING;
-	irq_desc[irq].pending_mask = mask;
+	desc->pending_mask = mask;
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void move_masked_irq(int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	cpumask_t tmp;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
@@ -30,7 +30,7 @@
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
+	if (unlikely(cpus_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,7 +38,7 @@
 
 	assert_spin_locked(&desc->lock);
 
-	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
+	cpus_and(tmp, desc->pending_mask, cpu_online_map);
 
 	/*
 	 * If there was a valid mask to work with, please
@@ -55,12 +55,12 @@
 	if (likely(!cpus_empty(tmp))) {
 		desc->chip->set_affinity(irq,tmp);
 	}
-	cpus_clear(irq_desc[irq].pending_mask);
+	cpus_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a09dd29..fac014a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,7 @@
 
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
-	struct irq_desc *desc = irq_desc + (long)m->private;
+	struct irq_desc *desc = irq_to_desc((long)m->private);
 	cpumask_t *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -43,7 +43,7 @@
 	cpumask_t new_value;
 	int err;
 
-	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
+	if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
 		return -EIO;
 
@@ -132,20 +132,20 @@
 static int irq_spurious_read(char *page, char **start, off_t off,
 				  int count, int *eof, void *data)
 {
-	struct irq_desc *d = &irq_desc[(long) data];
+	struct irq_desc *desc = irq_to_desc((long) data);
 	return sprintf(page, "count %u\n"
 			     "unhandled %u\n"
 			     "last_unhandled %u ms\n",
-			d->irq_count,
-			d->irqs_unhandled,
-			jiffies_to_msecs(d->last_unhandled));
+			desc->irq_count,
+			desc->irqs_unhandled,
+			jiffies_to_msecs(desc->last_unhandled));
 }
 
 #define MAX_NAMELEN 128
 
 static int name_unique(unsigned int irq, struct irqaction *new_action)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 	unsigned long flags;
 	int ret = 1;
@@ -165,8 +165,9 @@
 void register_handler_proc(unsigned int irq, struct irqaction *action)
 {
 	char name [MAX_NAMELEN];
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!irq_desc[irq].dir || action->dir || !action->name ||
+	if (!desc->dir || action->dir || !action->name ||
 					!name_unique(irq, action))
 		return;
 
@@ -174,36 +175,34 @@
 	snprintf(name, MAX_NAMELEN, "%s", action->name);
 
 	/* create /proc/irq/1234/handler/ */
-	action->dir = proc_mkdir(name, irq_desc[irq].dir);
+	action->dir = proc_mkdir(name, desc->dir);
 }
 
 #undef MAX_NAMELEN
 
 #define MAX_NAMELEN 10
 
-void register_irq_proc(unsigned int irq)
+void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
 	char name [MAX_NAMELEN];
 	struct proc_dir_entry *entry;
 
-	if (!root_irq_dir ||
-		(irq_desc[irq].chip == &no_irq_chip) ||
-			irq_desc[irq].dir)
+	if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
 		return;
 
 	memset(name, 0, MAX_NAMELEN);
 	sprintf(name, "%d", irq);
 
 	/* create /proc/irq/1234 */
-	irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
+	desc->dir = proc_mkdir(name, root_irq_dir);
 
 #ifdef CONFIG_SMP
 	/* create /proc/irq/<irq>/smp_affinity */
-	proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
+	proc_create_data("smp_affinity", 0600, desc->dir,
 			 &irq_affinity_proc_fops, (void *)(long)irq);
 #endif
 
-	entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
+	entry = create_proc_entry("spurious", 0444, desc->dir);
 	if (entry) {
 		entry->data = (void *)(long)irq;
 		entry->read_proc = irq_spurious_read;
@@ -214,8 +213,11 @@
 
 void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 {
-	if (action->dir)
-		remove_proc_entry(action->dir->name, irq_desc[irq].dir);
+	if (action->dir) {
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		remove_proc_entry(action->dir->name, desc->dir);
+	}
 }
 
 void register_default_affinity_proc(void)
@@ -228,7 +230,8 @@
 
 void init_irq_proc(void)
 {
-	int i;
+	unsigned int irq;
+	struct irq_desc *desc;
 
 	/* create /proc/irq */
 	root_irq_dir = proc_mkdir("irq", NULL);
@@ -240,7 +243,7 @@
 	/*
 	 * Create entries for all existing IRQs.
 	 */
-	for (i = 0; i < NR_IRQS; i++)
-		register_irq_proc(i);
+	for_each_irq_desc(irq, desc)
+		register_irq_proc(irq, desc);
 }
 
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index a804679..89c7117 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -33,10 +33,10 @@
 	struct irq_desc *desc;
 	int irq;
 
-	while (!bitmap_empty(irqs_resend, NR_IRQS)) {
-		irq = find_first_bit(irqs_resend, NR_IRQS);
+	while (!bitmap_empty(irqs_resend, nr_irqs)) {
+		irq = find_first_bit(irqs_resend, nr_irqs);
 		clear_bit(irq, irqs_resend);
-		desc = irq_desc + irq;
+		desc = irq_to_desc(irq);
 		local_irq_disable();
 		desc->handle_irq(irq, desc);
 		local_irq_enable();
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index c66d3f1..dd364c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -12,83 +12,122 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 #include <linux/moduleparam.h>
+#include <linux/timer.h>
 
 static int irqfixup __read_mostly;
 
+#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
+static void poll_spurious_irqs(unsigned long dummy);
+static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+
 /*
  * Recovery handler for misrouted interrupts.
  */
+static int try_one_irq(int irq, struct irq_desc *desc)
+{
+	struct irqaction *action;
+	int ok = 0, work = 0;
+
+	spin_lock(&desc->lock);
+	/* Already running on another processor */
+	if (desc->status & IRQ_INPROGRESS) {
+		/*
+		 * Already running: If it is shared get the other
+		 * CPU to go looking for our mystery interrupt too
+		 */
+		if (desc->action && (desc->action->flags & IRQF_SHARED))
+			desc->status |= IRQ_PENDING;
+		spin_unlock(&desc->lock);
+		return ok;
+	}
+	/* Honour the normal IRQ locking */
+	desc->status |= IRQ_INPROGRESS;
+	action = desc->action;
+	spin_unlock(&desc->lock);
+
+	while (action) {
+		/* Only shared IRQ handlers are safe to call */
+		if (action->flags & IRQF_SHARED) {
+			if (action->handler(irq, action->dev_id) ==
+				IRQ_HANDLED)
+				ok = 1;
+		}
+		action = action->next;
+	}
+	local_irq_disable();
+	/* Now clean up the flags */
+	spin_lock(&desc->lock);
+	action = desc->action;
+
+	/*
+	 * While we were looking for a fixup someone queued a real
+	 * IRQ clashing with our walk:
+	 */
+	while ((desc->status & IRQ_PENDING) && action) {
+		/*
+		 * Perform real IRQ processing for the IRQ we deferred
+		 */
+		work = 1;
+		spin_unlock(&desc->lock);
+		handle_IRQ_event(irq, action);
+		spin_lock(&desc->lock);
+		desc->status &= ~IRQ_PENDING;
+	}
+	desc->status &= ~IRQ_INPROGRESS;
+	/*
+	 * If we did actual work for the real IRQ line we must let the
+	 * IRQ controller clean up too
+	 */
+	if (work && desc->chip && desc->chip->end)
+		desc->chip->end(irq);
+	spin_unlock(&desc->lock);
+
+	return ok;
+}
+
 static int misrouted_irq(int irq)
 {
-	int i;
-	int ok = 0;
-	int work = 0;	/* Did we do work for a real IRQ */
+	struct irq_desc *desc;
+	int i, ok = 0;
 
-	for (i = 1; i < NR_IRQS; i++) {
-		struct irq_desc *desc = irq_desc + i;
-		struct irqaction *action;
+	for_each_irq_desc(i, desc) {
+		if (!i)
+			 continue;
 
 		if (i == irq)	/* Already tried */
 			continue;
 
-		spin_lock(&desc->lock);
-		/* Already running on another processor */
-		if (desc->status & IRQ_INPROGRESS) {
-			/*
-			 * Already running: If it is shared get the other
-			 * CPU to go looking for our mystery interrupt too
-			 */
-			if (desc->action && (desc->action->flags & IRQF_SHARED))
-				desc->status |= IRQ_PENDING;
-			spin_unlock(&desc->lock);
-			continue;
-		}
-		/* Honour the normal IRQ locking */
-		desc->status |= IRQ_INPROGRESS;
-		action = desc->action;
-		spin_unlock(&desc->lock);
-
-		while (action) {
-			/* Only shared IRQ handlers are safe to call */
-			if (action->flags & IRQF_SHARED) {
-				if (action->handler(i, action->dev_id) ==
-						IRQ_HANDLED)
-					ok = 1;
-			}
-			action = action->next;
-		}
-		local_irq_disable();
-		/* Now clean up the flags */
-		spin_lock(&desc->lock);
-		action = desc->action;
-
-		/*
-		 * While we were looking for a fixup someone queued a real
-		 * IRQ clashing with our walk:
-		 */
-		while ((desc->status & IRQ_PENDING) && action) {
-			/*
-			 * Perform real IRQ processing for the IRQ we deferred
-			 */
-			work = 1;
-			spin_unlock(&desc->lock);
-			handle_IRQ_event(i, action);
-			spin_lock(&desc->lock);
-			desc->status &= ~IRQ_PENDING;
-		}
-		desc->status &= ~IRQ_INPROGRESS;
-		/*
-		 * If we did actual work for the real IRQ line we must let the
-		 * IRQ controller clean up too
-		 */
-		if (work && desc->chip && desc->chip->end)
-			desc->chip->end(i);
-		spin_unlock(&desc->lock);
+		if (try_one_irq(i, desc))
+			ok = 1;
 	}
 	/* So the caller can adjust the irq error counts */
 	return ok;
 }
 
+static void poll_spurious_irqs(unsigned long dummy)
+{
+	struct irq_desc *desc;
+	int i;
+
+	for_each_irq_desc(i, desc) {
+		unsigned int status;
+
+		if (!i)
+			 continue;
+
+		/* Racy but it doesn't matter */
+		status = desc->status;
+		barrier();
+		if (!(status & IRQ_SPURIOUS_DISABLED))
+			continue;
+
+		try_one_irq(i, desc);
+	}
+
+	mod_timer(&poll_spurious_irq_timer,
+		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -137,7 +176,9 @@
 	}
 }
 
-static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+static inline int
+try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
+		  irqreturn_t action_ret)
 {
 	struct irqaction *action;
 
@@ -212,6 +253,9 @@
 		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
 		desc->depth++;
 		desc->chip->disable(irq);
+
+		mod_timer(&poll_spurious_irq_timer,
+			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 	}
 	desc->irqs_unhandled = 0;
 }
@@ -241,7 +285,7 @@
 
 __setup("irqfixup", irqfixup_setup);
 module_param(irqfixup, int, 0644);
-MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode 2: irqpoll mode");
+MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode, 2: irqpoll mode");
 
 static int __init irqpoll_setup(char *str)
 {
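
The new spurious-IRQ polling hangs off a self-rearming kernel timer; the general shape of that pattern, with placeholder names and the same 10-per-second interval, is:

static void example_poll(unsigned long data);
static DEFINE_TIMER(example_poll_timer, example_poll, 0, 0);

static void example_poll(unsigned long data)
{
	/* ... inspect state, e.g. retry interrupts disabled as spurious ... */

	/* re-arm: run again one interval from now */
	mod_timer(&example_poll_timer, jiffies + HZ / 10);
}

/* first activation, e.g. the moment an IRQ line gets disabled as spurious: */
mod_timer(&example_poll_timer, jiffies + HZ / 10);
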
diff --git a/kernel/itimer.c b/kernel/itimer.c
index ab98274..db7c358 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -55,17 +55,15 @@
 		spin_unlock_irq(&tsk->sighand->siglock);
 		break;
 	case ITIMER_VIRTUAL:
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_virt_expires;
 		cinterval = tsk->signal->it_virt_incr;
 		if (!cputime_eq(cval, cputime_zero)) {
-			struct task_struct *t = tsk;
-			cputime_t utime = tsk->signal->utime;
-			do {
-				utime = cputime_add(utime, t->utime);
-				t = next_thread(t);
-			} while (t != tsk);
+			struct task_cputime cputime;
+			cputime_t utime;
+
+			thread_group_cputime(tsk, &cputime);
+			utime = cputime.utime;
 			if (cputime_le(cval, utime)) { /* about to fire */
 				cval = jiffies_to_cputime(1);
 			} else {
@@ -73,25 +71,19 @@
 			}
 		}
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		cputime_to_timeval(cval, &value->it_value);
 		cputime_to_timeval(cinterval, &value->it_interval);
 		break;
 	case ITIMER_PROF:
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_prof_expires;
 		cinterval = tsk->signal->it_prof_incr;
 		if (!cputime_eq(cval, cputime_zero)) {
-			struct task_struct *t = tsk;
-			cputime_t ptime = cputime_add(tsk->signal->utime,
-						      tsk->signal->stime);
-			do {
-				ptime = cputime_add(ptime,
-						    cputime_add(t->utime,
-								t->stime));
-				t = next_thread(t);
-			} while (t != tsk);
+			struct task_cputime times;
+			cputime_t ptime;
+
+			thread_group_cputime(tsk, &times);
+			ptime = cputime_add(times.utime, times.stime);
 			if (cputime_le(cval, ptime)) { /* about to fire */
 				cval = jiffies_to_cputime(1);
 			} else {
@@ -99,7 +91,6 @@
 			}
 		}
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		cputime_to_timeval(cval, &value->it_value);
 		cputime_to_timeval(cinterval, &value->it_interval);
 		break;
@@ -185,7 +176,6 @@
 	case ITIMER_VIRTUAL:
 		nval = timeval_to_cputime(&value->it_value);
 		ninterval = timeval_to_cputime(&value->it_interval);
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_virt_expires;
 		cinterval = tsk->signal->it_virt_incr;
@@ -200,7 +190,6 @@
 		tsk->signal->it_virt_expires = nval;
 		tsk->signal->it_virt_incr = ninterval;
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		if (ovalue) {
 			cputime_to_timeval(cval, &ovalue->it_value);
 			cputime_to_timeval(cinterval, &ovalue->it_interval);
@@ -209,7 +198,6 @@
 	case ITIMER_PROF:
 		nval = timeval_to_cputime(&value->it_value);
 		ninterval = timeval_to_cputime(&value->it_interval);
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_prof_expires;
 		cinterval = tsk->signal->it_prof_incr;
@@ -224,7 +212,6 @@
 		tsk->signal->it_prof_expires = nval;
 		tsk->signal->it_prof_incr = ninterval;
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		if (ovalue) {
 			cputime_to_timeval(cval, &ovalue->it_value);
 			cputime_to_timeval(cinterval, &ovalue->it_interval);
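
The accounting call that replaces the hand-rolled thread walks above, shown in isolation; it sums the whole thread group without taking tasklist_lock (tsk can be any task in the group):

	struct task_cputime totals;
	cputime_t utime, ptime;

	thread_group_cputime(tsk, &totals);
	utime = totals.utime;					/* ITIMER_VIRTUAL base */
	ptime = cputime_add(totals.utime, totals.stime);	/* ITIMER_PROF base */
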
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aef2653..ac0fde7 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -30,6 +30,7 @@
 #include <linux/pm.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
+#include <linux/vmalloc.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1371,6 +1372,7 @@
 	VMCOREINFO_SYMBOL(node_online_map);
 	VMCOREINFO_SYMBOL(swapper_pg_dir);
 	VMCOREINFO_SYMBOL(_stext);
+	VMCOREINFO_SYMBOL(vmlist);
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 	VMCOREINFO_SYMBOL(mem_map);
@@ -1406,6 +1408,7 @@
 	VMCOREINFO_OFFSET(free_area, free_list);
 	VMCOREINFO_OFFSET(list_head, next);
 	VMCOREINFO_OFFSET(list_head, prev);
+	VMCOREINFO_OFFSET(vm_struct, addr);
 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 96cff2f..8e7a7ce 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -13,6 +13,7 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <trace/sched.h>
 
 #define KTHREAD_NICE_LEVEL (-5)
 
@@ -171,12 +172,11 @@
  */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
-	if (k->state != TASK_UNINTERRUPTIBLE) {
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
 		WARN_ON(1);
 		return;
 	}
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	wait_task_inactive(k, 0);
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
 	k->rt.nr_cpus_allowed = 1;
@@ -206,6 +206,8 @@
 	/* It could exit after stop_info.k set, but before wake_up_process. */
 	get_task_struct(k);
 
+	trace_sched_kthread_stop(k);
+
 	/* Must init completion *before* thread sees kthread_stop_info.k */
 	init_completion(&kthread_stop_info.done);
 	smp_wmb();
@@ -221,6 +223,8 @@
 	ret = kthread_stop_info.err;
 	mutex_unlock(&kthread_stop_lock);
 
+	trace_sched_kthread_stop_ret(ret);
+
 	return ret;
 }
 EXPORT_SYMBOL(kthread_stop);
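
For context on the ordering the new wait_task_inactive() check enforces, the usual creator-side sequence (the thread body and CPU variable are illustrative):

static int example_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* ... per-CPU work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

	struct task_struct *tsk;

	tsk = kthread_create(example_thread, NULL, "example/%d", cpu);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	kthread_bind(tsk, cpu);		/* thread has scheduled and is inactive: safe to move */
	wake_up_process(tsk);
	/* ... later ... */
	kthread_stop(tsk);		/* now also fires trace_sched_kthread_stop{,_ret} */
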
diff --git a/kernel/marker.c b/kernel/marker.c
index 7d1faec..e9c6b2b 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -62,7 +62,7 @@
 	int refcount;	/* Number of times armed. 0 if disarmed. */
 	struct rcu_head rcu;
 	void *oldptr;
-	unsigned char rcu_pending:1;
+	int rcu_pending;
 	unsigned char ptype:1;
 	char name[0];	/* Contains name'\0'format'\0' */
 };
@@ -103,11 +103,11 @@
 	char ptype;
 
 	/*
-	 * preempt_disable does two things : disabling preemption to make sure
-	 * the teardown of the callbacks can be done correctly when they are in
-	 * modules and they insure RCU read coherency.
+	 * rcu_read_lock_sched does two things: it disables preemption, so the
+	 * teardown of the callbacks can be done correctly when they are in
+	 * modules, and it ensures RCU read coherency.
 	 */
-	preempt_disable();
+	rcu_read_lock_sched();
 	ptype = mdata->ptype;
 	if (likely(!ptype)) {
 		marker_probe_func *func;
@@ -145,7 +145,7 @@
 			va_end(args);
 		}
 	}
-	preempt_enable();
+	rcu_read_unlock_sched();
 }
 EXPORT_SYMBOL_GPL(marker_probe_cb);
 
@@ -162,7 +162,7 @@
 	va_list args;	/* not initialized */
 	char ptype;
 
-	preempt_disable();
+	rcu_read_lock_sched();
 	ptype = mdata->ptype;
 	if (likely(!ptype)) {
 		marker_probe_func *func;
@@ -195,7 +195,7 @@
 			multi[i].func(multi[i].probe_private, call_private,
 				mdata->format, &args);
 	}
-	preempt_enable();
+	rcu_read_unlock_sched();
 }
 EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
 
@@ -560,7 +560,7 @@
  * Disable a marker and its probe callback.
  * Note: only waiting an RCU period after setting elem->call to the empty
  * function insures that the original callback is not used anymore. This insured
- * by preempt_disable around the call site.
+ * by rcu_read_lock_sched around the call site.
  */
 static void disable_marker(struct marker *elem)
 {
@@ -653,11 +653,17 @@
 	entry = get_marker(name);
 	if (!entry) {
 		entry = add_marker(name, format);
-		if (IS_ERR(entry)) {
+		if (IS_ERR(entry))
 			ret = PTR_ERR(entry);
-			goto end;
-		}
+	} else if (format) {
+		if (!entry->format)
+			ret = marker_set_format(&entry, format);
+		else if (strcmp(entry->format, format))
+			ret = -EPERM;
 	}
+	if (ret)
+		goto end;
+
 	/*
 	 * If we detect that a call_rcu is pending for this marker,
 	 * make sure it's executed now.
@@ -674,6 +680,8 @@
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
 	WARN_ON(!entry);
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
 	entry->oldptr = old;
 	entry->rcu_pending = 1;
 	/* write rcu_pending before calling the RCU callback */
@@ -717,6 +725,8 @@
 	entry = get_marker(name);
 	if (!entry)
 		goto end;
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
 	entry->oldptr = old;
 	entry->rcu_pending = 1;
 	/* write rcu_pending before calling the RCU callback */
@@ -795,6 +805,8 @@
 	mutex_lock(&markers_mutex);
 	entry = get_marker_from_private_data(probe, probe_private);
 	WARN_ON(!entry);
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
 	entry->oldptr = old;
 	entry->rcu_pending = 1;
 	/* write rcu_pending before calling the RCU callback */
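
The marker fast path now leans on sched-RCU rather than bare preempt_disable(); the generic reader/updater pairing behind that looks roughly like the following, where struct example_probe is a stand-in and not the marker code itself.

struct example_probe {
	void (*func)(void *data);
	void *data;
};
static struct example_probe *example_active;

static void example_call_probe(void)
{
	struct example_probe *p;

	rcu_read_lock_sched();			/* also disables preemption */
	p = rcu_dereference(example_active);
	if (p)
		p->func(p->data);
	rcu_read_unlock_sched();
}

static void example_replace_probe(struct example_probe *new)
{
	struct example_probe *old = example_active;

	rcu_assign_pointer(example_active, new);
	synchronize_sched();			/* every sched-RCU reader has finished */
	kfree(old);
}
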
diff --git a/kernel/module.c b/kernel/module.c
index 25bc9ac..c0f1826 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -42,10 +42,13 @@
 #include <linux/string.h>
 #include <linux/mutex.h>
 #include <linux/unwind.h>
+#include <linux/rculist.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <linux/license.h>
 #include <asm/sections.h>
+#include <linux/tracepoint.h>
+#include <linux/ftrace.h>
 
 #if 0
 #define DEBUGP printk
@@ -61,7 +64,7 @@
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
 /* List of modules, protected by module_mutex or preempt_disable
- * (add/delete uses stop_machine). */
+ * (delete uses stop_machine/add uses RCU list operations). */
 static DEFINE_MUTEX(module_mutex);
 static LIST_HEAD(modules);
 
@@ -130,6 +133,29 @@
 	return 0;
 }
 
+/* Find a module section, or NULL. */
+static void *section_addr(Elf_Ehdr *hdr, Elf_Shdr *shdrs,
+			  const char *secstrings, const char *name)
+{
+	/* Section 0 has sh_addr 0. */
+	return (void *)shdrs[find_sec(hdr, shdrs, secstrings, name)].sh_addr;
+}
+
+/* Find a module section, or NULL.  Fill in number of "objects" in section. */
+static void *section_objs(Elf_Ehdr *hdr,
+			  Elf_Shdr *sechdrs,
+			  const char *secstrings,
+			  const char *name,
+			  size_t object_size,
+			  unsigned int *num)
+{
+	unsigned int sec = find_sec(hdr, sechdrs, secstrings, name);
+
+	/* Section 0 has sh_addr 0 and sh_size 0. */
+	*num = sechdrs[sec].sh_size / object_size;
+	return (void *)sechdrs[sec].sh_addr;
+}
+
 /* Provided by the linker */
 extern const struct kernel_symbol __start___ksymtab[];
 extern const struct kernel_symbol __stop___ksymtab[];
@@ -216,7 +242,7 @@
 	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 		return true;
 
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		struct symsearch arr[] = {
 			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 			  NOT_GPL_ONLY, false },
@@ -1392,17 +1418,6 @@
 }
 
 /*
- * link the module with the whole machine is stopped with interrupts off
- * - this defends against kallsyms not taking locks
- */
-static int __link_module(void *_mod)
-{
-	struct module *mod = _mod;
-	list_add(&mod->list, &modules);
-	return 0;
-}
-
-/*
  * unlink the module with the whole machine is stopped with interrupts off
  * - this defends against kallsyms not taking locks
  */
@@ -1430,6 +1445,9 @@
 	/* Module unload stuff */
 	module_unload_free(mod);
 
+	/* release any pointers to mcount in this module */
+	ftrace_release(mod->module_core, mod->core_size);
+
 	/* This may be NULL, but that's OK */
 	module_free(mod, mod->module_init);
 	kfree(mod->args);
@@ -1784,32 +1802,20 @@
 }
 #endif /* CONFIG_KALLSYMS */
 
+static void dynamic_printk_setup(struct mod_debug *debug, unsigned int num)
+{
 #ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
-static void dynamic_printk_setup(Elf_Shdr *sechdrs, unsigned int verboseindex)
-{
-	struct mod_debug *debug_info;
-	unsigned long pos, end;
-	unsigned int num_verbose;
+	unsigned int i;
 
-	pos = sechdrs[verboseindex].sh_addr;
-	num_verbose = sechdrs[verboseindex].sh_size /
-				sizeof(struct mod_debug);
-	end = pos + (num_verbose * sizeof(struct mod_debug));
-
-	for (; pos < end; pos += sizeof(struct mod_debug)) {
-		debug_info = (struct mod_debug *)pos;
-		register_dynamic_debug_module(debug_info->modname,
-			debug_info->type, debug_info->logical_modname,
-			debug_info->flag_names, debug_info->hash,
-			debug_info->hash2);
+	for (i = 0; i < num; i++) {
+		register_dynamic_debug_module(debug[i].modname,
+					      debug[i].type,
+					      debug[i].logical_modname,
+					      debug[i].flag_names,
+					      debug[i].hash, debug[i].hash2);
 	}
-}
-#else
-static inline void dynamic_printk_setup(Elf_Shdr *sechdrs,
-					unsigned int verboseindex)
-{
-}
 #endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
+}
 
 static void *module_alloc_update_bounds(unsigned long size)
 {
@@ -1838,33 +1844,14 @@
 	unsigned int i;
 	unsigned int symindex = 0;
 	unsigned int strindex = 0;
-	unsigned int setupindex;
-	unsigned int exindex;
-	unsigned int exportindex;
-	unsigned int modindex;
-	unsigned int obsparmindex;
-	unsigned int infoindex;
-	unsigned int gplindex;
-	unsigned int crcindex;
-	unsigned int gplcrcindex;
-	unsigned int versindex;
-	unsigned int pcpuindex;
-	unsigned int gplfutureindex;
-	unsigned int gplfuturecrcindex;
+	unsigned int modindex, versindex, infoindex, pcpuindex;
 	unsigned int unwindex = 0;
-#ifdef CONFIG_UNUSED_SYMBOLS
-	unsigned int unusedindex;
-	unsigned int unusedcrcindex;
-	unsigned int unusedgplindex;
-	unsigned int unusedgplcrcindex;
-#endif
-	unsigned int markersindex;
-	unsigned int markersstringsindex;
-	unsigned int verboseindex;
+	unsigned int num_kp, num_mcount;
+	struct kernel_param *kp;
 	struct module *mod;
 	long err = 0;
 	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
-	struct exception_table_entry *extable;
+	unsigned long *mseg;
 	mm_segment_t old_fs;
 
 	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -1928,6 +1915,7 @@
 		err = -ENOEXEC;
 		goto free_hdr;
 	}
+	/* This is temporary: point mod into copy of data. */
 	mod = (void *)sechdrs[modindex].sh_addr;
 
 	if (symindex == 0) {
@@ -1937,22 +1925,6 @@
 		goto free_hdr;
 	}
 
-	/* Optional sections */
-	exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
-	gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
-	gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
-	crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
-	gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
-	gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
-#ifdef CONFIG_UNUSED_SYMBOLS
-	unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
-	unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
-	unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused");
-	unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl");
-#endif
-	setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
-	exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
-	obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
 	versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
 	infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
 	pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
@@ -2108,42 +2080,57 @@
 	if (err < 0)
 		goto cleanup;
 
-	/* Set up EXPORTed & EXPORT_GPLed symbols (section 0 is 0 length) */
-	mod->num_syms = sechdrs[exportindex].sh_size / sizeof(*mod->syms);
-	mod->syms = (void *)sechdrs[exportindex].sh_addr;
-	if (crcindex)
-		mod->crcs = (void *)sechdrs[crcindex].sh_addr;
-	mod->num_gpl_syms = sechdrs[gplindex].sh_size / sizeof(*mod->gpl_syms);
-	mod->gpl_syms = (void *)sechdrs[gplindex].sh_addr;
-	if (gplcrcindex)
-		mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
-	mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size /
-					sizeof(*mod->gpl_future_syms);
-	mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr;
-	if (gplfuturecrcindex)
-		mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr;
+	/* Now we've got everything in the final locations, we can
+	 * find optional sections. */
+	kp = section_objs(hdr, sechdrs, secstrings, "__param", sizeof(*kp),
+			  &num_kp);
+	mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab",
+				 sizeof(*mod->syms), &mod->num_syms);
+	mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab");
+	mod->gpl_syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab_gpl",
+				     sizeof(*mod->gpl_syms),
+				     &mod->num_gpl_syms);
+	mod->gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_gpl");
+	mod->gpl_future_syms = section_objs(hdr, sechdrs, secstrings,
+					    "__ksymtab_gpl_future",
+					    sizeof(*mod->gpl_future_syms),
+					    &mod->num_gpl_future_syms);
+	mod->gpl_future_crcs = section_addr(hdr, sechdrs, secstrings,
+					    "__kcrctab_gpl_future");
 
 #ifdef CONFIG_UNUSED_SYMBOLS
-	mod->num_unused_syms = sechdrs[unusedindex].sh_size /
-					sizeof(*mod->unused_syms);
-	mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
-					sizeof(*mod->unused_gpl_syms);
-	mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr;
-	if (unusedcrcindex)
-		mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
-	mod->unused_gpl_syms = (void *)sechdrs[unusedgplindex].sh_addr;
-	if (unusedgplcrcindex)
-		mod->unused_gpl_crcs
-			= (void *)sechdrs[unusedgplcrcindex].sh_addr;
+	mod->unused_syms = section_objs(hdr, sechdrs, secstrings,
+					"__ksymtab_unused",
+					sizeof(*mod->unused_syms),
+					&mod->num_unused_syms);
+	mod->unused_crcs = section_addr(hdr, sechdrs, secstrings,
+					"__kcrctab_unused");
+	mod->unused_gpl_syms = section_objs(hdr, sechdrs, secstrings,
+					    "__ksymtab_unused_gpl",
+					    sizeof(*mod->unused_gpl_syms),
+					    &mod->num_unused_gpl_syms);
+	mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings,
+					    "__kcrctab_unused_gpl");
+#endif
+
+#ifdef CONFIG_MARKERS
+	mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers",
+				    sizeof(*mod->markers), &mod->num_markers);
+#endif
+#ifdef CONFIG_TRACEPOINTS
+	mod->tracepoints = section_objs(hdr, sechdrs, secstrings,
+					"__tracepoints",
+					sizeof(*mod->tracepoints),
+					&mod->num_tracepoints);
 #endif
 
 #ifdef CONFIG_MODVERSIONS
-	if ((mod->num_syms && !crcindex)
-	    || (mod->num_gpl_syms && !gplcrcindex)
-	    || (mod->num_gpl_future_syms && !gplfuturecrcindex)
+	if ((mod->num_syms && !mod->crcs)
+	    || (mod->num_gpl_syms && !mod->gpl_crcs)
+	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
 #ifdef CONFIG_UNUSED_SYMBOLS
-	    || (mod->num_unused_syms && !unusedcrcindex)
-	    || (mod->num_unused_gpl_syms && !unusedgplcrcindex)
+	    || (mod->num_unused_syms && !mod->unused_crcs)
+	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
 #endif
 		) {
 		printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
@@ -2152,10 +2139,6 @@
 			goto cleanup;
 	}
 #endif
-	markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
- 	markersstringsindex = find_sec(hdr, sechdrs, secstrings,
-					"__markers_strings");
-	verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
 
 	/* Now do relocations. */
 	for (i = 1; i < hdr->e_shnum; i++) {
@@ -2178,22 +2161,16 @@
 		if (err < 0)
 			goto cleanup;
 	}
-#ifdef CONFIG_MARKERS
-	mod->markers = (void *)sechdrs[markersindex].sh_addr;
-	mod->num_markers =
-		sechdrs[markersindex].sh_size / sizeof(*mod->markers);
-#endif
 
         /* Find duplicate symbols */
 	err = verify_export_symbols(mod);
-
 	if (err < 0)
 		goto cleanup;
 
   	/* Set up and sort exception table */
-	mod->num_exentries = sechdrs[exindex].sh_size / sizeof(*mod->extable);
-	mod->extable = extable = (void *)sechdrs[exindex].sh_addr;
-	sort_extable(extable, extable + mod->num_exentries);
+	mod->extable = section_objs(hdr, sechdrs, secstrings, "__ex_table",
+				    sizeof(*mod->extable), &mod->num_exentries);
+	sort_extable(mod->extable, mod->extable + mod->num_exentries);
 
 	/* Finally, copy percpu area over. */
 	percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
@@ -2201,12 +2178,29 @@
 
 	add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
 
+	if (!mod->taints) {
+		struct mod_debug *debug;
+		unsigned int num_debug;
+
 #ifdef CONFIG_MARKERS
-	if (!mod->taints)
 		marker_update_probe_range(mod->markers,
 			mod->markers + mod->num_markers);
 #endif
-	dynamic_printk_setup(sechdrs, verboseindex);
+		debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
+				     sizeof(*debug), &num_debug);
+		dynamic_printk_setup(debug, num_debug);
+
+#ifdef CONFIG_TRACEPOINTS
+		tracepoint_update_probe_range(mod->tracepoints,
+			mod->tracepoints + mod->num_tracepoints);
+#endif
+	}
+
+	/* sechdrs[0].sh_size is always zero */
+	mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
+			    sizeof(*mseg), &num_mcount);
+	ftrace_init_module(mseg, mseg + num_mcount);
+
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
 		goto cleanup;
@@ -2230,30 +2224,24 @@
 	set_fs(old_fs);
 
 	mod->args = args;
-	if (obsparmindex)
+	if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
 		printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
 		       mod->name);
 
 	/* Now sew it into the lists so we can get lockdep and oops
-         * info during argument parsing.  Noone should access us, since
-         * strong_try_module_get() will fail. */
-	stop_machine(__link_module, mod, NULL);
+	 * info during argument parsing.  No one should access us, since
+	 * strong_try_module_get() will fail.
+	 * lockdep/oops can run asynchronously, so use the RCU list insertion
+	 * function to insert in a way safe to concurrent readers.
+	 * The mutex protects against concurrent writers.
+	 */
+	list_add_rcu(&mod->list, &modules);
 
-	/* Size of section 0 is 0, so this works well if no params */
-	err = parse_args(mod->name, mod->args,
-			 (struct kernel_param *)
-			 sechdrs[setupindex].sh_addr,
-			 sechdrs[setupindex].sh_size
-			 / sizeof(struct kernel_param),
-			 NULL);
+	err = parse_args(mod->name, mod->args, kp, num_kp, NULL);
 	if (err < 0)
 		goto unlink;
 
-	err = mod_sysfs_setup(mod,
-			      (struct kernel_param *)
-			      sechdrs[setupindex].sh_addr,
-			      sechdrs[setupindex].sh_size
-			      / sizeof(struct kernel_param));
+	err = mod_sysfs_setup(mod, kp, num_kp);
 	if (err < 0)
 		goto unlink;
 	add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
@@ -2276,6 +2264,7 @@
  cleanup:
 	kobject_del(&mod->mkobj.kobj);
 	kobject_put(&mod->mkobj.kobj);
+	ftrace_release(mod->module_core, mod->core_size);
  free_unload:
 	module_unload_free(mod);
 	module_free(mod, mod->module_init);
@@ -2441,7 +2430,7 @@
 	const char *ret = NULL;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size)
 		    || within(addr, mod->module_core, mod->core_size)) {
 			if (modname)
@@ -2464,7 +2453,7 @@
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size) ||
 		    within(addr, mod->module_core, mod->core_size)) {
 			const char *sym;
@@ -2488,7 +2477,7 @@
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size) ||
 		    within(addr, mod->module_core, mod->core_size)) {
 			const char *sym;
@@ -2515,7 +2504,7 @@
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (symnum < mod->num_symtab) {
 			*value = mod->symtab[symnum].st_value;
 			*type = mod->symtab[symnum].st_info;
@@ -2558,7 +2547,7 @@
 			ret = mod_find_symname(mod, colon+1);
 		*colon = ':';
 	} else {
-		list_for_each_entry(mod, &modules, list)
+		list_for_each_entry_rcu(mod, &modules, list)
 			if ((ret = mod_find_symname(mod, name)) != 0)
 				break;
 	}
@@ -2661,7 +2650,7 @@
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->num_exentries == 0)
 			continue;
 
@@ -2687,7 +2676,7 @@
 
 	preempt_disable();
 
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_core, mod->core_size)) {
 			preempt_enable();
 			return 1;
@@ -2708,7 +2697,7 @@
 	if (addr < module_addr_min || addr > module_addr_max)
 		return NULL;
 
-	list_for_each_entry(mod, &modules, list)
+	list_for_each_entry_rcu(mod, &modules, list)
 		if (within(addr, mod->module_init, mod->init_text_size)
 		    || within(addr, mod->module_core, mod->core_text_size))
 			return mod;
@@ -2733,8 +2722,11 @@
 	char buf[8];
 
 	printk("Modules linked in:");
-	list_for_each_entry(mod, &modules, list)
+	/* Most callers should already have preempt disabled, but make sure */
+	preempt_disable();
+	list_for_each_entry_rcu(mod, &modules, list)
 		printk(" %s%s", mod->name, module_flags(mod, buf));
+	preempt_enable();
 	if (last_unloaded_module[0])
 		printk(" [last unloaded: %s]", last_unloaded_module);
 	printk("\n");
@@ -2759,3 +2751,50 @@
 	mutex_unlock(&module_mutex);
 }
 #endif
+
+#ifdef CONFIG_TRACEPOINTS
+void module_update_tracepoints(void)
+{
+	struct module *mod;
+
+	mutex_lock(&module_mutex);
+	list_for_each_entry(mod, &modules, list)
+		if (!mod->taints)
+			tracepoint_update_probe_range(mod->tracepoints,
+				mod->tracepoints + mod->num_tracepoints);
+	mutex_unlock(&module_mutex);
+}
+
+/*
+ * Returns 0 if current not found.
+ * Returns 1 if current found.
+ */
+int module_get_iter_tracepoints(struct tracepoint_iter *iter)
+{
+	struct module *iter_mod;
+	int found = 0;
+
+	mutex_lock(&module_mutex);
+	list_for_each_entry(iter_mod, &modules, list) {
+		if (!iter_mod->taints) {
+			/*
+			 * Sorted module list
+			 */
+			if (iter_mod < iter->module)
+				continue;
+			else if (iter_mod > iter->module)
+				iter->tracepoint = NULL;
+			found = tracepoint_get_iter_range(&iter->tracepoint,
+				iter_mod->tracepoints,
+				iter_mod->tracepoints
+					+ iter_mod->num_tracepoints);
+			if (found) {
+				iter->module = iter_mod;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&module_mutex);
+	return found;
+}
+#endif
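
The kernel/module.c changes above publish new modules with list_add_rcu() and convert the readers (symbol lookup, print_modules(), the exception-table walk) to list_for_each_entry_rcu(), so they can traverse the list with only preemption disabled. A standalone userspace sketch of just that publish-then-traverse shape follows; it is not real RCU (no grace periods, no removal), C11 release/acquire atomics stand in for list_add_rcu()/rcu_dereference(), and all names in it are invented.

/*
 * Standalone userspace sketch, not kernel code: a mutex serialises writers,
 * readers walk the list locklessly, and the node is fully initialised before
 * it is published with a release store.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct mod {
	const char *name;
	struct mod *_Atomic next;
};

static struct mod *_Atomic modules;	/* list head */
static pthread_mutex_t module_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Writer: initialise the node, then publish it with a release store. */
static void mod_add(struct mod *m)
{
	pthread_mutex_lock(&module_mutex);
	atomic_store_explicit(&m->next,
			      atomic_load_explicit(&modules, memory_order_relaxed),
			      memory_order_relaxed);
	atomic_store_explicit(&modules, m, memory_order_release);
	pthread_mutex_unlock(&module_mutex);
}

/* Reader: no lock; acquire loads pair with the release store above. */
static void mod_dump(void)
{
	struct mod *m;

	printf("Modules linked in:");
	for (m = atomic_load_explicit(&modules, memory_order_acquire);
	     m != NULL;
	     m = atomic_load_explicit(&m->next, memory_order_acquire))
		printf(" %s", m->name);
	printf("\n");
}

int main(void)
{
	struct mod a = { .name = "dummy_a" };
	struct mod b = { .name = "dummy_b" };

	mod_add(&a);
	mod_add(&b);
	mod_dump();	/* prints "Modules linked in: dummy_b dummy_a" */
	return 0;
}
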
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 823be115..4282c0a 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -550,7 +550,7 @@
 
 static ATOMIC_NOTIFIER_HEAD(die_chain);
 
-int notify_die(enum die_val val, const char *str,
+int notrace notify_die(enum die_val val, const char *str,
 	       struct pt_regs *regs, long err, int trap, int sig)
 {
 	struct die_args args = {
diff --git a/kernel/panic.c b/kernel/panic.c
index bda561e..6513aac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -34,13 +34,6 @@
 
 EXPORT_SYMBOL(panic_notifier_list);
 
-static int __init panic_setup(char *str)
-{
-	panic_timeout = simple_strtoul(str, NULL, 0);
-	return 1;
-}
-__setup("panic=", panic_setup);
-
 static long no_blink(long time)
 {
 	return 0;
@@ -218,13 +211,6 @@
 }
 EXPORT_SYMBOL(add_taint);
 
-static int __init pause_on_oops_setup(char *str)
-{
-	pause_on_oops = simple_strtoul(str, NULL, 0);
-	return 1;
-}
-__setup("pause_on_oops=", pause_on_oops_setup);
-
 static void spin_msec(int msecs)
 {
 	int i;
@@ -384,3 +370,6 @@
 }
 EXPORT_SYMBOL(__stack_chk_fail);
 #endif
+
+core_param(panic, panic_timeout, int, 0644);
+core_param(pause_on_oops, pause_on_oops, int, 0644);
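
The panic.c hunk replaces two __setup() parsers with core_param(), which registers panic_timeout and pause_on_oops once and lets the generic parameter code handle both the command-line forms and the /sys/module/kernel/parameters/ files. The toy program below, a userspace approximation with invented names (toy_param, MY_CORE_PARAM), shows only the register-once, parse-generically idea; it is not the kernel API.

/*
 * Userspace toy, not the kernel API: register an int behind a name once and
 * let a generic parser fill it in from "name=value" arguments.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_param {
	const char *name;
	int *value;
};

static int panic_timeout;
static int pause_on_oops;

#define MY_CORE_PARAM(var) { #var, &(var) }

static const struct toy_param toy_params[] = {
	MY_CORE_PARAM(panic_timeout),
	MY_CORE_PARAM(pause_on_oops),
};

static void toy_parse_args(int argc, char **argv)
{
	for (int i = 1; i < argc; i++) {
		const char *eq = strchr(argv[i], '=');

		if (!eq)
			continue;
		for (size_t j = 0; j < sizeof(toy_params) / sizeof(toy_params[0]); j++) {
			size_t len = strlen(toy_params[j].name);

			if ((size_t)(eq - argv[i]) == len &&
			    strncmp(argv[i], toy_params[j].name, len) == 0)
				*toy_params[j].value = atoi(eq + 1);
		}
	}
}

int main(int argc, char **argv)
{
	toy_parse_args(argc, argv);	/* e.g. ./a.out panic_timeout=30 */
	printf("panic_timeout=%d pause_on_oops=%d\n",
	       panic_timeout, pause_on_oops);
	return 0;
}
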
diff --git a/kernel/params.c b/kernel/params.c
index afc46a2..b077f1b 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -373,6 +373,8 @@
 }
 
 /* sysfs output in /sys/modules/XYZ/parameters/ */
+#define to_module_attr(n) container_of(n, struct module_attribute, attr);
+#define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
 
 extern struct kernel_param __start___param[], __stop___param[];
 
@@ -384,6 +386,7 @@
 
 struct module_param_attrs
 {
+	unsigned int num;
 	struct attribute_group grp;
 	struct param_attribute attrs[0];
 };
@@ -434,69 +437,84 @@
 
 #ifdef CONFIG_SYSFS
 /*
- * param_sysfs_setup - setup sysfs support for one module or KBUILD_MODNAME
- * @mk: struct module_kobject (contains parent kobject)
- * @kparam: array of struct kernel_param, the actual parameter definitions
- * @num_params: number of entries in array
- * @name_skip: offset where the parameter name start in kparam[].name. Needed for built-in "modules"
+ * add_sysfs_param - add a parameter to sysfs
+ * @mk: struct module_kobject
+ * @kparam: the actual parameter definition to add to sysfs
+ * @name: name of parameter
  *
- * Create a kobject for a (per-module) group of parameters, and create files
- * in sysfs. A pointer to the param_kobject is returned on success,
- * NULL if there's no parameter to export, or other ERR_PTR(err).
+ * Create a kobject for a (per-module) parameter if mp is NULL, and
+ * create a file in sysfs.  Returns an error on out of memory.  Always cleans up
+ * if there's an error.
  */
-static __modinit struct module_param_attrs *
-param_sysfs_setup(struct module_kobject *mk,
-		  struct kernel_param *kparam,
-		  unsigned int num_params,
-		  unsigned int name_skip)
+static __modinit int add_sysfs_param(struct module_kobject *mk,
+				     struct kernel_param *kp,
+				     const char *name)
 {
-	struct module_param_attrs *mp;
-	unsigned int valid_attrs = 0;
-	unsigned int i, size[2];
-	struct param_attribute *pattr;
-	struct attribute **gattr;
-	int err;
+	struct module_param_attrs *new;
+	struct attribute **attrs;
+	int err, num;
 
-	for (i=0; i<num_params; i++) {
-		if (kparam[i].perm)
-			valid_attrs++;
+	/* We don't bother calling this with invisible parameters. */
+	BUG_ON(!kp->perm);
+
+	if (!mk->mp) {
+		num = 0;
+		attrs = NULL;
+	} else {
+		num = mk->mp->num;
+		attrs = mk->mp->grp.attrs;
 	}
 
-	if (!valid_attrs)
-		return NULL;
-
-	size[0] = ALIGN(sizeof(*mp) +
-			valid_attrs * sizeof(mp->attrs[0]),
-			sizeof(mp->grp.attrs[0]));
-	size[1] = (valid_attrs + 1) * sizeof(mp->grp.attrs[0]);
-
-	mp = kzalloc(size[0] + size[1], GFP_KERNEL);
-	if (!mp)
-		return ERR_PTR(-ENOMEM);
-
-	mp->grp.name = "parameters";
-	mp->grp.attrs = (void *)mp + size[0];
-
-	pattr = &mp->attrs[0];
-	gattr = &mp->grp.attrs[0];
-	for (i = 0; i < num_params; i++) {
-		struct kernel_param *kp = &kparam[i];
-		if (kp->perm) {
-			pattr->param = kp;
-			pattr->mattr.show = param_attr_show;
-			pattr->mattr.store = param_attr_store;
-			pattr->mattr.attr.name = (char *)&kp->name[name_skip];
-			pattr->mattr.attr.mode = kp->perm;
-			*(gattr++) = &(pattr++)->mattr.attr;
-		}
+	/* Enlarge. */
+	new = krealloc(mk->mp,
+		       sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
+		       GFP_KERNEL);
+	if (!new) {
+		kfree(mk->mp);
+		err = -ENOMEM;
+		goto fail;
 	}
-	*gattr = NULL;
-
-	if ((err = sysfs_create_group(&mk->kobj, &mp->grp))) {
-		kfree(mp);
-		return ERR_PTR(err);
+	attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
+	if (!attrs) {
+		err = -ENOMEM;
+		goto fail_free_new;
 	}
-	return mp;
+
+	/* Sysfs wants everything zeroed. */
+	memset(new, 0, sizeof(*new));
+	memset(&new->attrs[num], 0, sizeof(new->attrs[num]));
+	memset(&attrs[num], 0, sizeof(attrs[num]));
+	new->grp.name = "parameters";
+	new->grp.attrs = attrs;
+
+	/* Tack new one on the end. */
+	new->attrs[num].param = kp;
+	new->attrs[num].mattr.show = param_attr_show;
+	new->attrs[num].mattr.store = param_attr_store;
+	new->attrs[num].mattr.attr.name = (char *)name;
+	new->attrs[num].mattr.attr.mode = kp->perm;
+	new->num = num+1;
+
+	/* Fix up all the pointers, since krealloc can move us */
+	for (num = 0; num < new->num; num++)
+		new->grp.attrs[num] = &new->attrs[num].mattr.attr;
+	new->grp.attrs[num] = NULL;
+
+	mk->mp = new;
+	return 0;
+
+fail_free_new:
+	kfree(new);
+fail:
+	mk->mp = NULL;
+	return err;
+}
+
+static void free_module_param_attrs(struct module_kobject *mk)
+{
+	kfree(mk->mp->grp.attrs);
+	kfree(mk->mp);
+	mk->mp = NULL;
 }
 
 #ifdef CONFIG_MODULES
@@ -506,21 +524,33 @@
  * @kparam: module parameters (array)
  * @num_params: number of module parameters
  *
- * Adds sysfs entries for module parameters, and creates a link from
- * /sys/module/[mod->name]/parameters to /sys/parameters/[mod->name]/
+ * Adds sysfs entries for module parameters under
+ * /sys/module/[mod->name]/parameters/
  */
 int module_param_sysfs_setup(struct module *mod,
 			     struct kernel_param *kparam,
 			     unsigned int num_params)
 {
-	struct module_param_attrs *mp;
+	int i, err;
+	bool params = false;
 
-	mp = param_sysfs_setup(&mod->mkobj, kparam, num_params, 0);
-	if (IS_ERR(mp))
-		return PTR_ERR(mp);
+	for (i = 0; i < num_params; i++) {
+		if (kparam[i].perm == 0)
+			continue;
+		err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name);
+		if (err)
+			return err;
+		params = true;
+	}
 
-	mod->param_attrs = mp;
-	return 0;
+	if (!params)
+		return 0;
+
+	/* Create the param group. */
+	err = sysfs_create_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
+	if (err)
+		free_module_param_attrs(&mod->mkobj);
+	return err;
 }
 
 /*
@@ -532,43 +562,55 @@
  */
 void module_param_sysfs_remove(struct module *mod)
 {
-	if (mod->param_attrs) {
-		sysfs_remove_group(&mod->mkobj.kobj,
-				   &mod->param_attrs->grp);
+	if (mod->mkobj.mp) {
+		sysfs_remove_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
 		/* We are positive that no one is using any param
 		 * attrs at this point.  Deallocate immediately. */
-		kfree(mod->param_attrs);
-		mod->param_attrs = NULL;
+		free_module_param_attrs(&mod->mkobj);
 	}
 }
 #endif
 
-/*
- * kernel_param_sysfs_setup - wrapper for built-in params support
- */
-static void __init kernel_param_sysfs_setup(const char *name,
-					    struct kernel_param *kparam,
-					    unsigned int num_params,
-					    unsigned int name_skip)
+static void __init kernel_add_sysfs_param(const char *name,
+					  struct kernel_param *kparam,
+					  unsigned int name_skip)
 {
 	struct module_kobject *mk;
-	int ret;
+	struct kobject *kobj;
+	int err;
 
-	mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
-	BUG_ON(!mk);
+	kobj = kset_find_obj(module_kset, name);
+	if (kobj) {
+		/* We already have one.  Remove params so we can add more. */
+		mk = to_module_kobject(kobj);
+		/* We need to remove it before adding parameters. */
+		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+	} else {
+		mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
+		BUG_ON(!mk);
 
-	mk->mod = THIS_MODULE;
-	mk->kobj.kset = module_kset;
-	ret = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name);
-	if (ret) {
-		kobject_put(&mk->kobj);
-		printk(KERN_ERR "Module '%s' failed to be added to sysfs, "
-		      "error number %d\n", name, ret);
-		printk(KERN_ERR	"The system will be unstable now.\n");
-		return;
+		mk->mod = THIS_MODULE;
+		mk->kobj.kset = module_kset;
+		err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
+					   "%s", name);
+		if (err) {
+			kobject_put(&mk->kobj);
+			printk(KERN_ERR "Module '%s' failed to add to sysfs, "
+			       "error number %d\n", name, err);
+			printk(KERN_ERR	"The system will be unstable now.\n");
+			return;
+		}
+		/* So that the kobject_put() at the end balances both paths. */
+		kobject_get(&mk->kobj);
 	}
-	param_sysfs_setup(mk, kparam, num_params, name_skip);
+
+	/* These should not fail at boot. */
+	err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
+	BUG_ON(err);
+	err = sysfs_create_group(&mk->kobj, &mk->mp->grp);
+	BUG_ON(err);
 	kobject_uevent(&mk->kobj, KOBJ_ADD);
+	kobject_put(&mk->kobj);
 }
 
 /*
@@ -579,60 +621,36 @@
  * The "module" name (KBUILD_MODNAME) is stored before a dot, the
  * "parameter" name is stored behind a dot in kernel_param->name. So,
  * extract the "module" name for all built-in kernel_param-eters,
- * and for all who have the same, call kernel_param_sysfs_setup.
+ * and for all who have the same, call kernel_add_sysfs_param.
  */
 static void __init param_sysfs_builtin(void)
 {
-	struct kernel_param *kp, *kp_begin = NULL;
-	unsigned int i, name_len, count = 0;
-	char modname[MODULE_NAME_LEN + 1] = "";
+	struct kernel_param *kp;
+	unsigned int name_len;
+	char modname[MODULE_NAME_LEN];
 
-	for (i=0; i < __stop___param - __start___param; i++) {
+	for (kp = __start___param; kp < __stop___param; kp++) {
 		char *dot;
-		size_t max_name_len;
 
-		kp = &__start___param[i];
-		max_name_len =
-			min_t(size_t, MODULE_NAME_LEN, strlen(kp->name));
-
-		dot = memchr(kp->name, '.', max_name_len);
-		if (!dot) {
-			DEBUGP("couldn't find period in first %d characters "
-			       "of %s\n", MODULE_NAME_LEN, kp->name);
+		if (kp->perm == 0)
 			continue;
-		}
-		name_len = dot - kp->name;
 
- 		/* new kbuild_modname? */
-		if (strlen(modname) != name_len
-		    || strncmp(modname, kp->name, name_len) != 0) {
-			/* add a new kobject for previous kernel_params. */
-			if (count)
-				kernel_param_sysfs_setup(modname,
-							 kp_begin,
-							 count,
-							 strlen(modname)+1);
-
-			strncpy(modname, kp->name, name_len);
-			modname[name_len] = '\0';
-			count = 0;
-			kp_begin = kp;
+		dot = strchr(kp->name, '.');
+		if (!dot) {
+			/* This happens for core_param() */
+			strcpy(modname, "kernel");
+			name_len = 0;
+		} else {
+			name_len = dot - kp->name + 1;
+			strlcpy(modname, kp->name, name_len);
 		}
-		count++;
+		kernel_add_sysfs_param(modname, kp, name_len);
 	}
-
-	/* last kernel_params need to be registered as well */
-	if (count)
-		kernel_param_sysfs_setup(modname, kp_begin, count,
-					 strlen(modname)+1);
 }
 
 
 /* module-related sysfs stuff */
 
-#define to_module_attr(n) container_of(n, struct module_attribute, attr);
-#define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
-
 static ssize_t module_attr_show(struct kobject *kobj,
 				struct attribute *attr,
 				char *buf)
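
add_sysfs_param() above grows the module_param_attrs block and its NULL-terminated attribute pointer array one entry at a time with krealloc(), then rebuilds every pointer because the block may have moved. The same trap exists with plain realloc(); the self-contained sketch below (invented attr/attr_group names, minimal error handling) demonstrates the grow-then-refix pattern.

/*
 * Standalone sketch, not the kernel code: grow two arrays with realloc(),
 * then recompute every pointer into the block that may have moved.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct attr {
	char name[32];
};

struct attr_group {
	unsigned int num;
	struct attr **ptrs;	/* NULL-terminated, points into 'items' */
	struct attr *items;
};

static int group_add(struct attr_group *g, const char *name)
{
	struct attr *items;
	struct attr **ptrs;
	unsigned int n = g->num;

	items = realloc(g->items, sizeof(*items) * (n + 1));
	if (!items)
		return -1;
	g->items = items;

	ptrs = realloc(g->ptrs, sizeof(*ptrs) * (n + 2));
	if (!ptrs)
		return -1;
	g->ptrs = ptrs;

	snprintf(items[n].name, sizeof(items[n].name), "%s", name);
	g->num = n + 1;

	/* 'items' may have moved: recompute every pointer, then terminate. */
	for (unsigned int i = 0; i < g->num; i++)
		g->ptrs[i] = &g->items[i];
	g->ptrs[g->num] = NULL;
	return 0;
}

int main(void)
{
	struct attr_group g = { 0 };

	group_add(&g, "alpha");
	group_add(&g, "beta");
	for (struct attr **p = g.ptrs; *p; p++)
		printf("%s\n", (*p)->name);
	free(g.items);
	free(g.ptrs);
	return 0;
}
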
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c42a03a..153dcb2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -7,6 +7,93 @@
 #include <linux/errno.h>
 #include <linux/math64.h>
 #include <asm/uaccess.h>
+#include <linux/kernel_stat.h>
+
+/*
+ * Allocate the thread_group_cputime structure appropriately and fill in the
+ * current values of the fields.  Called from copy_signal() via
+ * thread_group_cputime_clone_thread() when adding a second or subsequent
+ * thread to a thread group.  Assumes interrupts are enabled when called.
+ */
+int thread_group_cputime_alloc(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct task_cputime *cputime;
+
+	/*
+	 * If we have multiple threads and we don't already have a
+	 * per-CPU task_cputime struct (checked in the caller), allocate
+	 * one and fill it in with the times accumulated so far.  We may
+	 * race with another thread so recheck after we pick up the sighand
+	 * lock.
+	 */
+	cputime = alloc_percpu(struct task_cputime);
+	if (cputime == NULL)
+		return -ENOMEM;
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (sig->cputime.totals) {
+		spin_unlock_irq(&tsk->sighand->siglock);
+		free_percpu(cputime);
+		return 0;
+	}
+	sig->cputime.totals = cputime;
+	cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
+	cputime->utime = tsk->utime;
+	cputime->stime = tsk->stime;
+	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
+	spin_unlock_irq(&tsk->sighand->siglock);
+	return 0;
+}
+
+/**
+ * thread_group_cputime - Sum the thread group time fields across all CPUs.
+ *
+ * @tsk:	The task we use to identify the thread group.
+ * @times:	task_cputime structure in which we return the summed fields.
+ *
+ * Walk the list of CPUs to sum the per-CPU time fields in the thread group
+ * time structure.
+ */
+void thread_group_cputime(
+	struct task_struct *tsk,
+	struct task_cputime *times)
+{
+	struct signal_struct *sig;
+	int i;
+	struct task_cputime *tot;
+
+	sig = tsk->signal;
+	if (unlikely(!sig) || !sig->cputime.totals) {
+		times->utime = tsk->utime;
+		times->stime = tsk->stime;
+		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
+		return;
+	}
+	times->stime = times->utime = cputime_zero;
+	times->sum_exec_runtime = 0;
+	for_each_possible_cpu(i) {
+		tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
+		times->utime = cputime_add(times->utime, tot->utime);
+		times->stime = cputime_add(times->stime, tot->stime);
+		times->sum_exec_runtime += tot->sum_exec_runtime;
+	}
+}
+
+/*
+ * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ */
+void update_rlimit_cpu(unsigned long rlim_new)
+{
+	cputime_t cputime;
+
+	cputime = secs_to_cputime(rlim_new);
+	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
+	    cputime_lt(current->signal->it_prof_expires, cputime)) {
+		spin_lock_irq(&current->sighand->siglock);
+		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -158,10 +245,6 @@
 {
 	return p->utime;
 }
-static inline unsigned long long sched_ns(struct task_struct *p)
-{
-	return task_sched_runtime(p);
-}
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
@@ -211,46 +294,7 @@
 		cpu->cpu = virt_ticks(p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = sched_ns(p);
-		break;
-	}
-	return 0;
-}
-
-/*
- * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
- */
-static int cpu_clock_sample_group_locked(unsigned int clock_idx,
-					 struct task_struct *p,
-					 union cpu_time_count *cpu)
-{
-	struct task_struct *t = p;
- 	switch (clock_idx) {
-	default:
-		return -EINVAL;
-	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
-		do {
-			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	case CPUCLOCK_VIRT:
-		cpu->cpu = p->signal->utime;
-		do {
-			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	case CPUCLOCK_SCHED:
-		cpu->sched = p->signal->sum_sched_runtime;
-		/* Add in each other live thread.  */
-		while ((t = next_thread(t)) != p) {
-			cpu->sched += t->se.sum_exec_runtime;
-		}
-		cpu->sched += sched_ns(p);
+		cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
 		break;
 	}
 	return 0;
@@ -264,13 +308,23 @@
 				  struct task_struct *p,
 				  union cpu_time_count *cpu)
 {
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
-					    cpu);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	return ret;
+	struct task_cputime cputime;
+
+	thread_group_cputime(p, &cputime);
+	switch (which_clock) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
 }
 
 
@@ -471,80 +525,11 @@
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
+	struct task_cputime cputime;
+
+	thread_group_cputime(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
-		       cputime_add(tsk->utime, tsk->signal->utime),
-		       cputime_add(tsk->stime, tsk->signal->stime),
-		     tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
-}
-
-
-/*
- * Set the expiry times of all the threads in the process so one of them
- * will go off before the process cumulative expiry total is reached.
- */
-static void process_timer_rebalance(struct task_struct *p,
-				    unsigned int clock_idx,
-				    union cpu_time_count expires,
-				    union cpu_time_count val)
-{
-	cputime_t ticks, left;
-	unsigned long long ns, nsleft;
- 	struct task_struct *t = p;
-	unsigned int nthreads = atomic_read(&p->signal->live);
-
-	if (!nthreads)
-		return;
-
-	switch (clock_idx) {
-	default:
-		BUG();
-		break;
-	case CPUCLOCK_PROF:
-		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-				       nthreads);
-		do {
-			if (likely(!(t->flags & PF_EXITING))) {
-				ticks = cputime_add(prof_ticks(t), left);
-				if (cputime_eq(t->it_prof_expires,
-					       cputime_zero) ||
-				    cputime_gt(t->it_prof_expires, ticks)) {
-					t->it_prof_expires = ticks;
-				}
-			}
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	case CPUCLOCK_VIRT:
-		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-				       nthreads);
-		do {
-			if (likely(!(t->flags & PF_EXITING))) {
-				ticks = cputime_add(virt_ticks(t), left);
-				if (cputime_eq(t->it_virt_expires,
-					       cputime_zero) ||
-				    cputime_gt(t->it_virt_expires, ticks)) {
-					t->it_virt_expires = ticks;
-				}
-			}
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	case CPUCLOCK_SCHED:
-		nsleft = expires.sched - val.sched;
-		do_div(nsleft, nthreads);
-		nsleft = max_t(unsigned long long, nsleft, 1);
-		do {
-			if (likely(!(t->flags & PF_EXITING))) {
-				ns = t->se.sum_exec_runtime + nsleft;
-				if (t->it_sched_expires == 0 ||
-				    t->it_sched_expires > ns) {
-					t->it_sched_expires = ns;
-				}
-			}
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	}
+		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -608,29 +593,32 @@
 			default:
 				BUG();
 			case CPUCLOCK_PROF:
-				if (cputime_eq(p->it_prof_expires,
+				if (cputime_eq(p->cputime_expires.prof_exp,
 					       cputime_zero) ||
-				    cputime_gt(p->it_prof_expires,
+				    cputime_gt(p->cputime_expires.prof_exp,
 					       nt->expires.cpu))
-					p->it_prof_expires = nt->expires.cpu;
+					p->cputime_expires.prof_exp =
+						nt->expires.cpu;
 				break;
 			case CPUCLOCK_VIRT:
-				if (cputime_eq(p->it_virt_expires,
+				if (cputime_eq(p->cputime_expires.virt_exp,
 					       cputime_zero) ||
-				    cputime_gt(p->it_virt_expires,
+				    cputime_gt(p->cputime_expires.virt_exp,
 					       nt->expires.cpu))
-					p->it_virt_expires = nt->expires.cpu;
+					p->cputime_expires.virt_exp =
+						nt->expires.cpu;
 				break;
 			case CPUCLOCK_SCHED:
-				if (p->it_sched_expires == 0 ||
-				    p->it_sched_expires > nt->expires.sched)
-					p->it_sched_expires = nt->expires.sched;
+				if (p->cputime_expires.sched_exp == 0 ||
+				    p->cputime_expires.sched_exp >
+							nt->expires.sched)
+					p->cputime_expires.sched_exp =
+						nt->expires.sched;
 				break;
 			}
 		} else {
 			/*
-			 * For a process timer, we must balance
-			 * all the live threads' expirations.
+			 * For a process timer, set the cached expiration time.
 			 */
 			switch (CPUCLOCK_WHICH(timer->it_clock)) {
 			default:
@@ -641,7 +629,9 @@
 				    cputime_lt(p->signal->it_virt_expires,
 					       timer->it.cpu.expires.cpu))
 					break;
-				goto rebalance;
+				p->signal->cputime_expires.virt_exp =
+					timer->it.cpu.expires.cpu;
+				break;
 			case CPUCLOCK_PROF:
 				if (!cputime_eq(p->signal->it_prof_expires,
 						cputime_zero) &&
@@ -652,13 +642,12 @@
 				if (i != RLIM_INFINITY &&
 				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
 					break;
-				goto rebalance;
+				p->signal->cputime_expires.prof_exp =
+					timer->it.cpu.expires.cpu;
+				break;
 			case CPUCLOCK_SCHED:
-			rebalance:
-				process_timer_rebalance(
-					timer->it.cpu.task,
-					CPUCLOCK_WHICH(timer->it_clock),
-					timer->it.cpu.expires, now);
+				p->signal->cputime_expires.sched_exp =
+					timer->it.cpu.expires.sched;
 				break;
 			}
 		}
@@ -969,13 +958,13 @@
 	struct signal_struct *const sig = tsk->signal;
 
 	maxfire = 20;
-	tsk->it_prof_expires = cputime_zero;
+	tsk->cputime_expires.prof_exp = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
 		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
-			tsk->it_prof_expires = t->expires.cpu;
+			tsk->cputime_expires.prof_exp = t->expires.cpu;
 			break;
 		}
 		t->firing = 1;
@@ -984,13 +973,13 @@
 
 	++timers;
 	maxfire = 20;
-	tsk->it_virt_expires = cputime_zero;
+	tsk->cputime_expires.virt_exp = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
 		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
-			tsk->it_virt_expires = t->expires.cpu;
+			tsk->cputime_expires.virt_exp = t->expires.cpu;
 			break;
 		}
 		t->firing = 1;
@@ -999,13 +988,13 @@
 
 	++timers;
 	maxfire = 20;
-	tsk->it_sched_expires = 0;
+	tsk->cputime_expires.sched_exp = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
 		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
-			tsk->it_sched_expires = t->expires.sched;
+			tsk->cputime_expires.sched_exp = t->expires.sched;
 			break;
 		}
 		t->firing = 1;
@@ -1055,10 +1044,10 @@
 {
 	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
-	cputime_t utime, stime, ptime, virt_expires, prof_expires;
+	cputime_t utime, ptime, virt_expires, prof_expires;
 	unsigned long long sum_sched_runtime, sched_expires;
-	struct task_struct *t;
 	struct list_head *timers = sig->cpu_timers;
+	struct task_cputime cputime;
 
 	/*
 	 * Don't sample the current process CPU clocks if there are no timers.
@@ -1074,18 +1063,10 @@
 	/*
 	 * Collect the current process totals.
 	 */
-	utime = sig->utime;
-	stime = sig->stime;
-	sum_sched_runtime = sig->sum_sched_runtime;
-	t = tsk;
-	do {
-		utime = cputime_add(utime, t->utime);
-		stime = cputime_add(stime, t->stime);
-		sum_sched_runtime += t->se.sum_exec_runtime;
-		t = next_thread(t);
-	} while (t != tsk);
-	ptime = cputime_add(utime, stime);
-
+	thread_group_cputime(tsk, &cputime);
+	utime = cputime.utime;
+	ptime = cputime_add(utime, cputime.stime);
+	sum_sched_runtime = cputime.sum_exec_runtime;
 	maxfire = 20;
 	prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
@@ -1193,60 +1174,18 @@
 		}
 	}
 
-	if (!cputime_eq(prof_expires, cputime_zero) ||
-	    !cputime_eq(virt_expires, cputime_zero) ||
-	    sched_expires != 0) {
-		/*
-		 * Rebalance the threads' expiry times for the remaining
-		 * process CPU timers.
-		 */
-
-		cputime_t prof_left, virt_left, ticks;
-		unsigned long long sched_left, sched;
-		const unsigned int nthreads = atomic_read(&sig->live);
-
-		if (!nthreads)
-			return;
-
-		prof_left = cputime_sub(prof_expires, utime);
-		prof_left = cputime_sub(prof_left, stime);
-		prof_left = cputime_div_non_zero(prof_left, nthreads);
-		virt_left = cputime_sub(virt_expires, utime);
-		virt_left = cputime_div_non_zero(virt_left, nthreads);
-		if (sched_expires) {
-			sched_left = sched_expires - sum_sched_runtime;
-			do_div(sched_left, nthreads);
-			sched_left = max_t(unsigned long long, sched_left, 1);
-		} else {
-			sched_left = 0;
-		}
-		t = tsk;
-		do {
-			if (unlikely(t->flags & PF_EXITING))
-				continue;
-
-			ticks = cputime_add(cputime_add(t->utime, t->stime),
-					    prof_left);
-			if (!cputime_eq(prof_expires, cputime_zero) &&
-			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
-			     cputime_gt(t->it_prof_expires, ticks))) {
-				t->it_prof_expires = ticks;
-			}
-
-			ticks = cputime_add(t->utime, virt_left);
-			if (!cputime_eq(virt_expires, cputime_zero) &&
-			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
-			     cputime_gt(t->it_virt_expires, ticks))) {
-				t->it_virt_expires = ticks;
-			}
-
-			sched = t->se.sum_exec_runtime + sched_left;
-			if (sched_expires && (t->it_sched_expires == 0 ||
-					      t->it_sched_expires > sched)) {
-				t->it_sched_expires = sched;
-			}
-		} while ((t = next_thread(t)) != tsk);
-	}
+	if (!cputime_eq(prof_expires, cputime_zero) &&
+	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
+	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
+		sig->cputime_expires.prof_exp = prof_expires;
+	if (!cputime_eq(virt_expires, cputime_zero) &&
+	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
+	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
+		sig->cputime_expires.virt_exp = virt_expires;
+	if (sched_expires != 0 &&
+	    (sig->cputime_expires.sched_exp == 0 ||
+	     sig->cputime_expires.sched_exp > sched_expires))
+		sig->cputime_expires.sched_exp = sched_expires;
 }
 
 /*
@@ -1314,6 +1253,86 @@
 	++timer->it_requeue_pending;
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:	The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+	if (cputime_eq(cputime->utime, cputime_zero) &&
+	    cputime_eq(cputime->stime, cputime_zero) &&
+	    cputime->sum_exec_runtime == 0)
+		return 1;
+	return 0;
+}
+
+/**
+ * task_cputime_expired - Compare two task_cputime entities.
+ *
+ * @sample:	The task_cputime structure to be checked for expiration.
+ * @expires:	Expiration times, against which @sample will be checked.
+ *
+ * Checks @sample against @expires to see if any field of @sample has expired.
+ * Returns true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set.  Otherwise returns false.
+ */
+static inline int task_cputime_expired(const struct task_cputime *sample,
+					const struct task_cputime *expires)
+{
+	if (!cputime_eq(expires->utime, cputime_zero) &&
+	    cputime_ge(sample->utime, expires->utime))
+		return 1;
+	if (!cputime_eq(expires->stime, cputime_zero) &&
+	    cputime_ge(cputime_add(sample->utime, sample->stime),
+		       expires->stime))
+		return 1;
+	if (expires->sum_exec_runtime != 0 &&
+	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
+		return 1;
+	return 0;
+}
+
+/**
+ * fastpath_timer_check - POSIX CPU timers fast path.
+ *
+ * @tsk:	The task (thread) being checked.
+ *
+ * Check the task and thread group timers.  If both are zero (there are no
+ * timers set) return false.  Otherwise snapshot the task and thread group
+ * timers and compare them with the corresponding expiration times.  Return
+ * true if a timer has expired, else return false.
+ */
+static inline int fastpath_timer_check(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+
+	if (unlikely(!sig))
+		return 0;
+
+	if (!task_cputime_zero(&tsk->cputime_expires)) {
+		struct task_cputime task_sample = {
+			.utime = tsk->utime,
+			.stime = tsk->stime,
+			.sum_exec_runtime = tsk->se.sum_exec_runtime
+		};
+
+		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+			return 1;
+	}
+	if (!task_cputime_zero(&sig->cputime_expires)) {
+		struct task_cputime group_sample;
+
+		thread_group_cputime(tsk, &group_sample);
+		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * This is called from the timer interrupt handler.  The irq handler has
  * already updated our counts.  We need to check if any timers fire now.
@@ -1326,42 +1345,31 @@
 
 	BUG_ON(!irqs_disabled());
 
-#define UNEXPIRED(clock) \
-		(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
-		 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
-
-	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
-	    (tsk->it_sched_expires == 0 ||
-	     tsk->se.sum_exec_runtime < tsk->it_sched_expires))
+	/*
+	 * The fast path checks that there are no expired thread or thread
+	 * group timers.  If that's so, just return.
+	 */
+	if (!fastpath_timer_check(tsk))
 		return;
 
-#undef	UNEXPIRED
+	spin_lock(&tsk->sighand->siglock);
+	/*
+	 * Here we take off tsk->signal->cpu_timers[N] and
+	 * tsk->cpu_timers[N] all the timers that are firing, and
+	 * put them on the firing list.
+	 */
+	check_thread_timers(tsk, &firing);
+	check_process_timers(tsk, &firing);
 
 	/*
-	 * Double-check with locks held.
+	 * We must release these locks before taking any timer's lock.
+	 * There is a potential race with timer deletion here, as the
+	 * siglock now protects our private firing list.  We have set
+	 * the firing flag in each timer, so that a deletion attempt
+	 * that gets the timer lock before we do will give it up and
+	 * spin until we've taken care of that timer below.
 	 */
-	read_lock(&tasklist_lock);
-	if (likely(tsk->signal != NULL)) {
-		spin_lock(&tsk->sighand->siglock);
-
-		/*
-		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-		 * all the timers that are firing, and put them on the firing list.
-		 */
-		check_thread_timers(tsk, &firing);
-		check_process_timers(tsk, &firing);
-
-		/*
-		 * We must release these locks before taking any timer's lock.
-		 * There is a potential race with timer deletion here, as the
-		 * siglock now protects our private firing list.  We have set
-		 * the firing flag in each timer, so that a deletion attempt
-		 * that gets the timer lock before we do will give it up and
-		 * spin until we've taken care of that timer below.
-		 */
-		spin_unlock(&tsk->sighand->siglock);
-	}
-	read_unlock(&tasklist_lock);
+	spin_unlock(&tsk->sighand->siglock);
 
 	/*
 	 * Now that all the timers on our list have the firing flag,
@@ -1389,10 +1397,9 @@
 
 /*
  * Set one of the process-wide special case CPU timers.
- * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
- * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
- * absolute; non-null for ITIMER_*, where *newval is relative and we update
- * it to be absolute, *oldval is absolute and we update it to be relative.
+ * The tsk->sighand->siglock must be held by the caller.
+ * The *newval argument is relative and we update it to be absolute, *oldval
+ * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
@@ -1401,7 +1408,7 @@
 	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
-	cpu_clock_sample_group_locked(clock_idx, tsk, &now);
+	cpu_clock_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
 		if (!cputime_eq(*oldval, cputime_zero)) {
@@ -1435,13 +1442,14 @@
 	    cputime_ge(list_first_entry(head,
 				  struct cpu_timer_list, entry)->expires.cpu,
 		       *newval)) {
-		/*
-		 * Rejigger each thread's expiry time so that one will
-		 * notice before we hit the process-cumulative expiry time.
-		 */
-		union cpu_time_count expires = { .sched = 0 };
-		expires.cpu = *newval;
-		process_timer_rebalance(tsk, clock_idx, expires, now);
+		switch (clock_idx) {
+		case CPUCLOCK_PROF:
+			tsk->signal->cputime_expires.prof_exp = *newval;
+			break;
+		case CPUCLOCK_VIRT:
+			tsk->signal->cputime_expires.virt_exp = *newval;
+			break;
+		}
 	}
 }
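
The posix-cpu-timers.c rework stops walking every thread at timer-check time: ticks charge a per-CPU task_cputime bucket, thread_group_cputime() sums the buckets on demand, and fastpath_timer_check() compares the sums against cached expiration fields where zero means "not armed". The userspace model below mirrors only that accounting shape; the structure and function names are illustrative, not the kernel's.

/*
 * Userspace model, not kernel code: per-"CPU" buckets are charged locally,
 * summed on demand, and compared against expiry fields where 0 means unset.
 */
#include <stdio.h>

#define NR_FAKE_CPUS 4

struct fake_cputime {
	unsigned long long utime;
	unsigned long long stime;
	unsigned long long sum_exec_runtime;
};

static struct fake_cputime per_cpu_totals[NR_FAKE_CPUS];

/* Tick path: touches only the local bucket, no lock, no thread walk. */
static void account_tick(int cpu, unsigned long long u, unsigned long long s)
{
	per_cpu_totals[cpu].utime += u;
	per_cpu_totals[cpu].stime += s;
	per_cpu_totals[cpu].sum_exec_runtime += u + s;
}

/* Timer path: sum the buckets (thread_group_cputime() analogue). */
static struct fake_cputime group_totals(void)
{
	struct fake_cputime sum = { 0, 0, 0 };

	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		sum.utime += per_cpu_totals[cpu].utime;
		sum.stime += per_cpu_totals[cpu].stime;
		sum.sum_exec_runtime += per_cpu_totals[cpu].sum_exec_runtime;
	}
	return sum;
}

/* A zero expiry field means "no timer armed", as in task_cputime_expired(). */
static int expired(const struct fake_cputime *sample,
		   const struct fake_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

int main(void)
{
	struct fake_cputime limit = { .sum_exec_runtime = 100 };

	account_tick(0, 30, 10);
	account_tick(2, 40, 25);
	struct fake_cputime total = group_totals();
	printf("runtime=%llu expired=%d\n",
	       total.sum_exec_runtime, expired(&total, &limit));
	return 0;
}
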
 
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e54..b931d7c 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -223,6 +223,15 @@
 }
 
 /*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+	getrawmonotonic(tp);
+	return 0;
+}
+
+/*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
@@ -235,9 +244,15 @@
 		.clock_get = posix_ktime_get_ts,
 		.clock_set = do_posix_clock_nosettime,
 	};
+	struct k_clock clock_monotonic_raw = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_get_monotonic_raw,
+		.clock_set = do_posix_clock_nosettime,
+	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
 	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -298,6 +313,7 @@
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
+	int shared, ret;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +327,10 @@
 	 */
 	timr->sigq->info.si_sys_private = si_private;
 
-	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_code = SI_TIMER;
-	timr->sigq->info.si_tid = timr->it_id;
-	timr->sigq->info.si_value = timr->it_sigev_value;
-
-	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		struct task_struct *leader;
-		int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-		if (likely(ret >= 0))
-			return ret;
-
-		timr->it_sigev_notify = SIGEV_SIGNAL;
-		leader = timr->it_process->group_leader;
-		put_task_struct(timr->it_process);
-		timr->it_process = leader;
-	}
-
-	return send_sigqueue(timr->sigq, timr->it_process, 1);
+	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	/* If we failed to send the signal the timer stops. */
+	return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
@@ -468,11 +469,9 @@
 		 struct sigevent __user *timer_event_spec,
 		 timer_t __user * created_timer_id)
 {
-	int error = 0;
-	struct k_itimer *new_timer = NULL;
-	int new_timer_id;
-	struct task_struct *process = NULL;
-	unsigned long flags;
+	struct k_itimer *new_timer;
+	int error, new_timer_id;
+	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
@@ -490,12 +489,11 @@
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, (void *) new_timer,
-			    &new_timer_id);
+	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 	spin_unlock_irq(&idr_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error) {
+	if (error) {
+		if (error == -EAGAIN)
+			goto retry;
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
@@ -526,67 +524,43 @@
 			error = -EFAULT;
 			goto out;
 		}
-		new_timer->it_sigev_notify = event.sigev_notify;
-		new_timer->it_sigev_signo = event.sigev_signo;
-		new_timer->it_sigev_value = event.sigev_value;
-
-		read_lock(&tasklist_lock);
-		if ((process = good_sigevent(&event))) {
-			/*
-			 * We may be setting up this process for another
-			 * thread.  It may be exiting.  To catch this
-			 * case the we check the PF_EXITING flag.  If
-			 * the flag is not set, the siglock will catch
-			 * him before it is too late (in exit_itimers).
-			 *
-			 * The exec case is a bit more invloved but easy
-			 * to code.  If the process is in our thread
-			 * group (and it must be or we would not allow
-			 * it here) and is doing an exec, it will cause
-			 * us to be killed.  In this case it will wait
-			 * for us to die which means we can finish this
-			 * linkage with our last gasp. I.e. no code :)
-			 */
-			spin_lock_irqsave(&process->sighand->siglock, flags);
-			if (!(process->flags & PF_EXITING)) {
-				new_timer->it_process = process;
-				list_add(&new_timer->list,
-					 &process->signal->posix_timers);
-				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-					get_task_struct(process);
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-			} else {
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-				process = NULL;
-			}
-		}
-		read_unlock(&tasklist_lock);
+		rcu_read_lock();
+		process = good_sigevent(&event);
+		if (process)
+			get_task_struct(process);
+		rcu_read_unlock();
 		if (!process) {
 			error = -EINVAL;
 			goto out;
 		}
 	} else {
-		new_timer->it_sigev_notify = SIGEV_SIGNAL;
-		new_timer->it_sigev_signo = SIGALRM;
-		new_timer->it_sigev_value.sival_int = new_timer->it_id;
+		event.sigev_notify = SIGEV_SIGNAL;
+		event.sigev_signo = SIGALRM;
+		event.sigev_value.sival_int = new_timer->it_id;
 		process = current->group_leader;
-		spin_lock_irqsave(&process->sighand->siglock, flags);
-		new_timer->it_process = process;
-		list_add(&new_timer->list, &process->signal->posix_timers);
-		spin_unlock_irqrestore(&process->sighand->siglock, flags);
+		get_task_struct(process);
 	}
 
+	new_timer->it_sigev_notify     = event.sigev_notify;
+	new_timer->sigq->info.si_signo = event.sigev_signo;
+	new_timer->sigq->info.si_value = event.sigev_value;
+	new_timer->sigq->info.si_tid   = new_timer->it_id;
+	new_timer->sigq->info.si_code  = SI_TIMER;
+
+	spin_lock_irq(&current->sighand->siglock);
+	new_timer->it_process = process;
+	list_add(&new_timer->list, &current->signal->posix_timers);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
  	/*
 	 * In the case of the timer belonging to another task, after
 	 * the task is unlocked, the timer is owned by the other task
 	 * and may cease to exist at any time.  Don't use or modify
 	 * new_timer after the unlock call.
 	 */
-
 out:
-	if (error)
-		release_posix_timer(new_timer, it_id_set);
-
+	release_posix_timer(new_timer, it_id_set);
 	return error;
 }
 
@@ -597,7 +571,7 @@
  * the find to the timer lock.  To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 	/*
@@ -605,23 +579,20 @@
 	 * flags part over to the timer lock.  Must not let interrupts in
 	 * while we are moving the lock.
 	 */
-
 	spin_lock_irqsave(&idr_lock, *flags);
-	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-
-		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-				!same_thread_group(timr->it_process, current)) {
-			spin_unlock(&timr->it_lock);
-			spin_unlock_irqrestore(&idr_lock, *flags);
-			timr = NULL;
-		} else
+		if (timr->it_process &&
+		    same_thread_group(timr->it_process, current)) {
 			spin_unlock(&idr_lock);
-	} else
-		spin_unlock_irqrestore(&idr_lock, *flags);
+			return timr;
+		}
+		spin_unlock(&timr->it_lock);
+	}
+	spin_unlock_irqrestore(&idr_lock, *flags);
 
-	return timr;
+	return NULL;
 }
 
 /*
@@ -862,8 +833,7 @@
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
+	put_task_struct(timer->it_process);
 	timer->it_process = NULL;
 
 	unlock_timer(timer, flags);
@@ -890,8 +860,7 @@
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
+	put_task_struct(timer->it_process);
 	timer->it_process = NULL;
 
 	unlock_timer(timer, flags);
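
With CLOCK_MONOTONIC_RAW registered above, userspace can read the unslewed monotonic clock through the ordinary clock_gettime() call, assuming a kernel with this patch and a libc that defines the constant (older toolchains may also need -lrt):

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec mono, raw;

	if (clock_gettime(CLOCK_MONOTONIC, &mono) ||
	    clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
		perror("clock_gettime");
		return 1;
	}
	/* RAW is not slewed by NTP, so the two readings drift apart over time. */
	printf("CLOCK_MONOTONIC:     %lld.%09ld\n",
	       (long long)mono.tv_sec, mono.tv_nsec);
	printf("CLOCK_MONOTONIC_RAW: %lld.%09ld\n",
	       (long long)raw.tv_sec, raw.tv_nsec);
	return 0;
}
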
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 278946a..ca63401 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -28,121 +28,6 @@
 	return 1;
 }
 
-/*
- * freezing is complete, mark current process as frozen
- */
-static inline void frozen_process(void)
-{
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		wmb();
-	}
-	clear_freeze_flag(current);
-}
-
-/* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
-{
-	/* Hmm, should we be allowed to suspend when there are realtime
-	   processes around? */
-	long save;
-
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
-	pr_debug("%s entered refrigerator\n", current->comm);
-
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	for (;;) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
-			break;
-		schedule();
-	}
-	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
-}
-
-static void fake_signal_wake_up(struct task_struct *p)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
-{
-	return !(p->flags & PF_FREEZER_NOSIG);
-}
-
-/**
- *	freeze_task - send a freeze request to given task
- *	@p: task to send the request to
- *	@sig_only: if set, the request will only be sent if the task has the
- *		PF_FREEZER_NOSIG flag unset
- *	Return value: 'false', if @sig_only is set and the task has
- *		PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
- *
- *	The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- *	either sending a fake signal to it or waking it up, depending on whether
- *	or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
- *	has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- *	TIF_FREEZE flag will not be set.
- */
-static bool freeze_task(struct task_struct *p, bool sig_only)
-{
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		rmb();
-		if (frozen(p))
-			return false;
-
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
-	}
-
-	if (should_send_signal(p)) {
-		if (!signal_pending(p))
-			fake_signal_wake_up(p);
-	} else if (sig_only) {
-		return false;
-	} else {
-		wake_up_state(p, TASK_INTERRUPTIBLE);
-	}
-
-	return true;
-}
-
-static void cancel_freezing(struct task_struct *p)
-{
-	unsigned long flags;
-
-	if (freezing(p)) {
-		pr_debug("  clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
 static int try_to_freeze_tasks(bool sig_only)
 {
 	struct task_struct *g, *p;
@@ -250,6 +135,9 @@
 		if (nosig_only && should_send_signal(p))
 			continue;
 
+		if (cgroup_frozen(p))
+			continue;
+
 		thaw_process(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
@@ -264,4 +152,3 @@
 	printk("done.\n");
 }
 
-EXPORT_SYMBOL(refrigerator);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 356699a..1e68e4c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -45,7 +45,7 @@
  * TASK_TRACED, resume it now.
  * Requires that irqs be disabled.
  */
-void ptrace_untrace(struct task_struct *child)
+static void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
 	if (task_is_traced(child)) {
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 467d594..ad63af8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -119,18 +119,19 @@
 	/* Take cpucontrol mutex to protect against CPU hotplug */
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
-	atomic_set(&rcu_barrier_cpu_count, 0);
 	/*
-	 * The queueing of callbacks in all CPUs must be atomic with
-	 * respect to RCU, otherwise one CPU may queue a callback,
-	 * wait for a grace period, decrement barrier count and call
-	 * complete(), while other CPUs have not yet queued anything.
-	 * So, we need to make sure that grace periods cannot complete
-	 * until all the callbacks are queued.
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
 	 */
-	rcu_read_lock();
+	atomic_set(&rcu_barrier_cpu_count, 1);
 	on_each_cpu(rcu_barrier_func, (void *)type, 1);
-	rcu_read_unlock();
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
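
The rcu_barrier() fix biases the callback counter to 1 before dispatching work and drops that initial reference only after every CPU has taken its own, so the completion cannot fire early. The pthread sketch below reproduces just that counting pattern with invented names; it uses plain threads and a semaphore, not RCU callbacks.

/*
 * Standalone illustration (pthreads, not RCU): start the counter at 1 before
 * dispatching workers and drop the initial reference last, so the completion
 * cannot be signalled before all workers are accounted for.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_WORKERS 4

static atomic_int pending;
static sem_t done;

static void *worker(void *arg)
{
	(void)arg;
	/* ... per-worker work happens here ... */
	if (atomic_fetch_sub(&pending, 1) == 1)
		sem_post(&done);
	return NULL;
}

int main(void)
{
	pthread_t tids[NR_WORKERS];

	sem_init(&done, 0, 0);
	atomic_store(&pending, 1);		/* hold an initial reference */

	for (int i = 0; i < NR_WORKERS; i++) {
		atomic_fetch_add(&pending, 1);	/* one reference per worker */
		pthread_create(&tids[i], NULL, worker, NULL);
	}

	/* Drop the initial reference only after all workers are dispatched. */
	if (atomic_fetch_sub(&pending, 1) == 1)
		sem_post(&done);

	sem_wait(&done);			/* wait_for_completion() analogue */
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(tids[i], NULL);
	printf("all workers accounted for\n");
	return 0;
}
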
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index ca4bbbe..59236e8 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -54,9 +54,9 @@
 #include <linux/cpu.h>
 #include <linux/random.h>
 #include <linux/delay.h>
-#include <linux/byteorder/swabb.h>
 #include <linux/cpumask.h>
 #include <linux/rcupreempt_trace.h>
+#include <asm/byteorder.h>
 
 /*
  * PREEMPT_RCU data structures.
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 90b5b12..85cb905 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -42,10 +42,10 @@
 #include <linux/freezer.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
-#include <linux/byteorder/swabb.h>
 #include <linux/stat.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <asm/byteorder.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f23059..945a97b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -818,6 +819,13 @@
 unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
+ * Inject some fuzziness into changing the per-cpu group shares;
+ * this avoids remote rq-locks at the expense of fairness.
+ * default: 4
+ */
+unsigned int sysctl_sched_shares_thresh = 4;
+
+/*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
@@ -1453,8 +1461,8 @@
  * Calculate and set the cpu's group shares.
  */
 static void
-__update_group_shares_cpu(struct task_group *tg, int cpu,
-			  unsigned long sd_shares, unsigned long sd_rq_weight)
+update_group_shares_cpu(struct task_group *tg, int cpu,
+			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
 	int boost = 0;
 	unsigned long shares;
@@ -1485,19 +1493,23 @@
 	 *
 	 */
 	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
-	/*
-	 * record the actual number of shares, not the boosted amount.
-	 */
-	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-	tg->cfs_rq[cpu]->rq_weight = rq_weight;
+	if (abs(shares - tg->se[cpu]->load.weight) >
+			sysctl_sched_shares_thresh) {
+		struct rq *rq = cpu_rq(cpu);
+		unsigned long flags;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+		spin_lock_irqsave(&rq->lock, flags);
+		/*
+		 * record the actual number of shares, not the boosted amount.
+		 */
+		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+		tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
-	__set_se_shares(tg->se[cpu], shares);
+		__set_se_shares(tg->se[cpu], shares);
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
 }
 
 /*
@@ -1526,14 +1538,8 @@
 	if (!rq_weight)
 		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
 
-	for_each_cpu_mask(i, sd->span) {
-		struct rq *rq = cpu_rq(i);
-		unsigned long flags;
-
-		spin_lock_irqsave(&rq->lock, flags);
-		__update_group_shares_cpu(tg, i, shares, rq_weight);
-		spin_unlock_irqrestore(&rq->lock, flags);
-	}
+	for_each_cpu_mask(i, sd->span)
+		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
 }
@@ -1936,6 +1942,7 @@
 		 * just go back and repeat.
 		 */
 		rq = task_rq_lock(p, &flags);
+		trace_sched_wait_task(rq, p);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
 		ncsw = 0;
@@ -2297,9 +2304,7 @@
 	success = 1;
 
 out_running:
-	trace_mark(kernel_sched_wakeup,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup(rq, p);
 	check_preempt_curr(rq, p, sync);
 
 	p->state = TASK_RUNNING;
@@ -2432,9 +2437,7 @@
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	trace_mark(kernel_sched_wakeup_new,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup_new(rq, p);
 	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2607,11 +2610,7 @@
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_mark(kernel_sched_schedule,
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		prev->pid, next->pid, prev->state,
-		rq, prev, next);
+	trace_sched_switch(rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -2851,6 +2850,7 @@
 	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
+	trace_sched_migrate_task(rq, p, dest_cpu);
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
@@ -4052,23 +4052,26 @@
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * Return p->sum_exec_runtime plus any more ns on the sched_clock
- * that have not yet been banked in case the task is currently running.
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
  */
-unsigned long long task_sched_runtime(struct task_struct *p)
+unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
-	u64 ns, delta_exec;
 	struct rq *rq;
+	u64 ns = 0;
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime;
+
 	if (task_current(rq, p)) {
+		u64 delta_exec;
+
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
-			ns += delta_exec;
+			ns = delta_exec;
 	}
+
 	task_rq_unlock(rq, &flags);
 
 	return ns;
@@ -4085,6 +4088,7 @@
 	cputime64_t tmp;
 
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
@@ -4109,6 +4113,7 @@
 	tmp = cputime_to_cputime64(cputime);
 
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4144,6 +4149,7 @@
 	}
 
 	p->stime = cputime_add(p->stime, cputime);
+	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
@@ -4185,6 +4191,7 @@
 
 	if (p == rq->idle) {
 		p->stime = cputime_add(p->stime, steal);
+		account_group_system_time(p, steal);
 		if (atomic_read(&rq->nr_iowait) > 0)
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		else
@@ -4441,12 +4448,8 @@
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
-	/*
-	 * Do the rq-clock update outside the rq lock:
-	 */
-	local_irq_disable();
+	spin_lock_irq(&rq->lock);
 	update_rq_clock(rq);
-	spin_lock(&rq->lock);
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
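
sysctl_sched_shares_thresh above turns the per-cpu share update into a filtered one: the remote runqueue lock is taken only when the newly computed share differs from the published value by more than the threshold. A minimal standalone illustration of that filter follows; fake_cpu_group and the surrounding cost model are made up.

/*
 * Minimal sketch, not scheduler code: skip the locked update entirely when
 * the new value is within a small threshold of the value already published.
 */
#include <pthread.h>
#include <stdio.h>

static unsigned long shares_thresh = 4;	/* like sysctl_sched_shares_thresh */

struct fake_cpu_group {
	pthread_mutex_t lock;		/* stands in for the remote rq lock */
	unsigned long shares;
};

static unsigned long locked_updates;

static void update_group_shares(struct fake_cpu_group *g, unsigned long new)
{
	unsigned long cur = g->shares;	/* racy read is fine: just a filter */
	unsigned long delta = new > cur ? new - cur : cur - new;

	if (delta <= shares_thresh)
		return;			/* not worth taking the remote lock */

	pthread_mutex_lock(&g->lock);
	g->shares = new;
	locked_updates++;
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct fake_cpu_group g = { PTHREAD_MUTEX_INITIALIZER, 1024 };

	/* Small jitter around the current value is filtered out... */
	for (unsigned long s = 1021; s <= 1027; s++)
		update_group_shares(&g, s);
	/* ...while a real shift still goes through under the lock. */
	update_group_shares(&g, 2048);

	printf("shares=%lu locked_updates=%lu\n", g.shares, locked_updates);
	return 0;
}
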
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 18fd171..9573c33 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,6 +73,8 @@
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+static const struct sched_class fair_sched_class;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -334,7 +336,7 @@
 #endif
 
 /*
- * delta *= w / rw
+ * delta *= P[w / rw]
  */
 static inline unsigned long
 calc_delta_weight(unsigned long delta, struct sched_entity *se)
@@ -348,15 +350,13 @@
 }
 
 /*
- * delta *= rw / w
+ * delta /= w
  */
 static inline unsigned long
 calc_delta_fair(unsigned long delta, struct sched_entity *se)
 {
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				cfs_rq_of(se)->load.weight, &se->load);
-	}
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 
 	return delta;
 }
@@ -386,26 +386,26 @@
  * We calculate the wall-time slice from the period by taking a part
  * proportional to the weight.
  *
- * s = p*w/rw
+ * s = p*P[w/rw]
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
+	unsigned long nr_running = cfs_rq->nr_running;
+
+	if (unlikely(!se->on_rq))
+		nr_running++;
+
+	return calc_delta_weight(__sched_period(nr_running), se);
 }
 
 /*
  * We calculate the vruntime slice of a to be inserted task
  *
- * vs = s*rw/w = p
+ * vs = s/w
  */
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
-
-	if (!se->on_rq)
-		nr_running++;
-
-	return __sched_period(nr_running);
+	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
 /*
@@ -449,6 +449,7 @@
 		struct task_struct *curtask = task_of(curr);
 
 		cpuacct_charge(curtask, delta_exec);
+		account_group_exec_runtime(curtask, delta_exec);
 	}
 }
 
@@ -627,7 +628,7 @@
 	 * stays open at the end.
 	 */
 	if (initial && sched_feat(START_DEBIT))
-		vruntime += sched_vslice_add(cfs_rq, se);
+		vruntime += sched_vslice(cfs_rq, se);
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
@@ -747,7 +748,7 @@
 	struct rq *rq = rq_of(cfs_rq);
 	u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
 		cfs_rq->pair_start = rq->clock;
 		return se;
 	}
@@ -848,11 +849,31 @@
 		hrtick_start(rq, delta);
 	}
 }
+
+/*
+ * called from enqueue/dequeue and updates the hrtick when the
+ * current task is from our class and nr_running is low enough
+ * to matter.
+ */
+static void hrtick_update(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+
+	if (curr->sched_class != &fair_sched_class)
+		return;
+
+	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+		hrtick_start_fair(rq, curr);
+}
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 }
+
+static inline void hrtick_update(struct rq *rq)
+{
+}
 #endif
 
 /*
@@ -873,7 +894,7 @@
 		wakeup = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -895,7 +916,7 @@
 		sleep = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -1001,8 +1022,6 @@
 
 #ifdef CONFIG_SMP
 
-static const struct sched_class fair_sched_class;
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
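
For illustration, a user-space sketch of the reworked slice arithmetic above: sched_slice() scales the period by the entity's share of the runqueue weight, and sched_vslice() then divides by the entity's own weight relative to NICE_0_LOAD. Group scheduling is ignored here and the period and weights are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024ULL

/* delta *= w / rw: wall-time share of the scheduling period */
static uint64_t calc_delta_weight(uint64_t delta, uint64_t w, uint64_t rw)
{
	return delta * w / rw;
}

/* delta /= w, normalized to NICE_0_LOAD: vruntime scaling */
static uint64_t calc_delta_fair(uint64_t delta, uint64_t w)
{
	if (w != NICE_0_LOAD)
		delta = delta * NICE_0_LOAD / w;
	return delta;
}

int main(void)
{
	uint64_t period = 20000000ULL;		/* hypothetical 20 ms period, in ns */
	uint64_t rw = 1024 + 2048;		/* total runqueue weight */
	uint64_t weights[2] = { 1024, 2048 };
	int i;

	for (i = 0; i < 2; i++) {
		uint64_t slice  = calc_delta_weight(period, weights[i], rw);
		uint64_t vslice = calc_delta_fair(slice, weights[i]);

		printf("weight %4llu: slice %8llu ns, vslice %8llu ns\n",
		       (unsigned long long)weights[i],
		       (unsigned long long)slice,
		       (unsigned long long)vslice);
	}
	return 0;
}

Running it shows that entities of different weight get different wall-time slices but the same vruntime slice, which is the point of the s = p*P[w/rw], vs = s/w split in the comments above.
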
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 7c9e8f4..fda0162 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -5,7 +5,7 @@
 SCHED_FEAT(AFFINE_WAKEUPS, 1)
 SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 SCHED_FEAT(SYNC_WAKEUPS, 1)
-SCHED_FEAT(HRTICK, 1)
+SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(ASYM_GRAN, 1)
 SCHED_FEAT(LB_BIAS, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index cdf5740..b446dc8 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -526,6 +526,8 @@
 	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
+	account_group_exec_runtime(curr, delta_exec);
+
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
@@ -1458,7 +1460,7 @@
 		p->rt.timeout++;
 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
 		if (p->rt.timeout > next)
-			p->it_sched_expires = p->se.sum_exec_runtime;
+			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
 	}
 }
 
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8385d43..2df9d29 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -9,7 +9,7 @@
 static int show_schedstat(struct seq_file *seq, void *v)
 {
 	int cpu;
-	int mask_len = NR_CPUS/32 * 9;
+	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
 	char *mask_str = kmalloc(mask_len, GFP_KERNEL);
 
 	if (mask_str == NULL)
@@ -270,3 +270,89 @@
 #define sched_info_switch(t, next)		do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
+/*
+ * The following are functions that support scheduler-internal time accounting.
+ * These functions are generally called at the timer tick.  None of this depends
+ * on CONFIG_SCHEDSTATS.
+ */
+
+/**
+ * account_group_user_time - Maintain utime for a thread group.
+ *
+ * @tsk:	Pointer to task structure.
+ * @cputime:	Time value by which to increment the utime field of the
+ *		thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the utime field there.
+ */
+static inline void account_group_user_time(struct task_struct *tsk,
+					   cputime_t cputime)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (unlikely(!sig))
+		return;
+	if (sig->cputime.totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times->utime = cputime_add(times->utime, cputime);
+		put_cpu_no_resched();
+	}
+}
+
+/**
+ * account_group_system_time - Maintain stime for a thread group.
+ *
+ * @tsk:	Pointer to task structure.
+ * @cputime:	Time value by which to increment the stime field of the
+ *		thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the stime field there.
+ */
+static inline void account_group_system_time(struct task_struct *tsk,
+					     cputime_t cputime)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (unlikely(!sig))
+		return;
+	if (sig->cputime.totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times->stime = cputime_add(times->stime, cputime);
+		put_cpu_no_resched();
+	}
+}
+
+/**
+ * account_group_exec_runtime - Maintain exec runtime for a thread group.
+ *
+ * @tsk:	Pointer to task structure.
+ * @ns:		Time value by which to increment the sum_exec_runtime field
+ *		of the thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the sum_exec_runtime field there.
+ */
+static inline void account_group_exec_runtime(struct task_struct *tsk,
+					      unsigned long long ns)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (unlikely(!sig))
+		return;
+	if (sig->cputime.totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times->sum_exec_runtime += ns;
+		put_cpu_no_resched();
+	}
+}
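
A rough user-space analogue of the per-CPU accumulation these helpers implement, with a plain array standing in for sig->cputime.totals: writers only touch their own CPU's bucket, and a thread_group_cputime()-style reader sums all buckets. Names and sizes below are illustrative only.

#include <stdio.h>

#define NR_CPUS 4

struct task_cputime {
	unsigned long long utime;
	unsigned long long stime;
	unsigned long long sum_exec_runtime;
};

static struct task_cputime totals[NR_CPUS];	/* stand-in for sig->cputime.totals */

static void account_group_user_time(int cpu, unsigned long long cputime)
{
	totals[cpu].utime += cputime;		/* local bucket, no global lock */
}

static void account_group_exec_runtime(int cpu, unsigned long long ns)
{
	totals[cpu].sum_exec_runtime += ns;
}

static void thread_group_cputime(struct task_cputime *out)
{
	int cpu;

	out->utime = out->stime = out->sum_exec_runtime = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {	/* reader sums all buckets */
		out->utime += totals[cpu].utime;
		out->stime += totals[cpu].stime;
		out->sum_exec_runtime += totals[cpu].sum_exec_runtime;
	}
}

int main(void)
{
	struct task_cputime sum;

	account_group_user_time(0, 10);		/* ticks charged on CPU 0 */
	account_group_user_time(2, 5);		/* ...and on CPU 2 */
	account_group_exec_runtime(1, 123456);
	thread_group_cputime(&sum);
	printf("utime=%llu runtime=%llu\n", sum.utime, sum.sum_exec_runtime);
	return 0;
}
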
diff --git a/kernel/signal.c b/kernel/signal.c
index e661b01..105217d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -27,6 +27,7 @@
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
+#include <trace/sched.h>
 
 #include <asm/param.h>
 #include <asm/uaccess.h>
@@ -803,6 +804,8 @@
 	struct sigpending *pending;
 	struct sigqueue *q;
 
+	trace_sched_signal_send(sig, t);
+
 	assert_spin_locked(&t->sighand->siglock);
 	if (!prepare_signal(sig, t))
 		return 0;
@@ -1338,6 +1341,7 @@
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
+	struct task_cputime cputime;
 	int ret = sig;
 
 	BUG_ON(sig == -1);
@@ -1368,10 +1372,9 @@
 
 	info.si_uid = tsk->uid;
 
-	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-						       tsk->signal->utime));
-	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-						       tsk->signal->stime));
+	thread_group_cputime(tsk, &cputime);
+	info.si_utime = cputime_to_jiffies(cputime.utime);
+	info.si_stime = cputime_to_jiffies(cputime.stime);
 
 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 83ba21a..7110dae 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -267,16 +267,12 @@
  */
 void irq_enter(void)
 {
-#ifdef CONFIG_NO_HZ
 	int cpu = smp_processor_id();
+
 	if (idle_cpu(cpu) && !in_interrupt())
-		tick_nohz_stop_idle(cpu);
-#endif
+		tick_check_idle(cpu);
+
 	__irq_enter();
-#ifdef CONFIG_NO_HZ
-	if (idle_cpu(cpu))
-		tick_nohz_update_jiffies();
-#endif
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index af3c7ce..8aff79d 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -37,9 +37,13 @@
 /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
 static unsigned int num_threads;
 static atomic_t thread_ack;
-static struct completion finished;
 static DEFINE_MUTEX(lock);
 
+static struct workqueue_struct *stop_machine_wq;
+static struct stop_machine_data active, idle;
+static const cpumask_t *active_cpus;
+static void *stop_machine_work;
+
 static void set_state(enum stopmachine_state newstate)
 {
 	/* Reset ack counter. */
@@ -51,21 +55,26 @@
 /* Last one to ack a state moves to the next state. */
 static void ack_state(void)
 {
-	if (atomic_dec_and_test(&thread_ack)) {
-		/* If we're the last one to ack the EXIT, we're finished. */
-		if (state == STOPMACHINE_EXIT)
-			complete(&finished);
-		else
-			set_state(state + 1);
-	}
+	if (atomic_dec_and_test(&thread_ack))
+		set_state(state + 1);
 }
 
-/* This is the actual thread which stops the CPU.  It exits by itself rather
- * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
-static int stop_cpu(struct stop_machine_data *smdata)
+/* This is the actual function which stops the CPU. It runs
+ * in the context of a dedicated stopmachine workqueue. */
+static void stop_cpu(struct work_struct *unused)
 {
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
+	struct stop_machine_data *smdata = &idle;
+	int cpu = smp_processor_id();
+	int err;
 
+	if (!active_cpus) {
+		if (cpu == first_cpu(cpu_online_map))
+			smdata = &active;
+	} else {
+		if (cpu_isset(cpu, *active_cpus))
+			smdata = &active;
+	}
 	/* Simple state machine */
 	do {
 		/* Chill out and ensure we re-read stopmachine_state. */
@@ -78,9 +87,11 @@
 				hard_irq_disable();
 				break;
 			case STOPMACHINE_RUN:
-				/* |= allows error detection if functions on
-				 * multiple CPUs. */
-				smdata->fnret |= smdata->fn(smdata->data);
+				/* On multiple CPUs only a single error code
+				 * is needed to tell that something failed. */
+				err = smdata->fn(smdata->data);
+				if (err)
+					smdata->fnret = err;
 				break;
 			default:
 				break;
@@ -90,7 +101,6 @@
 	} while (curstate != STOPMACHINE_EXIT);
 
 	local_irq_enable();
-	do_exit(0);
 }
 
 /* Callback for CPUs which aren't supposed to do anything. */
@@ -101,78 +111,34 @@
 
 int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-	int i, err;
-	struct stop_machine_data active, idle;
-	struct task_struct **threads;
+	struct work_struct *sm_work;
+	int i;
 
+	/* Set up initial state. */
+	mutex_lock(&lock);
+	num_threads = num_online_cpus();
+	active_cpus = cpus;
 	active.fn = fn;
 	active.data = data;
 	active.fnret = 0;
 	idle.fn = chill;
 	idle.data = NULL;
 
-	/* This could be too big for stack on large machines. */
-	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
-	if (!threads)
-		return -ENOMEM;
-
-	/* Set up initial state. */
-	mutex_lock(&lock);
-	init_completion(&finished);
-	num_threads = num_online_cpus();
 	set_state(STOPMACHINE_PREPARE);
 
-	for_each_online_cpu(i) {
-		struct stop_machine_data *smdata = &idle;
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-
-		if (!cpus) {
-			if (i == first_cpu(cpu_online_map))
-				smdata = &active;
-		} else {
-			if (cpu_isset(i, *cpus))
-				smdata = &active;
-		}
-
-		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
-					    i);
-		if (IS_ERR(threads[i])) {
-			err = PTR_ERR(threads[i]);
-			threads[i] = NULL;
-			goto kill_threads;
-		}
-
-		/* Place it onto correct cpu. */
-		kthread_bind(threads[i], i);
-
-		/* Make it highest prio. */
-		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
-			BUG();
-	}
-
-	/* We've created all the threads.  Wake them all: hold this CPU so one
+	/* Schedule the stop_cpu work on all cpus: hold this CPU so one
 	 * doesn't hit this CPU until we're ready. */
 	get_cpu();
-	for_each_online_cpu(i)
-		wake_up_process(threads[i]);
-
+	for_each_online_cpu(i) {
+		sm_work = percpu_ptr(stop_machine_work, i);
+		INIT_WORK(sm_work, stop_cpu);
+		queue_work_on(i, stop_machine_wq, sm_work);
+	}
 	/* This will release the thread on our CPU. */
 	put_cpu();
-	wait_for_completion(&finished);
+	flush_workqueue(stop_machine_wq);
 	mutex_unlock(&lock);
-
-	kfree(threads);
-
 	return active.fnret;
-
-kill_threads:
-	for_each_online_cpu(i)
-		if (threads[i])
-			kthread_stop(threads[i]);
-	mutex_unlock(&lock);
-
-	kfree(threads);
-	return err;
 }
 
 int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
@@ -187,3 +153,11 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
+
+static int __init stop_machine_init(void)
+{
+	stop_machine_wq = create_rt_workqueue("kstop");
+	stop_machine_work = alloc_percpu(struct work_struct);
+	return 0;
+}
+early_initcall(stop_machine_init);
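
The caller-visible API is unchanged by the switch from per-CPU kthreads to a dedicated workqueue. A minimal sketch of a caller, assuming an out-of-tree module context and a made-up callback name:

#include <linux/module.h>
#include <linux/stop_machine.h>

static int quiesce_fn(void *data)
{
	int *counter = data;

	/* Runs while every other online CPU spins with IRQs disabled. */
	(*counter)++;
	return 0;	/* a non-zero value is passed back to the caller */
}

static int __init quiesce_init(void)
{
	int counter = 0;
	int ret;

	/* NULL cpumask: run quiesce_fn on the first online CPU only. */
	ret = stop_machine(quiesce_fn, &counter, NULL);
	pr_info("stop_machine: ret=%d counter=%d\n", ret, counter);
	return ret;
}

static void __exit quiesce_exit(void)
{
}

module_init(quiesce_init);
module_exit(quiesce_exit);
MODULE_LICENSE("GPL");

With a NULL cpumask the callback runs on the first online CPU while every other online CPU chills in stop_cpu(), exactly as before the rework.
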
diff --git a/kernel/sys.c b/kernel/sys.c
index 0bc8fa3..53879cd 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -853,38 +853,28 @@
 	return old_fsgid;
 }
 
+void do_sys_times(struct tms *tms)
+{
+	struct task_cputime cputime;
+	cputime_t cutime, cstime;
+
+	spin_lock_irq(&current->sighand->siglock);
+	thread_group_cputime(current, &cputime);
+	cutime = current->signal->cutime;
+	cstime = current->signal->cstime;
+	spin_unlock_irq(&current->sighand->siglock);
+	tms->tms_utime = cputime_to_clock_t(cputime.utime);
+	tms->tms_stime = cputime_to_clock_t(cputime.stime);
+	tms->tms_cutime = cputime_to_clock_t(cutime);
+	tms->tms_cstime = cputime_to_clock_t(cstime);
+}
+
 asmlinkage long sys_times(struct tms __user * tbuf)
 {
-	/*
-	 *	In the SMP world we might just be unlucky and have one of
-	 *	the times increment as we use it. Since the value is an
-	 *	atomically safe type this is just fine. Conceptually its
-	 *	as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
 		struct tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;
 
-		spin_lock_irq(&tsk->sighand->siglock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-
-		tmp.tms_utime = cputime_to_clock_t(utime);
-		tmp.tms_stime = cputime_to_clock_t(stime);
-		tmp.tms_cutime = cputime_to_clock_t(cutime);
-		tmp.tms_cstime = cputime_to_clock_t(cstime);
+		do_sys_times(&tmp);
 		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 			return -EFAULT;
 	}
@@ -1449,7 +1439,6 @@
 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 {
 	struct rlimit new_rlim, *old_rlim;
-	unsigned long it_prof_secs;
 	int retval;
 
 	if (resource >= RLIM_NLIMITS)
@@ -1503,18 +1492,7 @@
 	if (new_rlim.rlim_cur == RLIM_INFINITY)
 		goto out;
 
-	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
-	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
-		unsigned long rlim_cur = new_rlim.rlim_cur;
-		cputime_t cputime;
-
-		cputime = secs_to_cputime(rlim_cur);
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-		read_unlock(&tasklist_lock);
-	}
+	update_rlimit_cpu(new_rlim.rlim_cur);
 out:
 	return 0;
 }
@@ -1552,11 +1530,8 @@
  *
  */
 
-static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
-				     cputime_t *utimep, cputime_t *stimep)
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
 {
-	*utimep = cputime_add(*utimep, t->utime);
-	*stimep = cputime_add(*stimep, t->stime);
 	r->ru_nvcsw += t->nvcsw;
 	r->ru_nivcsw += t->nivcsw;
 	r->ru_minflt += t->min_flt;
@@ -1570,12 +1545,13 @@
 	struct task_struct *t;
 	unsigned long flags;
 	cputime_t utime, stime;
+	struct task_cputime cputime;
 
 	memset((char *) r, 0, sizeof *r);
 	utime = stime = cputime_zero;
 
 	if (who == RUSAGE_THREAD) {
-		accumulate_thread_rusage(p, r, &utime, &stime);
+		accumulate_thread_rusage(p, r);
 		goto out;
 	}
 
@@ -1598,8 +1574,9 @@
 				break;
 
 		case RUSAGE_SELF:
-			utime = cputime_add(utime, p->signal->utime);
-			stime = cputime_add(stime, p->signal->stime);
+			thread_group_cputime(p, &cputime);
+			utime = cputime_add(utime, cputime.utime);
+			stime = cputime_add(stime, cputime.stime);
 			r->ru_nvcsw += p->signal->nvcsw;
 			r->ru_nivcsw += p->signal->nivcsw;
 			r->ru_minflt += p->signal->min_flt;
@@ -1608,7 +1585,7 @@
 			r->ru_oublock += p->signal->oublock;
 			t = p;
 			do {
-				accumulate_thread_rusage(t, r, &utime, &stime);
+				accumulate_thread_rusage(t, r);
 				t = next_thread(t);
 			} while (t != p);
 			break;
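
From user space nothing changes here: times(2) still reports clock ticks, only the kernel-side summation now goes through thread_group_cputime(). A small sanity-check program, for illustration:

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);

	if (times(&t) == (clock_t)-1) {
		perror("times");
		return 1;
	}
	printf("utime  %.2fs  stime  %.2fs (CLK_TCK=%ld)\n",
	       (double)t.tms_utime / hz, (double)t.tms_stime / hz, hz);
	printf("cutime %.2fs  cstime %.2fs\n",
	       (double)t.tms_cutime / hz, (double)t.tms_cstime / hz);
	return 0;
}
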
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 617d41e..a13bd4d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -276,6 +276,16 @@
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_shares_thresh",
+		.data		= &sysctl_sched_shares_thresh,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "sched_child_runs_first",
 		.data		= &sysctl_sched_child_runs_first,
 		.maxlen		= sizeof(unsigned int),
@@ -833,6 +843,16 @@
 		.proc_handler   = &proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "scan_unevictable_pages",
+		.data		= &scan_unevictable_pages,
+		.maxlen		= sizeof(scan_unevictable_pages),
+		.mode		= 0644,
+		.proc_handler	= &scan_unevictable_handler,
+	},
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
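
Both new entries are plain integer sysctls, so they can be inspected or tuned through procfs. A tiny reader, assuming the usual /proc/sys layout (the exact path below is an assumption derived from the procname, not something stated in this patch):

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_shares_thresh"; /* assumed path */
	FILE *f = fopen(path, "r");
	unsigned int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &val) == 1)
		printf("%s = %u\n", path, val);
	fclose(f);
	return 0;
}
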
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4ac..9ed2eec 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -325,6 +325,9 @@
 	unsigned long flags;
 	int ret;
 
+	/* save mult_orig on registration */
+	c->mult_orig = c->mult;
+
 	spin_lock_irqsave(&clocksource_lock, flags);
 	ret = clocksource_enqueue(c);
 	if (!ret)
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fd..1ca9955 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -61,6 +61,7 @@
 	.read		= jiffies_read,
 	.mask		= 0xffffffff, /*32bits*/
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+	.mult_orig	= NSEC_PER_JIFFY << JIFFIES_SHIFT,
 	.shift		= JIFFIES_SHIFT,
 };
 
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3..1a20715 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,13 +10,13 @@
 
 #include <linux/mm.h>
 #include <linux/time.h>
-#include <linux/timer.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
 #include <linux/math64.h>
 #include <linux/clocksource.h>
+#include <linux/workqueue.h>
 #include <asm/timex.h>
 
 /*
@@ -218,11 +218,11 @@
 /* Disable the cmos update - used by virtualization and embedded */
 int no_sync_cmos_clock  __read_mostly;
 
-static void sync_cmos_clock(unsigned long dummy);
+static void sync_cmos_clock(struct work_struct *work);
 
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
 {
 	struct timespec now, next;
 	int fail = 1;
@@ -258,13 +258,13 @@
 		next.tv_sec++;
 		next.tv_nsec -= NSEC_PER_SEC;
 	}
-	mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
 static void notify_cmos_timer(void)
 {
 	if (!no_sync_cmos_clock)
-		mod_timer(&sync_cmos_timer, jiffies + 1);
+		schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
@@ -277,38 +277,50 @@
 int do_adjtimex(struct timex *txc)
 {
 	struct timespec ts;
-	long save_adjust, sec;
 	int result;
 
-	/* In order to modify anything, you gotta be super-user! */
-	if (txc->modes && !capable(CAP_SYS_TIME))
-		return -EPERM;
-
-	/* Now we validate the data before disabling interrupts */
-
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
+	/* Validate the data before disabling interrupts */
+	if (txc->modes & ADJ_ADJTIME) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes & ~ADJ_OFFSET_SS_READ)
+		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
 			return -EINVAL;
+		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+		    !capable(CAP_SYS_TIME))
+			return -EPERM;
+	} else {
+		/* In order to modify anything, you gotta be super-user! */
+		if (txc->modes && !capable(CAP_SYS_TIME))
+			return -EPERM;
+
+		/* if the quartz is off by more than 10% something is VERY wrong! */
+		if (txc->modes & ADJ_TICK &&
+		    (txc->tick <  900000/USER_HZ ||
+		     txc->tick > 1100000/USER_HZ))
+				return -EINVAL;
+
+		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+			hrtimer_cancel(&leap_timer);
 	}
 
-	/* if the quartz is off by more than 10% something is VERY wrong ! */
-	if (txc->modes & ADJ_TICK)
-		if (txc->tick <  900000/USER_HZ ||
-		    txc->tick > 1100000/USER_HZ)
-			return -EINVAL;
-
-	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
-		hrtimer_cancel(&leap_timer);
 	getnstimeofday(&ts);
 
 	write_seqlock_irq(&xtime_lock);
 
-	/* Save for later - semantics of adjtime is to return old value */
-	save_adjust = time_adjust;
-
 	/* If there are input parameters, then process them */
+	if (txc->modes & ADJ_ADJTIME) {
+		long save_adjust = time_adjust;
+
+		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+			/* adjtime() is independent from ntp_adjtime() */
+			time_adjust = txc->offset;
+			ntp_update_frequency();
+		}
+		txc->offset = save_adjust;
+		goto adj_done;
+	}
 	if (txc->modes) {
+		long sec;
+
 		if (txc->modes & ADJ_STATUS) {
 			if ((time_status & STA_PLL) &&
 			    !(txc->status & STA_PLL)) {
@@ -375,13 +387,8 @@
 		if (txc->modes & ADJ_TAI && txc->constant > 0)
 			time_tai = txc->constant;
 
-		if (txc->modes & ADJ_OFFSET) {
-			if (txc->modes == ADJ_OFFSET_SINGLESHOT)
-				/* adjtime() is independent from ntp_adjtime() */
-				time_adjust = txc->offset;
-			else
-				ntp_update_offset(txc->offset);
-		}
+		if (txc->modes & ADJ_OFFSET)
+			ntp_update_offset(txc->offset);
 		if (txc->modes & ADJ_TICK)
 			tick_usec = txc->tick;
 
@@ -389,22 +396,18 @@
 			ntp_update_frequency();
 	}
 
+	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+				  NTP_SCALE_SHIFT);
+	if (!(time_status & STA_NANO))
+		txc->offset /= NSEC_PER_USEC;
+
+adj_done:
 	result = time_state;	/* mostly `TIME_OK' */
 	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;
 
-	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-	    (txc->modes == ADJ_OFFSET_SS_READ))
-		txc->offset = save_adjust;
-	else {
-		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
-					  NTP_SCALE_SHIFT);
-		if (!(time_status & STA_NANO))
-			txc->offset /= NSEC_PER_USEC;
-	}
-	txc->freq	   = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
-					 (s64)PPM_SCALE_INV,
-					 NTP_SCALE_SHIFT);
+	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+					 (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror	   = time_maxerror;
 	txc->esterror	   = time_esterror;
 	txc->status	   = time_status;
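
The rework separates the old adjtime()-style single-shot slew from the ntp_adjtime()-style absolute mode bits. Both still enter through adjtimex(2) from user space; the sketch below only does a read-only query (safe to run unprivileged), and the commented-out part notes how a single-shot slew would exercise the new ADJ_ADJTIME branch, assuming the usual timex mode-bit layout.

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* read-only query */
	int state = adjtimex(&tx);

	if (state == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("state=%d offset=%ld freq=%ld status=0x%x\n",
	       state, tx.offset, tx.freq, tx.status);

	/* To slew the clock the old adjtime() way (needs CAP_SYS_TIME):
	 *   tx.modes  = ADJ_OFFSET_SINGLESHOT;
	 *   tx.offset = 1000;		// microseconds
	 *   adjtimex(&tx);		// returns the previous pending adjustment
	 */
	return 0;
}
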
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index cb01cd8..f98a1b7 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,6 +384,19 @@
 }
 
 /*
+ * Called from irq_enter() when idle was interrupted to reenable the
+ * per cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+	if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+	}
+}
+
+/*
  * Handle oneshot mode broadcasting
  */
 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4692487..b1c05bf 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -45,6 +46,7 @@
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b711ffc..727c1ae 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -155,7 +155,7 @@
 	touch_softlockup_watchdog();
 }
 
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
@@ -377,6 +377,32 @@
 	return ts->sleep_length;
 }
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	ts->sched_timer.expires = ts->idle_tick;
+
+	while (1) {
+		/* Forward the time to expire in the future */
+		hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+			hrtimer_start(&ts->sched_timer,
+				      ts->sched_timer.expires,
+				      HRTIMER_MODE_ABS);
+			/* Check, if the timer was already in the past */
+			if (hrtimer_active(&ts->sched_timer))
+				break;
+		} else {
+			if (!tick_program_event(ts->sched_timer.expires, 0))
+				break;
+		}
+		/* Update jiffies and reread time */
+		tick_do_update_jiffies64(now);
+		now = ktime_get();
+	}
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -430,28 +456,7 @@
 	 */
 	ts->tick_stopped  = 0;
 	ts->idle_exittime = now;
-	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
-
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
-				      HRTIMER_MODE_ABS);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
-				break;
-		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
-		now = ktime_get();
-	}
+	tick_nohz_restart(ts, now);
 	local_irq_enable();
 }
 
@@ -503,10 +508,6 @@
 	update_process_times(user_mode(regs));
 	profile_tick(CPU_PROFILING);
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return;
-
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -552,6 +553,37 @@
 	       smp_processor_id());
 }
 
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note: this is different from tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t delta, now;
+
+	if (!ts->tick_stopped)
+		return;
+
+	/*
+	 * Do not touch the tick device when the next expiry has either
+	 * already been reached or is no more than one tick period away.
+	 */
+	now = ktime_get();
+	delta =	ktime_sub(ts->sched_timer.expires, now);
+	if (delta.tv64 <= tick_period.tv64)
+		return;
+
+	tick_nohz_restart(ts, now);
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
@@ -559,6 +591,19 @@
 #endif /* NO_HZ */
 
 /*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+	tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+	tick_nohz_stop_idle(cpu);
+	tick_nohz_update_jiffies();
+	tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -611,10 +656,6 @@
 		profile_tick(CPU_PROFILING);
 	}
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return HRTIMER_NORESTART;
-
 	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
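
tick_nohz_restart() above brings the stopped sched timer back by forwarding it whole tick periods until it lies in the future, reprogramming and re-checking on every step. A user-space sketch of just the forwarding arithmetic, computed in one step instead of the kernel's retry loop; plain nanosecond integers stand in for ktime_t, and the values are made up.

#include <stdio.h>
#include <stdint.h>

/* Push an expiry forward by whole periods until it lies in the future.
 * The kernel's hrtimer_forward() also tracks overruns; this is only the
 * arithmetic part of the idea. */
static int64_t forward(int64_t expires, int64_t now, int64_t period)
{
	if (expires > now)
		return expires;
	return expires + ((now - expires) / period + 1) * period;
}

int main(void)
{
	int64_t period = 4000000;	/* 4 ms tick period (HZ=250), in ns */
	int64_t next = forward(10000000, 123456789, period);

	printf("tick stopped at 10 ms, now 123.456789 ms, next tick at %lld ns\n",
	       (long long)next);
	return 0;
}
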
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f..e7acfb4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,26 @@
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
  *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
  */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
 	cycle_t cycle_now, cycle_delta;
-	s64 ns_offset;
+	s64 nsec;
 
-	/* read clocksource: */
 	cycle_now = clocksource_read(clock);
-
-	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	clock->cycle_last = cycle_now;
 
-	/* convert to nanoseconds: */
-	ns_offset = cyc2ns(clock, cycle_delta);
+	nsec = cyc2ns(clock, cycle_delta);
+	timespec_add_ns(&xtime, nsec);
 
-	return ns_offset;
+	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+	clock->raw_time.tv_nsec += nsec;
 }
 
 /**
@@ -89,6 +88,7 @@
  */
 void getnstimeofday(struct timespec *ts)
 {
+	cycle_t cycle_now, cycle_delta;
 	unsigned long seq;
 	s64 nsecs;
 
@@ -96,7 +96,15 @@
 		seq = read_seqbegin(&xtime_lock);
 
 		*ts = xtime;
-		nsecs = __get_nsec_offset();
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = cyc2ns(clock, cycle_delta);
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -129,22 +137,22 @@
  */
 int do_settimeofday(struct timespec *tv)
 {
+	struct timespec ts_delta;
 	unsigned long flags;
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	nsec -= __get_nsec_offset();
+	clocksource_forward_now();
 
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
 
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+	xtime = *tv;
+
 	update_xtime_cache(0);
 
 	clock->error = 0;
@@ -170,22 +178,19 @@
 static void change_clocksource(void)
 {
 	struct clocksource *new;
-	cycle_t now;
-	u64 nsec;
 
 	new = clocksource_get_next();
 
 	if (clock == new)
 		return;
 
-	new->cycle_last = 0;
-	now = clocksource_read(new);
-	nsec =  __get_nsec_offset();
-	timespec_add_ns(&xtime, nsec);
+	clocksource_forward_now();
+
+	new->raw_time = clock->raw_time;
 
 	clock = new;
-	clock->cycle_last = now;
-
+	clock->cycle_last = 0;
+	clock->cycle_last = clocksource_read(new);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,11 +205,44 @@
 	 */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
 /**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely un-modified by ntp)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+	unsigned long seq;
+	s64 nsecs;
+	cycle_t cycle_now, cycle_delta;
+
+	do {
+		seq = read_seqbegin(&xtime_lock);
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+		*ts = clock->raw_time;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
+
+/**
  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  */
 int timekeeping_valid_for_hres(void)
@@ -265,8 +303,6 @@
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
-	/* Make sure that we have the correct xtime reference */
-	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
@@ -319,8 +353,7 @@
 	timekeeping_suspend_time = read_persistent_clock();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
-	/* Get the current xtime offset */
-	timekeeping_suspend_nsecs = __get_nsec_offset();
+	clocksource_forward_now();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -454,23 +487,29 @@
 #else
 	offset = clock->cycle_interval;
 #endif
-	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
 
 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
 	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		clock->xtime_nsec += clock->xtime_interval;
-		clock->cycle_last += clock->cycle_interval;
 		offset -= clock->cycle_interval;
+		clock->cycle_last += clock->cycle_interval;
 
+		clock->xtime_nsec += clock->xtime_interval;
 		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
 			second_overflow();
 		}
 
+		clock->raw_time.tv_nsec += clock->raw_interval;
+		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+			clock->raw_time.tv_sec++;
+		}
+
 		/* accumulate error between NTP and clock interval */
 		clock->error += tick_length;
 		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@
 	/* correct the clock when NTP error is too big */
 	clocksource_adjust(offset);
 
-	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+	/* store full nanoseconds into xtime after rounding it up, and
+	 * add the remainder to the error difference.
+	 */
+	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
 
 	update_xtime_cache(cyc2ns(clock, offset));
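
clocksource_forward_now(), getnstimeofday() and the new getrawmonotonic() all reduce to the same conversion: ns = (cycle_delta * mult) >> shift, where raw time uses mult_orig, the multiplier saved at registration before NTP skews mult. A user-space sketch with a made-up 1 MHz clocksource:

#include <stdio.h>
#include <stdint.h>

static uint64_t cyc2ns(uint64_t cycle_delta, uint32_t mult, uint32_t shift)
{
	return (cycle_delta * mult) >> shift;
}

int main(void)
{
	/* Hypothetical 1 MHz clocksource: mult/2^shift == 1000 ns per cycle. */
	uint32_t shift = 20;
	uint32_t mult = 1000u << shift;		/* 1,048,576,000 fits in u32 */
	uint64_t cycle_delta = 12345;

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycle_delta,
	       (unsigned long long)cyc2ns(cycle_delta, mult, shift));
	return 0;
}
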
 
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a40e20f..f642691 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -47,13 +47,14 @@
 }
 
 static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+	    int idx, u64 now)
 {
 #ifdef CONFIG_TIMER_STATS
 	char tmp[TASK_COMM_LEN + 1];
 #endif
 	SEQ_printf(m, " #%d: ", idx);
-	print_name_offset(m, timer);
+	print_name_offset(m, taddr);
 	SEQ_printf(m, ", ");
 	print_name_offset(m, timer->function);
 	SEQ_printf(m, ", S:%02lx", timer->state);
@@ -99,7 +100,7 @@
 		tmp = *timer;
 		spin_unlock_irqrestore(&base->cpu_base->lock, flags);
 
-		print_timer(m, &tmp, i, now);
+		print_timer(m, timer, &tmp, i, now);
 		next++;
 		goto next_one;
 	}
@@ -109,6 +110,7 @@
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+	SEQ_printf(m, "  .base:       %p\n", base);
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -183,12 +185,16 @@
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 {
 	struct clock_event_device *dev = td->evtdev;
 
 	SEQ_printf(m, "\n");
 	SEQ_printf(m, "Tick Device: mode:     %d\n", td->mode);
+	if (cpu < 0)
+		SEQ_printf(m, "Broadcast device\n");
+	else
+		SEQ_printf(m, "Per CPU device: %d\n", cpu);
 
 	SEQ_printf(m, "Clock Event Device: ");
 	if (!dev) {
@@ -222,7 +228,7 @@
 	int cpu;
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-	print_tickdevice(m, tick_get_broadcast_device());
+	print_tickdevice(m, tick_get_broadcast_device(), -1);
 	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
 		   tick_get_broadcast_mask()->bits[0]);
 #ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +238,7 @@
 	SEQ_printf(m, "\n");
 #endif
 	for_each_online_cpu(cpu)
-		   print_tickdevice(m, tick_get_device(cpu));
+		print_tickdevice(m, tick_get_device(cpu), cpu);
 	SEQ_printf(m, "\n");
 }
 #else
@@ -244,7 +250,7 @@
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Timer List Version: v0.3\n");
+	SEQ_printf(m, "Timer List Version: v0.4\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index 510fe69..56becf3 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1436,9 +1436,11 @@
 	BUG_ON(cpu_online(cpu));
 	old_base = per_cpu(tvec_bases, cpu);
 	new_base = get_cpu_var(tvec_bases);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, so deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	BUG_ON(old_base->running_timer);
@@ -1453,8 +1455,7 @@
 	}
 
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(tvec_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 263e9e6..1cb3e1f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,23 +1,37 @@
 #
 # Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
 #
-config HAVE_FTRACE
+
+config NOP_TRACER
 	bool
 
+config HAVE_FTRACE
+	bool
+	select NOP_TRACER
+
 config HAVE_DYNAMIC_FTRACE
 	bool
 
+config HAVE_FTRACE_MCOUNT_RECORD
+	bool
+
 config TRACER_MAX_TRACE
 	bool
 
+config RING_BUFFER
+	bool
+
 config TRACING
 	bool
 	select DEBUG_FS
+	select RING_BUFFER
 	select STACKTRACE
+	select TRACEPOINTS
 
 config FTRACE
 	bool "Kernel Function Tracer"
 	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
 	select FRAME_POINTER
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
@@ -36,6 +50,7 @@
 	depends on TRACE_IRQFLAGS_SUPPORT
 	depends on GENERIC_TIME
 	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
 	select TRACE_IRQFLAGS
 	select TRACING
 	select TRACER_MAX_TRACE
@@ -59,6 +74,7 @@
 	depends on GENERIC_TIME
 	depends on PREEMPT
 	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
 	select TRACING
 	select TRACER_MAX_TRACE
 	help
@@ -86,6 +102,7 @@
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
 	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	select TRACER_MAX_TRACE
@@ -96,16 +113,56 @@
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
 	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
 	select TRACING
 	select MARKERS
 	help
 	  This tracer gets called from the context switch and records
 	  all switching of tasks.
 
+config BOOT_TRACER
+	bool "Trace boot initcalls"
+	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
+	select TRACING
+	help
+	  This tracer helps developers to optimize boot times: it records
+	  the timings of the initcalls and traces key events and the identity
+	  of tasks that can cause boot delays, such as context-switches.
+
+	  It is meant to be parsed by the scripts/bootgraph.pl tool to
+	  produce pretty graphics about boot inefficiencies, giving a visual
+	  representation of the delays during initcalls - but the raw
+	  /debug/tracing/trace text output is readable too.
+
+	  ( Note that tracing self-tests can't be enabled if this tracer is
+	    selected, because the self-tests are an initcall as well and that
+	    would invalidate the boot trace. )
+
+config STACK_TRACER
+	bool "Trace max stack"
+	depends on HAVE_FTRACE
+	depends on DEBUG_KERNEL
+	select FTRACE
+	select STACKTRACE
+	help
+	  This special tracer records the maximum stack footprint of the
+	  kernel and displays it in debugfs/tracing/stack_trace.
+
+	  This tracer works by hooking into every function call that the
+	  kernel executes, and keeping a maximum stack depth value and
+	  stack-trace saved. Because this logic has to execute in every
+	  kernel function, all the time, this option can slow down the
+	  kernel measurably and is generally intended for kernel
+	  developers only.
+
+	  Say N if unsure.
+
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	depends on FTRACE
 	depends on HAVE_DYNAMIC_FTRACE
+	depends on DEBUG_KERNEL
 	default y
 	help
          This option will modify all the calls to ftrace dynamically
@@ -121,12 +178,17 @@
 	 were made. If so, it runs stop_machine (stops all CPUS)
 	 and modifies the code to jump over the call to ftrace.
 
+config FTRACE_MCOUNT_RECORD
+	def_bool y
+	depends on DYNAMIC_FTRACE
+	depends on HAVE_FTRACE_MCOUNT_RECORD
+
 config FTRACE_SELFTEST
 	bool
 
 config FTRACE_STARTUP_TEST
 	bool "Perform a startup test on ftrace"
-	depends on TRACING
+	depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
 	select FTRACE_SELFTEST
 	help
 	  This option performs a series of startup tests on ftrace. On bootup
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 71d17de..a85dfba 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -11,6 +11,7 @@
 endif
 
 obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
@@ -19,6 +20,9 @@
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_NOP_TRACER) += trace_nop.o
+obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6e3af3..4dda4f6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -81,7 +81,7 @@
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-	/* Should never be called by interrupts */
+	/* should not be called from interrupt context */
 	spin_lock(&ftrace_lock);
 
 	ops->next = ftrace_list;
@@ -115,6 +115,7 @@
 	struct ftrace_ops **p;
 	int ret = 0;
 
+	/* should not be called from interrupt context */
 	spin_lock(&ftrace_lock);
 
 	/*
@@ -153,6 +154,30 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+#ifndef CONFIG_FTRACE_MCOUNT_RECORD
+/*
+ * The hash lock is only needed when the recording of the mcount
+ * callers is dynamic. That is, when the callers record themselves
+ * at run time rather than being recorded at compile time.
+ */
+static DEFINE_SPINLOCK(ftrace_hash_lock);
+#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
+#define ftrace_hash_unlock(flags) \
+			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
+#else
+/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
+#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
+#define ftrace_hash_unlock(flags) do { } while(0)
+#endif
+
+/*
+ * Since MCOUNT_ADDR may point to mcount itself, we do not want
+ * to get it confused by reading a reference in the code as we
+ * are parsing on objcopy output of text. Use a variable for
+ * it instead.
+ */
+static unsigned long mcount_addr = MCOUNT_ADDR;
+
 static struct task_struct *ftraced_task;
 
 enum {
@@ -171,7 +196,6 @@
 
 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
 
-static DEFINE_SPINLOCK(ftrace_shutdown_lock);
 static DEFINE_MUTEX(ftraced_lock);
 static DEFINE_MUTEX(ftrace_regex_lock);
 
@@ -294,13 +318,37 @@
 
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
-	/* no locking, only called from kstop_machine */
-
 	rec->ip = (unsigned long)ftrace_free_records;
 	ftrace_free_records = rec;
 	rec->flags |= FTRACE_FL_FREE;
 }
 
+void ftrace_release(void *start, unsigned long size)
+{
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	unsigned long s = (unsigned long)start;
+	unsigned long e = s + size;
+	int i;
+
+	if (ftrace_disabled || !start)
+		return;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			if ((rec->ip >= s) && (rec->ip < e))
+				ftrace_free_rec(rec);
+		}
+	}
+	spin_unlock(&ftrace_lock);
+
+}
+
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
@@ -338,7 +386,6 @@
 	unsigned long flags;
 	unsigned long key;
 	int resched;
-	int atomic;
 	int cpu;
 
 	if (!ftrace_enabled || ftrace_disabled)
@@ -368,9 +415,7 @@
 	if (ftrace_ip_in_hash(ip, key))
 		goto out;
 
-	atomic = irqs_disabled();
-
-	spin_lock_irqsave(&ftrace_shutdown_lock, flags);
+	ftrace_hash_lock(flags);
 
 	/* This ip may have hit the hash before the lock */
 	if (ftrace_ip_in_hash(ip, key))
@@ -387,7 +432,7 @@
 	ftraced_trigger = 1;
 
  out_unlock:
-	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
+	ftrace_hash_unlock(flags);
  out:
 	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
 
@@ -531,6 +576,16 @@
 	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
 }
 
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+	int i;
+
+	printk(KERN_CONT "%s", fmt);
+
+	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
 static int
 ftrace_code_disable(struct dyn_ftrace *rec)
 {
@@ -541,10 +596,27 @@
 	ip = rec->ip;
 
 	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, MCOUNT_ADDR);
+	call = ftrace_call_replace(ip, mcount_addr);
 
 	failed = ftrace_modify_code(ip, call, nop);
 	if (failed) {
+		switch (failed) {
+		case 1:
+			WARN_ON_ONCE(1);
+			pr_info("ftrace faulted on modifying ");
+			print_ip_sym(ip);
+			break;
+		case 2:
+			WARN_ON_ONCE(1);
+			pr_info("ftrace failed to modify ");
+			print_ip_sym(ip);
+			print_ip_ins(" expected: ", call);
+			print_ip_ins(" actual: ", (unsigned char *)ip);
+			print_ip_ins(" replace: ", nop);
+			printk(KERN_CONT "\n");
+			break;
+		}
+
 		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
@@ -792,47 +864,7 @@
 	return 1;
 }
 
-static int ftraced(void *ignore)
-{
-	unsigned long usecs;
-
-	while (!kthread_should_stop()) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		/* check once a second */
-		schedule_timeout(HZ);
-
-		if (unlikely(ftrace_disabled))
-			continue;
-
-		mutex_lock(&ftrace_sysctl_lock);
-		mutex_lock(&ftraced_lock);
-		if (!ftraced_suspend && !ftraced_stop &&
-		    ftrace_update_code()) {
-			usecs = nsecs_to_usecs(ftrace_update_time);
-			if (ftrace_update_tot_cnt > 100000) {
-				ftrace_update_tot_cnt = 0;
-				pr_info("hm, dftrace overflow: %lu change%s"
-					" (%lu total) in %lu usec%s\n",
-					ftrace_update_cnt,
-					ftrace_update_cnt != 1 ? "s" : "",
-					ftrace_update_tot_cnt,
-					usecs, usecs != 1 ? "s" : "");
-				ftrace_disabled = 1;
-				WARN_ON_ONCE(1);
-			}
-		}
-		mutex_unlock(&ftraced_lock);
-		mutex_unlock(&ftrace_sysctl_lock);
-
-		ftrace_shutdown_replenish();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
-
-static int __init ftrace_dyn_table_alloc(void)
+static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 {
 	struct ftrace_page *pg;
 	int cnt;
@@ -859,7 +891,9 @@
 
 	pg = ftrace_pages = ftrace_pages_start;
 
-	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+	cnt = num_to_init / ENTRIES_PER_PAGE;
+	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
+		num_to_init, cnt);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -901,6 +935,8 @@
 
 	(*pos)++;
 
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
  retry:
 	if (iter->idx >= iter->pg->index) {
 		if (iter->pg->next) {
@@ -910,15 +946,13 @@
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
-		if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
+		if ((rec->flags & FTRACE_FL_FREE) ||
+
+		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
 		     (rec->flags & FTRACE_FL_FAILED)) ||
 
 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
-		     (!(rec->flags & FTRACE_FL_FAILED) ||
-		      (rec->flags & FTRACE_FL_FREE))) ||
-
-		    ((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(rec->flags & FTRACE_FL_FILTER)) ||
+		     !(rec->flags & FTRACE_FL_FAILED)) ||
 
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
@@ -926,6 +960,7 @@
 			goto retry;
 		}
 	}
+	spin_unlock(&ftrace_lock);
 
 	iter->pos = *pos;
 
@@ -1039,8 +1074,8 @@
 	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	unsigned i;
 
-	/* keep kstop machine from running */
-	preempt_disable();
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
 	if (enable)
 		ftrace_filtered = 0;
 	pg = ftrace_pages_start;
@@ -1053,7 +1088,7 @@
 		}
 		pg = pg->next;
 	}
-	preempt_enable();
+	spin_unlock(&ftrace_lock);
 }
 
 static int
@@ -1165,8 +1200,8 @@
 		}
 	}
 
-	/* keep kstop machine from running */
-	preempt_disable();
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
 	if (enable)
 		ftrace_filtered = 1;
 	pg = ftrace_pages_start;
@@ -1203,7 +1238,7 @@
 		}
 		pg = pg->next;
 	}
-	preempt_enable();
+	spin_unlock(&ftrace_lock);
 }
 
 static ssize_t
@@ -1556,6 +1591,114 @@
 
 fs_initcall(ftrace_init_debugfs);
 
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+static int ftrace_convert_nops(unsigned long *start,
+			       unsigned long *end)
+{
+	unsigned long *p;
+	unsigned long addr;
+	unsigned long flags;
+
+	p = start;
+	while (p < end) {
+		addr = ftrace_call_adjust(*p++);
+		/* should not be called from interrupt context */
+		spin_lock(&ftrace_lock);
+		ftrace_record_ip(addr);
+		spin_unlock(&ftrace_lock);
+		ftrace_shutdown_replenish();
+	}
+
+	/* p is ignored */
+	local_irq_save(flags);
+	__ftrace_update_code(p);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+void ftrace_init_module(unsigned long *start, unsigned long *end)
+{
+	if (ftrace_disabled || start == end)
+		return;
+	ftrace_convert_nops(start, end);
+}
+
+extern unsigned long __start_mcount_loc[];
+extern unsigned long __stop_mcount_loc[];
+
+void __init ftrace_init(void)
+{
+	unsigned long count, addr, flags;
+	int ret;
+
+	/* Keep the ftrace pointer to the stub */
+	addr = (unsigned long)ftrace_stub;
+
+	local_irq_save(flags);
+	ftrace_dyn_arch_init(&addr);
+	local_irq_restore(flags);
+
+	/* ftrace_dyn_arch_init places the return code in addr */
+	if (addr)
+		goto failed;
+
+	count = __stop_mcount_loc - __start_mcount_loc;
+
+	ret = ftrace_dyn_table_alloc(count);
+	if (ret)
+		goto failed;
+
+	last_ftrace_enabled = ftrace_enabled = 1;
+
+	ret = ftrace_convert_nops(__start_mcount_loc,
+				  __stop_mcount_loc);
+
+	return;
+ failed:
+	ftrace_disabled = 1;
+}
+#else /* CONFIG_FTRACE_MCOUNT_RECORD */
+static int ftraced(void *ignore)
+{
+	unsigned long usecs;
+
+	while (!kthread_should_stop()) {
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		/* check once a second */
+		schedule_timeout(HZ);
+
+		if (unlikely(ftrace_disabled))
+			continue;
+
+		mutex_lock(&ftrace_sysctl_lock);
+		mutex_lock(&ftraced_lock);
+		if (!ftraced_suspend && !ftraced_stop &&
+		    ftrace_update_code()) {
+			usecs = nsecs_to_usecs(ftrace_update_time);
+			if (ftrace_update_tot_cnt > 100000) {
+				ftrace_update_tot_cnt = 0;
+				pr_info("hm, dftrace overflow: %lu change%s"
+					" (%lu total) in %lu usec%s\n",
+					ftrace_update_cnt,
+					ftrace_update_cnt != 1 ? "s" : "",
+					ftrace_update_tot_cnt,
+					usecs, usecs != 1 ? "s" : "");
+				ftrace_disabled = 1;
+				WARN_ON_ONCE(1);
+			}
+		}
+		mutex_unlock(&ftraced_lock);
+		mutex_unlock(&ftrace_sysctl_lock);
+
+		ftrace_shutdown_replenish();
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
 static int __init ftrace_dynamic_init(void)
 {
 	struct task_struct *p;
@@ -1572,7 +1715,7 @@
 		goto failed;
 	}
 
-	ret = ftrace_dyn_table_alloc();
+	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
 	if (ret)
 		goto failed;
 
@@ -1593,6 +1736,8 @@
 }
 
 core_initcall(ftrace_dynamic_init);
+#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
+
 #else
 # define ftrace_startup()		do { } while (0)
 # define ftrace_shutdown()		do { } while (0)
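
With FTRACE_MCOUNT_RECORD the build collects every mcount call site into a table bounded by __start_mcount_loc/__stop_mcount_loc, and ftrace_init() simply walks that array instead of discovering call sites at run time through the ftraced kthread. The start/stop-symbol walk itself is ordinary GNU toolchain behaviour; here is a user-space sketch with a hypothetical section name ("mylocs" is made up, and the example is gcc/GNU-ld specific):

#include <stdio.h>

static void fn_a(void) { }
static void fn_b(void) { }

/* Emulates the build step that records call-site addresses into a
 * named section. */
static void *locs[] __attribute__((used, section("mylocs"))) = {
	(void *)fn_a,
	(void *)fn_b,
};

/* GNU ld provides these bounds for any section whose name is a valid C
 * identifier, which is how __start_mcount_loc/__stop_mcount_loc bound
 * the kernel's table. */
extern void *__start_mylocs[];
extern void *__stop_mylocs[];

int main(void)
{
	void **p;

	for (p = __start_mylocs; p < __stop_mylocs; p++)
		printf("recorded address %p\n", *p);
	printf("%ld entries\n", (long)(__stop_mylocs - __start_mylocs));
	return 0;
}
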
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
new file mode 100644
index 0000000..94af1fe
--- /dev/null
+++ b/kernel/trace/ring_buffer.c
@@ -0,0 +1,2014 @@
+/*
+ * Generic ring buffer
+ *
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ */
+#include <linux/ring_buffer.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>	/* used for sched_clock() (for now) */
+#include <linux/init.h>
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+
+/* Up this if you want to test the TIME_EXTENTS and normalization */
+#define DEBUG_SHIFT 0
+
+/* FIXME!!! */
+u64 ring_buffer_time_stamp(int cpu)
+{
+	/* shift to debug/test normalization and TIME_EXTENTS */
+	return sched_clock() << DEBUG_SHIFT;
+}
+
+void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
+{
+	/* Just stupid testing the normalize function and deltas */
+	*ts >>= DEBUG_SHIFT;
+}
+
+#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
+#define RB_ALIGNMENT_SHIFT	2
+#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
+#define RB_MAX_SMALL_DATA	28
+
+enum {
+	RB_LEN_TIME_EXTEND = 8,
+	RB_LEN_TIME_STAMP = 16,
+};
+
+/* inline for ring buffer fast paths */
+static inline unsigned
+rb_event_length(struct ring_buffer_event *event)
+{
+	unsigned length;
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		/* undefined */
+		return -1;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		return RB_LEN_TIME_EXTEND;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		return RB_LEN_TIME_STAMP;
+
+	case RINGBUF_TYPE_DATA:
+		if (event->len)
+			length = event->len << RB_ALIGNMENT_SHIFT;
+		else
+			length = event->array[0];
+		return length + RB_EVNT_HDR_SIZE;
+	default:
+		BUG();
+	}
+	/* not hit */
+	return 0;
+}
+
+/**
+ * ring_buffer_event_length - return the length of the event
+ * @event: the event to get the length of
+ */
+unsigned ring_buffer_event_length(struct ring_buffer_event *event)
+{
+	return rb_event_length(event);
+}
+
+/* inline for ring buffer fast paths */
+static inline void *
+rb_event_data(struct ring_buffer_event *event)
+{
+	BUG_ON(event->type != RINGBUF_TYPE_DATA);
+	/* If length is in len field, then array[0] has the data */
+	if (event->len)
+		return (void *)&event->array[0];
+	/* Otherwise length is in array[0] and array[1] has the data */
+	return (void *)&event->array[1];
+}
+
+/**
+ * ring_buffer_event_data - return the data of the event
+ * @event: the event to get the data from
+ */
+void *ring_buffer_event_data(struct ring_buffer_event *event)
+{
+	return rb_event_data(event);
+}
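
The data-event encoding above keeps small payload lengths in the 3-bit len field (in 4-byte units, hence RB_MAX_SMALL_DATA == 28) and spills larger lengths into array[0]. A user-space sketch of that encoding, ignoring the header-size bookkeeping the real code also does; the struct layout merely mirrors ring_buffer_event for illustration.

#include <stdio.h>
#include <stdint.h>

#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

struct rb_event {
	uint32_t type:2, len:3, time_delta:27;
	uint32_t array[];
};

/* Round the payload up to 4-byte alignment and store it the way the
 * ring buffer does: small lengths in len (4-byte units), big ones in
 * array[0]. */
static void encode_len(struct rb_event *event, unsigned length)
{
	length = (length + RB_ALIGNMENT - 1) & ~(RB_ALIGNMENT - 1);
	if (length > RB_MAX_SMALL_DATA) {
		event->len = 0;
		event->array[0] = length;
	} else {
		event->len = length >> RB_ALIGNMENT_SHIFT;
	}
}

int main(void)
{
	uint32_t buf[8] = { 0 };
	struct rb_event *ev = (struct rb_event *)buf;

	encode_len(ev, 12);
	printf("12 bytes -> len=%u array[0]=%u\n",
	       (unsigned)ev->len, (unsigned)ev->array[0]);
	encode_len(ev, 40);
	printf("40 bytes -> len=%u array[0]=%u\n",
	       (unsigned)ev->len, (unsigned)ev->array[0]);
	return 0;
}
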
+
+#define for_each_buffer_cpu(buffer, cpu)		\
+	for_each_cpu_mask(cpu, buffer->cpumask)
+
+#define TS_SHIFT	27
+#define TS_MASK		((1ULL << TS_SHIFT) - 1)
+#define TS_DELTA_TEST	(~TS_MASK)
+
+/*
+ * This hack stolen from mm/slob.c.
+ * We can store per page timing information in the page frame of the page.
+ * Thanks to Peter Zijlstra for suggesting this idea.
+ */
+struct buffer_page {
+	u64		 time_stamp;	/* page time stamp */
+	local_t		 write;		/* index for next write */
+	local_t		 commit;	/* write committed index */
+	unsigned	 read;		/* index for next read */
+	struct list_head list;		/* list of free pages */
+	void *page;			/* Actual data page */
+};
+
+/*
+ * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
+ * this issue out.
+ */
+static inline void free_buffer_page(struct buffer_page *bpage)
+{
+	if (bpage->page)
+		__free_page(bpage->page);
+	kfree(bpage);
+}
+
+/*
+ * We need to fit the time_stamp delta into 27 bits.
+ */
+static inline int test_time_stamp(u64 delta)
+{
+	if (delta & TS_DELTA_TEST)
+		return 1;
+	return 0;
+}
+
+#define BUF_PAGE_SIZE PAGE_SIZE
+
+/*
+ * head_page == tail_page && head == tail then buffer is empty.
+ */
+struct ring_buffer_per_cpu {
+	int				cpu;
+	struct ring_buffer		*buffer;
+	spinlock_t			lock;
+	struct lock_class_key		lock_key;
+	struct list_head		pages;
+	struct buffer_page		*head_page;	/* read from head */
+	struct buffer_page		*tail_page;	/* write to tail */
+	struct buffer_page		*commit_page;	/* committed pages */
+	struct buffer_page		*reader_page;
+	unsigned long			overrun;
+	unsigned long			entries;
+	u64				write_stamp;
+	u64				read_stamp;
+	atomic_t			record_disabled;
+};
+
+struct ring_buffer {
+	unsigned long			size;
+	unsigned			pages;
+	unsigned			flags;
+	int				cpus;
+	cpumask_t			cpumask;
+	atomic_t			record_disabled;
+
+	struct mutex			mutex;
+
+	struct ring_buffer_per_cpu	**buffers;
+};
+
+struct ring_buffer_iter {
+	struct ring_buffer_per_cpu	*cpu_buffer;
+	unsigned long			head;
+	struct buffer_page		*head_page;
+	u64				read_stamp;
+};
+
+#define RB_WARN_ON(buffer, cond)				\
+	do {							\
+		if (unlikely(cond)) {				\
+			atomic_inc(&buffer->record_disabled);	\
+			WARN_ON(1);				\
+		}						\
+	} while (0)
+
+#define RB_WARN_ON_RET(buffer, cond)				\
+	do {							\
+		if (unlikely(cond)) {				\
+			atomic_inc(&buffer->record_disabled);	\
+			WARN_ON(1);				\
+			return -1;				\
+		}						\
+	} while (0)
+
+#define RB_WARN_ON_ONCE(buffer, cond)				\
+	do {							\
+		static int once;				\
+		if (unlikely(cond) && !once) {			\
+			once++;					\
+			atomic_inc(&buffer->record_disabled);	\
+			WARN_ON(1);				\
+		}						\
+	} while (0)
+
+/**
+ * rb_check_pages - integrity check of buffer pages
+ * @cpu_buffer: CPU buffer with pages to test
+ *
+ * As a safety measure we check to make sure the data pages have not
+ * been corrupted.
+ */
+static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct list_head *head = &cpu_buffer->pages;
+	struct buffer_page *page, *tmp;
+
+	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
+	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
+
+	list_for_each_entry_safe(page, tmp, head, list) {
+		RB_WARN_ON_RET(cpu_buffer,
+			       page->list.next->prev != &page->list);
+		RB_WARN_ON_RET(cpu_buffer,
+			       page->list.prev->next != &page->list);
+	}
+
+	return 0;
+}
+
+static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+			     unsigned nr_pages)
+{
+	struct list_head *head = &cpu_buffer->pages;
+	struct buffer_page *page, *tmp;
+	unsigned long addr;
+	LIST_HEAD(pages);
+	unsigned i;
+
+	for (i = 0; i < nr_pages; i++) {
+		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+		if (!page)
+			goto free_pages;
+		list_add(&page->list, &pages);
+
+		addr = __get_free_page(GFP_KERNEL);
+		if (!addr)
+			goto free_pages;
+		page->page = (void *)addr;
+	}
+
+	list_splice(&pages, head);
+
+	rb_check_pages(cpu_buffer);
+
+	return 0;
+
+ free_pages:
+	list_for_each_entry_safe(page, tmp, &pages, list) {
+		list_del_init(&page->list);
+		free_buffer_page(page);
+	}
+	return -ENOMEM;
+}
+
+static struct ring_buffer_per_cpu *
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *page;
+	unsigned long addr;
+	int ret;
+
+	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
+				  GFP_KERNEL, cpu_to_node(cpu));
+	if (!cpu_buffer)
+		return NULL;
+
+	cpu_buffer->cpu = cpu;
+	cpu_buffer->buffer = buffer;
+	spin_lock_init(&cpu_buffer->lock);
+	INIT_LIST_HEAD(&cpu_buffer->pages);
+
+	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+			    GFP_KERNEL, cpu_to_node(cpu));
+	if (!page)
+		goto fail_free_buffer;
+
+	cpu_buffer->reader_page = page;
+	addr = __get_free_page(GFP_KERNEL);
+	if (!addr)
+		goto fail_free_reader;
+	page->page = (void *)addr;
+
+	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+
+	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
+	if (ret < 0)
+		goto fail_free_reader;
+
+	cpu_buffer->head_page
+		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+
+	return cpu_buffer;
+
+ fail_free_reader:
+	free_buffer_page(cpu_buffer->reader_page);
+
+ fail_free_buffer:
+	kfree(cpu_buffer);
+	return NULL;
+}
+
+static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct list_head *head = &cpu_buffer->pages;
+	struct buffer_page *page, *tmp;
+
+	list_del_init(&cpu_buffer->reader_page->list);
+	free_buffer_page(cpu_buffer->reader_page);
+
+	list_for_each_entry_safe(page, tmp, head, list) {
+		list_del_init(&page->list);
+		free_buffer_page(page);
+	}
+	kfree(cpu_buffer);
+}
+
+/*
+ * Causes compile errors if the struct buffer_page gets bigger
+ * than the struct page.
+ */
+extern int ring_buffer_page_too_big(void);
+
+/**
+ * ring_buffer_alloc - allocate a new ring_buffer
+ * @size: the size in bytes that is needed.
+ * @flags: attributes to set for the ring buffer.
+ *
+ * Currently the only flag that is available is the RB_FL_OVERWRITE
+ * flag. This flag means that the buffer will overwrite old data
+ * when the buffer wraps. If this flag is not set, the buffer will
+ * drop data when the tail hits the head.
+ */
+struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
+{
+	struct ring_buffer *buffer;
+	int bsize;
+	int cpu;
+
+	/* Paranoid! Optimizes out when all is well */
+	if (sizeof(struct buffer_page) > sizeof(struct page))
+		ring_buffer_page_too_big();
+
+
+	/* keep it in its own cache line */
+	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
+			 GFP_KERNEL);
+	if (!buffer)
+		return NULL;
+
+	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	buffer->flags = flags;
+
+	/* need at least two pages */
+	if (buffer->pages == 1)
+		buffer->pages++;
+
+	buffer->cpumask = cpu_possible_map;
+	buffer->cpus = nr_cpu_ids;
+
+	bsize = sizeof(void *) * nr_cpu_ids;
+	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
+				  GFP_KERNEL);
+	if (!buffer->buffers)
+		goto fail_free_buffer;
+
+	for_each_buffer_cpu(buffer, cpu) {
+		buffer->buffers[cpu] =
+			rb_allocate_cpu_buffer(buffer, cpu);
+		if (!buffer->buffers[cpu])
+			goto fail_free_buffers;
+	}
+
+	mutex_init(&buffer->mutex);
+
+	return buffer;
+
+ fail_free_buffers:
+	for_each_buffer_cpu(buffer, cpu) {
+		if (buffer->buffers[cpu])
+			rb_free_cpu_buffer(buffer->buffers[cpu]);
+	}
+	kfree(buffer->buffers);
+
+ fail_free_buffer:
+	kfree(buffer);
+	return NULL;
+}
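+
+/*
+ * Minimal usage sketch (the size and flags are arbitrary examples):
+ *
+ *	struct ring_buffer *rb;
+ *
+ *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
+ *	if (!rb)
+ *		return -ENOMEM;
+ *	...
+ *	ring_buffer_free(rb);
+ */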
+
+/**
+ * ring_buffer_free - free a ring buffer.
+ * @buffer: the buffer to free.
+ */
+void
+ring_buffer_free(struct ring_buffer *buffer)
+{
+	int cpu;
+
+	for_each_buffer_cpu(buffer, cpu)
+		rb_free_cpu_buffer(buffer->buffers[cpu]);
+
+	kfree(buffer);
+}
+
+static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+
+static void
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
+{
+	struct buffer_page *page;
+	struct list_head *p;
+	unsigned i;
+
+	atomic_inc(&cpu_buffer->record_disabled);
+	synchronize_sched();
+
+	for (i = 0; i < nr_pages; i++) {
+		BUG_ON(list_empty(&cpu_buffer->pages));
+		p = cpu_buffer->pages.next;
+		page = list_entry(p, struct buffer_page, list);
+		list_del_init(&page->list);
+		free_buffer_page(page);
+	}
+	BUG_ON(list_empty(&cpu_buffer->pages));
+
+	rb_reset_cpu(cpu_buffer);
+
+	rb_check_pages(cpu_buffer);
+
+	atomic_dec(&cpu_buffer->record_disabled);
+
+}
+
+static void
+rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
+		struct list_head *pages, unsigned nr_pages)
+{
+	struct buffer_page *page;
+	struct list_head *p;
+	unsigned i;
+
+	atomic_inc(&cpu_buffer->record_disabled);
+	synchronize_sched();
+
+	for (i = 0; i < nr_pages; i++) {
+		BUG_ON(list_empty(pages));
+		p = pages->next;
+		page = list_entry(p, struct buffer_page, list);
+		list_del_init(&page->list);
+		list_add_tail(&page->list, &cpu_buffer->pages);
+	}
+	rb_reset_cpu(cpu_buffer);
+
+	rb_check_pages(cpu_buffer);
+
+	atomic_dec(&cpu_buffer->record_disabled);
+}
+
+/**
+ * ring_buffer_resize - resize the ring buffer
+ * @buffer: the buffer to resize.
+ * @size: the new size.
+ *
+ * The tracer is responsible for making sure that the buffer is
+ * not being used while changing the size.
+ * Note: We may be able to change the above requirement by using
+ *  RCU synchronizations.
+ *
+ * Minimum size is 2 * BUF_PAGE_SIZE.
+ *
+ * Returns -ENOMEM on failure, otherwise the new size in bytes.
+ */
+int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned nr_pages, rm_pages, new_pages;
+	struct buffer_page *page, *tmp;
+	unsigned long buffer_size;
+	unsigned long addr;
+	LIST_HEAD(pages);
+	int i, cpu;
+
+	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	size *= BUF_PAGE_SIZE;
+	buffer_size = buffer->pages * BUF_PAGE_SIZE;
+
+	/* we need a minimum of two pages */
+	if (size < BUF_PAGE_SIZE * 2)
+		size = BUF_PAGE_SIZE * 2;
+
+	if (size == buffer_size)
+		return size;
+
+	mutex_lock(&buffer->mutex);
+
+	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+
+	if (size < buffer_size) {
+
+		/* easy case, just free pages */
+		BUG_ON(nr_pages >= buffer->pages);
+
+		rm_pages = buffer->pages - nr_pages;
+
+		for_each_buffer_cpu(buffer, cpu) {
+			cpu_buffer = buffer->buffers[cpu];
+			rb_remove_pages(cpu_buffer, rm_pages);
+		}
+		goto out;
+	}
+
+	/*
+	 * This is a bit more difficult. We only want to add pages
+	 * when we can allocate enough for all CPUs. We do this
+	 * by allocating all the pages and storing them on a local
+	 * link list. If we succeed in our allocation, then we
+	 * add these pages to the cpu_buffers. Otherwise we just free
+	 * them all and return -ENOMEM;
+	 */
+	BUG_ON(nr_pages <= buffer->pages);
+	new_pages = nr_pages - buffer->pages;
+
+	for_each_buffer_cpu(buffer, cpu) {
+		for (i = 0; i < new_pages; i++) {
+			page = kzalloc_node(ALIGN(sizeof(*page),
+						  cache_line_size()),
+					    GFP_KERNEL, cpu_to_node(cpu));
+			if (!page)
+				goto free_pages;
+			list_add(&page->list, &pages);
+			addr = __get_free_page(GFP_KERNEL);
+			if (!addr)
+				goto free_pages;
+			page->page = (void *)addr;
+		}
+	}
+
+	for_each_buffer_cpu(buffer, cpu) {
+		cpu_buffer = buffer->buffers[cpu];
+		rb_insert_pages(cpu_buffer, &pages, new_pages);
+	}
+
+	BUG_ON(!list_empty(&pages));
+
+ out:
+	buffer->pages = nr_pages;
+	mutex_unlock(&buffer->mutex);
+
+	return size;
+
+ free_pages:
+	list_for_each_entry_safe(page, tmp, &pages, list) {
+		list_del_init(&page->list);
+		free_buffer_page(page);
+	}
+	mutex_unlock(&buffer->mutex);
+	return -ENOMEM;
+}
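+
+/*
+ * Minimal usage sketch: the requested size is rounded up to whole
+ * pages, so callers should use the return value rather than assume
+ * the exact byte count was applied.
+ *
+ *	int ret;
+ *
+ *	ret = ring_buffer_resize(rb, 128 * 1024);
+ *	if (ret < 0)
+ *		return ret;
+ */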
+
+static inline int rb_null_event(struct ring_buffer_event *event)
+{
+	return event->type == RINGBUF_TYPE_PADDING;
+}
+
+static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
+{
+	return page->page + index;
+}
+
+static inline struct ring_buffer_event *
+rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return __rb_page_index(cpu_buffer->reader_page,
+			       cpu_buffer->reader_page->read);
+}
+
+static inline struct ring_buffer_event *
+rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return __rb_page_index(cpu_buffer->head_page,
+			       cpu_buffer->head_page->read);
+}
+
+static inline struct ring_buffer_event *
+rb_iter_head_event(struct ring_buffer_iter *iter)
+{
+	return __rb_page_index(iter->head_page, iter->head);
+}
+
+static inline unsigned rb_page_write(struct buffer_page *bpage)
+{
+	return local_read(&bpage->write);
+}
+
+static inline unsigned rb_page_commit(struct buffer_page *bpage)
+{
+	return local_read(&bpage->commit);
+}
+
+/* Size is determined by what has been committed */
+static inline unsigned rb_page_size(struct buffer_page *bpage)
+{
+	return rb_page_commit(bpage);
+}
+
+static inline unsigned
+rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return rb_page_commit(cpu_buffer->commit_page);
+}
+
+static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return rb_page_commit(cpu_buffer->head_page);
+}
+
+/*
+ * When the tail hits the head and the buffer is in overwrite mode,
+ * the head jumps to the next page and all content on the previous
+ * page is discarded. But before doing so, we update the overrun
+ * variable of the buffer.
+ */
+static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct ring_buffer_event *event;
+	unsigned long head;
+
+	for (head = 0; head < rb_head_size(cpu_buffer);
+	     head += rb_event_length(event)) {
+
+		event = __rb_page_index(cpu_buffer->head_page, head);
+		BUG_ON(rb_null_event(event));
+		/* Only count data entries */
+		if (event->type != RINGBUF_TYPE_DATA)
+			continue;
+		cpu_buffer->overrun++;
+		cpu_buffer->entries--;
+	}
+}
+
+static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
+			       struct buffer_page **page)
+{
+	struct list_head *p = (*page)->list.next;
+
+	if (p == &cpu_buffer->pages)
+		p = p->next;
+
+	*page = list_entry(p, struct buffer_page, list);
+}
+
+static inline unsigned
+rb_event_index(struct ring_buffer_event *event)
+{
+	unsigned long addr = (unsigned long)event;
+
+	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+}
+
+static inline int
+rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
+	     struct ring_buffer_event *event)
+{
+	unsigned long addr = (unsigned long)event;
+	unsigned long index;
+
+	index = rb_event_index(event);
+	addr &= PAGE_MASK;
+
+	return cpu_buffer->commit_page->page == (void *)addr &&
+		rb_commit_index(cpu_buffer) == index;
+}
+
+static inline void
+rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
+		    struct ring_buffer_event *event)
+{
+	unsigned long addr = (unsigned long)event;
+	unsigned long index;
+
+	index = rb_event_index(event);
+	addr &= PAGE_MASK;
+
+	while (cpu_buffer->commit_page->page != (void *)addr) {
+		RB_WARN_ON(cpu_buffer,
+			   cpu_buffer->commit_page == cpu_buffer->tail_page);
+		cpu_buffer->commit_page->commit =
+			cpu_buffer->commit_page->write;
+		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
+		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
+	}
+
+	/* Now set the commit to the event's index */
+	local_set(&cpu_buffer->commit_page->commit, index);
+}
+
+static inline void
+rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	/*
+	 * We only race with interrupts and NMIs on this CPU.
+	 * If we own the commit event, then we can commit
+	 * all others that interrupted us, since the interruptions
+	 * are in stack format (they finish before they come
+	 * back to us). This allows us to do a simple loop to
+	 * assign the commit to the tail.
+	 */
+	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
+		cpu_buffer->commit_page->commit =
+			cpu_buffer->commit_page->write;
+		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
+		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
+		/* add barrier to keep gcc from optimizing too much */
+		barrier();
+	}
+	while (rb_commit_index(cpu_buffer) !=
+	       rb_page_write(cpu_buffer->commit_page)) {
+		cpu_buffer->commit_page->commit =
+			cpu_buffer->commit_page->write;
+		barrier();
+	}
+}
+
+static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
+	cpu_buffer->reader_page->read = 0;
+}
+
+static inline void rb_inc_iter(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+
+	/*
+	 * The iterator could be on the reader page (it starts there).
+	 * But the head could have moved, since the reader was
+	 * found. Check for this case and assign the iterator
+	 * to the head page instead of next.
+	 */
+	if (iter->head_page == cpu_buffer->reader_page)
+		iter->head_page = cpu_buffer->head_page;
+	else
+		rb_inc_page(cpu_buffer, &iter->head_page);
+
+	iter->read_stamp = iter->head_page->time_stamp;
+	iter->head = 0;
+}
+
+/**
+ * rb_update_event - update event type and data
+ * @event: the event to update
+ * @type: the type of event
+ * @length: the size of the event field in the ring buffer
+ *
+ * Update the type and data fields of the event. The length
+ * is the actual size that is written to the ring buffer,
+ * and with this, we can determine what to place into the
+ * data field.
+ */
+static inline void
+rb_update_event(struct ring_buffer_event *event,
+			 unsigned type, unsigned length)
+{
+	event->type = type;
+
+	switch (type) {
+
+	case RINGBUF_TYPE_PADDING:
+		break;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		event->len =
+			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
+			>> RB_ALIGNMENT_SHIFT;
+		break;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		event->len =
+			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
+			>> RB_ALIGNMENT_SHIFT;
+		break;
+
+	case RINGBUF_TYPE_DATA:
+		length -= RB_EVNT_HDR_SIZE;
+		if (length > RB_MAX_SMALL_DATA) {
+			event->len = 0;
+			event->array[0] = length;
+		} else
+			event->len =
+				(length + (RB_ALIGNMENT-1))
+				>> RB_ALIGNMENT_SHIFT;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline unsigned rb_calculate_event_length(unsigned length)
+{
+	struct ring_buffer_event event; /* Used only for sizeof array */
+
+	/* zero length can cause confusions */
+	if (!length)
+		length = 1;
+
+	if (length > RB_MAX_SMALL_DATA)
+		length += sizeof(event.array[0]);
+
+	length += RB_EVNT_HDR_SIZE;
+	length = ALIGN(length, RB_ALIGNMENT);
+
+	return length;
+}
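+
+/*
+ * Worked example: a request for 10 bytes of payload becomes
+ * 10 + RB_EVNT_HDR_SIZE bytes and is then rounded up to the next
+ * RB_ALIGNMENT (4 byte) boundary.  With the usual 4 byte event header
+ * that is ALIGN(14, 4) == 16 bytes reserved on the page.
+ */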
+
+static struct ring_buffer_event *
+__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+		  unsigned type, unsigned long length, u64 *ts)
+{
+	struct buffer_page *tail_page, *head_page, *reader_page;
+	unsigned long tail, write;
+	struct ring_buffer *buffer = cpu_buffer->buffer;
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	tail_page = cpu_buffer->tail_page;
+	write = local_add_return(length, &tail_page->write);
+	tail = write - length;
+
+	/* See if we shot past the end of this buffer page */
+	if (write > BUF_PAGE_SIZE) {
+		struct buffer_page *next_page = tail_page;
+
+		spin_lock_irqsave(&cpu_buffer->lock, flags);
+
+		rb_inc_page(cpu_buffer, &next_page);
+
+		head_page = cpu_buffer->head_page;
+		reader_page = cpu_buffer->reader_page;
+
+		/* we grabbed the lock before incrementing */
+		RB_WARN_ON(cpu_buffer, next_page == reader_page);
+
+		/*
+		 * If for some reason, we had an interrupt storm that made
+		 * it all the way around the buffer, bail, and warn
+		 * about it.
+		 */
+		if (unlikely(next_page == cpu_buffer->commit_page)) {
+			WARN_ON_ONCE(1);
+			goto out_unlock;
+		}
+
+		if (next_page == head_page) {
+			if (!(buffer->flags & RB_FL_OVERWRITE)) {
+				/* reset write */
+				if (tail <= BUF_PAGE_SIZE)
+					local_set(&tail_page->write, tail);
+				goto out_unlock;
+			}
+
+			/* tail_page has not moved yet? */
+			if (tail_page == cpu_buffer->tail_page) {
+				/* count overflows */
+				rb_update_overflow(cpu_buffer);
+
+				rb_inc_page(cpu_buffer, &head_page);
+				cpu_buffer->head_page = head_page;
+				cpu_buffer->head_page->read = 0;
+			}
+		}
+
+		/*
+		 * If the tail page is still the same as what we think
+		 * it is, then it is up to us to update the tail
+		 * pointer.
+		 */
+		if (tail_page == cpu_buffer->tail_page) {
+			local_set(&next_page->write, 0);
+			local_set(&next_page->commit, 0);
+			cpu_buffer->tail_page = next_page;
+
+			/* reread the time stamp */
+			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+			cpu_buffer->tail_page->time_stamp = *ts;
+		}
+
+		/*
+		 * The actual tail page has moved forward.
+		 */
+		if (tail < BUF_PAGE_SIZE) {
+			/* Mark the rest of the page with padding */
+			event = __rb_page_index(tail_page, tail);
+			event->type = RINGBUF_TYPE_PADDING;
+		}
+
+		if (tail <= BUF_PAGE_SIZE)
+			/* Set the write back to the previous setting */
+			local_set(&tail_page->write, tail);
+
+		/*
+		 * If this was a commit entry that failed,
+		 * increment that too
+		 */
+		if (tail_page == cpu_buffer->commit_page &&
+		    tail == rb_commit_index(cpu_buffer)) {
+			rb_set_commit_to_write(cpu_buffer);
+		}
+
+		spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+
+		/* fail and let the caller try again */
+		return ERR_PTR(-EAGAIN);
+	}
+
+	/* We reserved something on the buffer */
+
+	BUG_ON(write > BUF_PAGE_SIZE);
+
+	event = __rb_page_index(tail_page, tail);
+	rb_update_event(event, type, length);
+
+	/*
+	 * If this is a commit and the tail is zero, then update
+	 * this page's time stamp.
+	 */
+	if (!tail && rb_is_commit(cpu_buffer, event))
+		cpu_buffer->commit_page->time_stamp = *ts;
+
+	return event;
+
+ out_unlock:
+	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	return NULL;
+}
+
+static int
+rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
+		  u64 *ts, u64 *delta)
+{
+	struct ring_buffer_event *event;
+	static int once;
+	int ret;
+
+	if (unlikely(*delta > (1ULL << 59) && !once++)) {
+		printk(KERN_WARNING "Delta way too big! %llu"
+		       " ts=%llu write stamp = %llu\n",
+		       *delta, *ts, cpu_buffer->write_stamp);
+		WARN_ON(1);
+	}
+
+	/*
+	 * The delta is too big; we need to add a
+	 * new timestamp.
+	 */
+	event = __rb_reserve_next(cpu_buffer,
+				  RINGBUF_TYPE_TIME_EXTEND,
+				  RB_LEN_TIME_EXTEND,
+				  ts);
+	if (!event)
+		return -EBUSY;
+
+	if (PTR_ERR(event) == -EAGAIN)
+		return -EAGAIN;
+
+	/* Only a committed time event can update the write stamp */
+	if (rb_is_commit(cpu_buffer, event)) {
+		/*
+		 * If this is the first on the page, then we need to
+		 * update the page itself, and just put in a zero.
+		 */
+		if (rb_event_index(event)) {
+			event->time_delta = *delta & TS_MASK;
+			event->array[0] = *delta >> TS_SHIFT;
+		} else {
+			cpu_buffer->commit_page->time_stamp = *ts;
+			event->time_delta = 0;
+			event->array[0] = 0;
+		}
+		cpu_buffer->write_stamp = *ts;
+		/* let the caller know this was the commit */
+		ret = 1;
+	} else {
+		/* Darn, this is just wasted space */
+		event->time_delta = 0;
+		event->array[0] = 0;
+		ret = 0;
+	}
+
+	*delta = 0;
+
+	return ret;
+}
+
+static struct ring_buffer_event *
+rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
+		      unsigned type, unsigned long length)
+{
+	struct ring_buffer_event *event;
+	u64 ts, delta;
+	int commit = 0;
+
+ again:
+	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+
+	/*
+	 * Only the first commit can update the timestamp.
+	 * Yes there is a race here. If an interrupt comes in
+	 * just after the conditional and it traces too, then it
+	 * will also check the deltas. More than one timestamp may
+	 * also be made. But only the entry that did the actual
+	 * commit will be something other than zero.
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
+	    rb_page_write(cpu_buffer->tail_page) ==
+	    rb_commit_index(cpu_buffer)) {
+
+		delta = ts - cpu_buffer->write_stamp;
+
+		/* make sure this delta is calculated here */
+		barrier();
+
+		/* Did the write stamp get updated already? */
+		if (unlikely(ts < cpu_buffer->write_stamp))
+			goto again;
+
+		if (test_time_stamp(delta)) {
+
+			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
+
+			if (commit == -EBUSY)
+				return NULL;
+
+			if (commit == -EAGAIN)
+				goto again;
+
+			RB_WARN_ON(cpu_buffer, commit < 0);
+		}
+	} else
+		/* Non-commit events have zero deltas */
+		delta = 0;
+
+	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
+	if (PTR_ERR(event) == -EAGAIN)
+		goto again;
+
+	if (!event) {
+		if (unlikely(commit))
+			/*
+			 * Ouch! We needed a timestamp and it was committed. But
+			 * we didn't get our event reserved.
+			 */
+			rb_set_commit_to_write(cpu_buffer);
+		return NULL;
+	}
+
+	/*
+	 * If the timestamp was committed, make the commit our entry
+	 * now so that we will update it when needed.
+	 */
+	if (commit)
+		rb_set_commit_event(cpu_buffer, event);
+	else if (!rb_is_commit(cpu_buffer, event))
+		delta = 0;
+
+	event->time_delta = delta;
+
+	return event;
+}
+
+static DEFINE_PER_CPU(int, rb_need_resched);
+
+/**
+ * ring_buffer_lock_reserve - reserve a part of the buffer
+ * @buffer: the ring buffer to reserve from
+ * @length: the length of the data to reserve (excluding event header)
+ * @flags: a pointer to save the interrupt flags
+ *
+ * Returns a reserved event on the ring buffer to copy directly to.
+ * The user of this interface will need to get the body to write into
+ * and can use the ring_buffer_event_data() interface.
+ *
+ * The length is the length of the data needed, not the event length
+ * which also includes the event header.
+ *
+ * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
+ * If NULL is returned, then nothing has been allocated or locked.
+ */
+struct ring_buffer_event *
+ring_buffer_lock_reserve(struct ring_buffer *buffer,
+			 unsigned long length,
+			 unsigned long *flags)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+	int cpu, resched;
+
+	if (atomic_read(&buffer->record_disabled))
+		return NULL;
+
+	/* If we are tracing schedule, we don't want to recurse */
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		goto out;
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	if (atomic_read(&cpu_buffer->record_disabled))
+		goto out;
+
+	length = rb_calculate_event_length(length);
+	if (length > BUF_PAGE_SIZE)
+		goto out;
+
+	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
+	if (!event)
+		goto out;
+
+	/*
+	 * Need to store resched state on this cpu.
+	 * Only the first needs to.
+	 */
+
+	if (preempt_count() == 1)
+		per_cpu(rb_need_resched, cpu) = resched;
+
+	return event;
+
+ out:
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+	return NULL;
+}
+
+static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+		      struct ring_buffer_event *event)
+{
+	cpu_buffer->entries++;
+
+	/* Only process further if we own the commit */
+	if (!rb_is_commit(cpu_buffer, event))
+		return;
+
+	cpu_buffer->write_stamp += event->time_delta;
+
+	rb_set_commit_to_write(cpu_buffer);
+}
+
+/**
+ * ring_buffer_unlock_commit - commit a reserved event
+ * @buffer: The buffer to commit to
+ * @event: The event pointer to commit.
+ * @flags: the interrupt flags received from ring_buffer_lock_reserve.
+ *
+ * This commits the data to the ring buffer, and releases any locks held.
+ *
+ * Must be paired with ring_buffer_lock_reserve.
+ */
+int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+			      struct ring_buffer_event *event,
+			      unsigned long flags)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	int cpu = raw_smp_processor_id();
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	rb_commit(cpu_buffer, event);
+
+	/*
+	 * Only the last preempt count needs to restore preemption.
+	 */
+	if (preempt_count() == 1) {
+		if (per_cpu(rb_need_resched, cpu))
+			preempt_enable_no_resched_notrace();
+		else
+			preempt_enable_notrace();
+	} else
+		preempt_enable_no_resched_notrace();
+
+	return 0;
+}
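+
+/*
+ * Minimal usage sketch of the reserve/commit pair; struct my_entry and
+ * the value stored in it are made up for the example:
+ *
+ *	struct ring_buffer_event *event;
+ *	struct my_entry *entry;
+ *	unsigned long flags;
+ *
+ *	event = ring_buffer_lock_reserve(rb, sizeof(*entry), &flags);
+ *	if (!event)
+ *		return;
+ *	entry = ring_buffer_event_data(event);
+ *	entry->value = 42;
+ *	ring_buffer_unlock_commit(rb, event, flags);
+ */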
+
+/**
+ * ring_buffer_write - write data to the buffer without reserving
+ * @buffer: The ring buffer to write to.
+ * @length: The length of the data being written (excluding the event header)
+ * @data: The data to write to the buffer.
+ *
+ * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
+ * one function. If you already have the data to write to the buffer, it
+ * may be easier to simply call this function.
+ *
+ * Note, like ring_buffer_lock_reserve, the length is the length of the data
+ * and not the length of the event which would hold the header.
+ */
+int ring_buffer_write(struct ring_buffer *buffer,
+			unsigned long length,
+			void *data)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+	unsigned long event_length;
+	void *body;
+	int ret = -EBUSY;
+	int cpu, resched;
+
+	if (atomic_read(&buffer->record_disabled))
+		return -EBUSY;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		goto out;
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	if (atomic_read(&cpu_buffer->record_disabled))
+		goto out;
+
+	event_length = rb_calculate_event_length(length);
+	event = rb_reserve_next_event(cpu_buffer,
+				      RINGBUF_TYPE_DATA, event_length);
+	if (!event)
+		goto out;
+
+	body = rb_event_data(event);
+
+	memcpy(body, data, length);
+
+	rb_commit(cpu_buffer, event);
+
+	ret = 0;
+ out:
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+
+	return ret;
+}
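+
+/*
+ * Minimal usage sketch; struct my_entry is made up for the example.
+ * A non-zero return (-EBUSY) means the event could not be recorded:
+ *
+ *	struct my_entry entry = { .value = 42 };
+ *
+ *	if (ring_buffer_write(rb, sizeof(entry), &entry))
+ *		return;
+ */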
+
+static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct buffer_page *reader = cpu_buffer->reader_page;
+	struct buffer_page *head = cpu_buffer->head_page;
+	struct buffer_page *commit = cpu_buffer->commit_page;
+
+	return reader->read == rb_page_commit(reader) &&
+		(commit == reader ||
+		 (commit == head &&
+		  head->read == rb_page_commit(commit)));
+}
+
+/**
+ * ring_buffer_record_disable - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * The caller should call synchronize_sched() after this.
+ */
+void ring_buffer_record_disable(struct ring_buffer *buffer)
+{
+	atomic_inc(&buffer->record_disabled);
+}
+
+/**
+ * ring_buffer_record_enable - enable writes to the buffer
+ * @buffer: The ring buffer to enable writes
+ *
+ * Note, multiple disables will need the same number of enables
+ * to truly enable the writing (much like preempt_disable).
+ */
+void ring_buffer_record_enable(struct ring_buffer *buffer)
+{
+	atomic_dec(&buffer->record_disabled);
+}
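+
+/*
+ * Typical pattern (sketch): a reader wanting a stable view disables
+ * recording, waits for writers already in flight, does its reading or
+ * resetting, and then re-enables recording:
+ *
+ *	ring_buffer_record_disable(rb);
+ *	synchronize_sched();
+ *	...
+ *	ring_buffer_record_enable(rb);
+ */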
+
+/**
+ * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
+ * @buffer: The ring buffer to stop writes to.
+ * @cpu: The CPU buffer to stop
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * The caller should call synchronize_sched() after this.
+ */
+void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return;
+
+	cpu_buffer = buffer->buffers[cpu];
+	atomic_inc(&cpu_buffer->record_disabled);
+}
+
+/**
+ * ring_buffer_record_enable_cpu - enable writes to the buffer
+ * @buffer: The ring buffer to enable writes
+ * @cpu: The CPU to enable.
+ *
+ * Note, multiple disables will need the same number of enables
+ * to truly enable the writing (much like preempt_disable).
+ */
+void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return;
+
+	cpu_buffer = buffer->buffers[cpu];
+	atomic_dec(&cpu_buffer->record_disabled);
+}
+
+/**
+ * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the entries from.
+ */
+unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	return cpu_buffer->entries;
+}
+
+/**
+ * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	return cpu_buffer->overrun;
+}
+
+/**
+ * ring_buffer_entries - get the number of entries in a buffer
+ * @buffer: The ring buffer
+ *
+ * Returns the total number of entries in the ring buffer
+ * (all CPU entries)
+ */
+unsigned long ring_buffer_entries(struct ring_buffer *buffer)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long entries = 0;
+	int cpu;
+
+	/* if you care about this being correct, lock the buffer */
+	for_each_buffer_cpu(buffer, cpu) {
+		cpu_buffer = buffer->buffers[cpu];
+		entries += cpu_buffer->entries;
+	}
+
+	return entries;
+}
+
+/**
+ * ring_buffer_overruns - get the total number of overruns in the buffer
+ * @buffer: The ring buffer
+ *
+ * Returns the total number of overruns in the ring buffer
+ * (all CPU entries)
+ */
+unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long overruns = 0;
+	int cpu;
+
+	/* if you care about this being correct, lock the buffer */
+	for_each_buffer_cpu(buffer, cpu) {
+		cpu_buffer = buffer->buffers[cpu];
+		overruns += cpu_buffer->overrun;
+	}
+
+	return overruns;
+}
+
+/**
+ * ring_buffer_iter_reset - reset an iterator
+ * @iter: The iterator to reset
+ *
+ * Resets the iterator, so that it will start from the beginning
+ * again.
+ */
+void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+
+	/* Iterator usage is expected to have record disabled */
+	if (list_empty(&cpu_buffer->reader_page->list)) {
+		iter->head_page = cpu_buffer->head_page;
+		iter->head = cpu_buffer->head_page->read;
+	} else {
+		iter->head_page = cpu_buffer->reader_page;
+		iter->head = cpu_buffer->reader_page->read;
+	}
+	if (iter->head)
+		iter->read_stamp = cpu_buffer->read_stamp;
+	else
+		iter->read_stamp = iter->head_page->time_stamp;
+}
+
+/**
+ * ring_buffer_iter_empty - check if an iterator has no more to read
+ * @iter: The iterator to check
+ */
+int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	cpu_buffer = iter->cpu_buffer;
+
+	return iter->head_page == cpu_buffer->commit_page &&
+		iter->head == rb_commit_index(cpu_buffer);
+}
+
+static void
+rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
+		     struct ring_buffer_event *event)
+{
+	u64 delta;
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		return;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		delta = event->array[0];
+		delta <<= TS_SHIFT;
+		delta += event->time_delta;
+		cpu_buffer->read_stamp += delta;
+		return;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		/* FIXME: not implemented */
+		return;
+
+	case RINGBUF_TYPE_DATA:
+		cpu_buffer->read_stamp += event->time_delta;
+		return;
+
+	default:
+		BUG();
+	}
+	return;
+}
+
+static void
+rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
+			  struct ring_buffer_event *event)
+{
+	u64 delta;
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		return;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		delta = event->array[0];
+		delta <<= TS_SHIFT;
+		delta += event->time_delta;
+		iter->read_stamp += delta;
+		return;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		/* FIXME: not implemented */
+		return;
+
+	case RINGBUF_TYPE_DATA:
+		iter->read_stamp += event->time_delta;
+		return;
+
+	default:
+		BUG();
+	}
+	return;
+}
+
+static struct buffer_page *
+rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct buffer_page *reader = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->lock, flags);
+
+ again:
+	reader = cpu_buffer->reader_page;
+
+	/* If there's more to read, return this page */
+	if (cpu_buffer->reader_page->read < rb_page_size(reader))
+		goto out;
+
+	/* Never should we have an index greater than the size */
+	RB_WARN_ON(cpu_buffer,
+		   cpu_buffer->reader_page->read > rb_page_size(reader));
+
+	/* check if we caught up to the tail */
+	reader = NULL;
+	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
+		goto out;
+
+	/*
+	 * Splice the empty reader page into the list around the head.
+	 * Reset the reader page to size zero.
+	 */
+
+	reader = cpu_buffer->head_page;
+	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.prev = reader->list.prev;
+
+	local_set(&cpu_buffer->reader_page->write, 0);
+	local_set(&cpu_buffer->reader_page->commit, 0);
+
+	/* Make the reader page now replace the head */
+	reader->list.prev->next = &cpu_buffer->reader_page->list;
+	reader->list.next->prev = &cpu_buffer->reader_page->list;
+
+	/*
+	 * If the tail is on the reader, then we must set the head
+	 * to the inserted page, otherwise we set it one before.
+	 */
+	cpu_buffer->head_page = cpu_buffer->reader_page;
+
+	if (cpu_buffer->commit_page != reader)
+		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
+
+	/* Finally update the reader page to the new head */
+	cpu_buffer->reader_page = reader;
+	rb_reset_reader_page(cpu_buffer);
+
+	goto again;
+
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+
+	return reader;
+}
+
+static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct ring_buffer_event *event;
+	struct buffer_page *reader;
+	unsigned length;
+
+	reader = rb_get_reader_page(cpu_buffer);
+
+	/* This function should not be called when buffer is empty */
+	BUG_ON(!reader);
+
+	event = rb_reader_event(cpu_buffer);
+
+	if (event->type == RINGBUF_TYPE_DATA)
+		cpu_buffer->entries--;
+
+	rb_update_read_stamp(cpu_buffer, event);
+
+	length = rb_event_length(event);
+	cpu_buffer->reader_page->read += length;
+}
+
+static void rb_advance_iter(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer *buffer;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+	unsigned length;
+
+	cpu_buffer = iter->cpu_buffer;
+	buffer = cpu_buffer->buffer;
+
+	/*
+	 * Check if we are at the end of the buffer.
+	 */
+	if (iter->head >= rb_page_size(iter->head_page)) {
+		BUG_ON(iter->head_page == cpu_buffer->commit_page);
+		rb_inc_iter(iter);
+		return;
+	}
+
+	event = rb_iter_head_event(iter);
+
+	length = rb_event_length(event);
+
+	/*
+	 * This should not be called to advance the iterator if we are
+	 * at the tail of the buffer.
+	 */
+	BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
+	       (iter->head + length > rb_commit_index(cpu_buffer)));
+
+	rb_update_iter_read_stamp(iter, event);
+
+	iter->head += length;
+
+	/* check for end of page padding */
+	if ((iter->head >= rb_page_size(iter->head_page)) &&
+	    (iter->head_page != cpu_buffer->commit_page))
+		rb_advance_iter(iter);
+}
+
+/**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peek at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+	struct buffer_page *reader;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return NULL;
+
+	cpu_buffer = buffer->buffers[cpu];
+
+ again:
+	reader = rb_get_reader_page(cpu_buffer);
+	if (!reader)
+		return NULL;
+
+	event = rb_reader_event(cpu_buffer);
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		RB_WARN_ON(cpu_buffer, 1);
+		rb_advance_reader(cpu_buffer);
+		return NULL;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		/* Internal data, OK to advance */
+		rb_advance_reader(cpu_buffer);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		/* FIXME: not implemented */
+		rb_advance_reader(cpu_buffer);
+		goto again;
+
+	case RINGBUF_TYPE_DATA:
+		if (ts) {
+			*ts = cpu_buffer->read_stamp + event->time_delta;
+			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+		}
+		return event;
+
+	default:
+		BUG();
+	}
+
+	return NULL;
+}
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer *buffer;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
+	cpu_buffer = iter->cpu_buffer;
+	buffer = cpu_buffer->buffer;
+
+ again:
+	if (rb_per_cpu_empty(cpu_buffer))
+		return NULL;
+
+	event = rb_iter_head_event(iter);
+
+	switch (event->type) {
+	case RINGBUF_TYPE_PADDING:
+		rb_inc_iter(iter);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_EXTEND:
+		/* Internal data, OK to advance */
+		rb_advance_iter(iter);
+		goto again;
+
+	case RINGBUF_TYPE_TIME_STAMP:
+		/* FIXME: not implemented */
+		rb_advance_iter(iter);
+		goto again;
+
+	case RINGBUF_TYPE_DATA:
+		if (ts) {
+			*ts = iter->read_stamp + event->time_delta;
+			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+		}
+		return event;
+
+	default:
+		BUG();
+	}
+
+	return NULL;
+}
+
+/**
+ * ring_buffer_consume - return an event and consume it
+ * @buffer: The ring buffer to get the next event from
+ * @cpu: The per CPU buffer to get the event from
+ * @ts: Where to store the event's timestamp, if non-NULL
+ *
+ * Returns the next event in the ring buffer, and that event is consumed.
+ * Meaning that sequential reads will keep returning a different event,
+ * and eventually empty the ring buffer if the producer is slower.
+ */
+struct ring_buffer_event *
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return NULL;
+
+	event = ring_buffer_peek(buffer, cpu, ts);
+	if (!event)
+		return NULL;
+
+	cpu_buffer = buffer->buffers[cpu];
+	rb_advance_reader(cpu_buffer);
+
+	return event;
+}
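+
+/*
+ * Minimal usage sketch of a consuming read; process() stands in for
+ * whatever the caller does with each event:
+ *
+ *	struct ring_buffer_event *event;
+ *	u64 ts;
+ *
+ *	while ((event = ring_buffer_consume(rb, cpu, &ts)))
+ *		process(ring_buffer_event_data(event), ts);
+ */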
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @buffer: The ring buffer to read from
+ * @cpu: The cpu buffer to iterate over
+ *
+ * This starts up an iteration through the buffer. It also disables
+ * the recording to the buffer until the reading is finished.
+ * This prevents the reading from being corrupted. This is not
+ * a consuming read, so a producer is not expected.
+ *
+ * Must be paired with ring_buffer_read_finish.
+ */
+struct ring_buffer_iter *
+ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_iter *iter;
+	unsigned long flags;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return NULL;
+
+	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	if (!iter)
+		return NULL;
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	iter->cpu_buffer = cpu_buffer;
+
+	atomic_inc(&cpu_buffer->record_disabled);
+	synchronize_sched();
+
+	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	ring_buffer_iter_reset(iter);
+	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+
+	return iter;
+}
+
+/**
+ * ring_buffer_read_finish - finish reading the iterator of the buffer
+ * @iter: The iterator retrieved by ring_buffer_read_start
+ *
+ * This re-enables the recording to the buffer, and frees the
+ * iterator.
+ */
+void
+ring_buffer_read_finish(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+
+	atomic_dec(&cpu_buffer->record_disabled);
+	kfree(iter);
+}
+
+/**
+ * ring_buffer_read - read the next item in the ring buffer by the iterator
+ * @iter: The ring buffer iterator
+ * @ts: The time stamp of the event read.
+ *
+ * This reads the next event in the ring buffer and increments the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_iter_peek(iter, ts);
+	if (!event)
+		return NULL;
+
+	rb_advance_iter(iter);
+
+	return event;
+}
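+
+/*
+ * Minimal usage sketch of a non-consuming pass over one CPU buffer;
+ * process() stands in for whatever the caller does with each event:
+ *
+ *	struct ring_buffer_iter *iter;
+ *	struct ring_buffer_event *event;
+ *	u64 ts;
+ *
+ *	iter = ring_buffer_read_start(rb, cpu);
+ *	if (!iter)
+ *		return;
+ *	while ((event = ring_buffer_read(iter, &ts)))
+ *		process(ring_buffer_event_data(event), ts);
+ *	ring_buffer_read_finish(iter);
+ */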
+
+/**
+ * ring_buffer_size - return the size of the ring buffer (in bytes)
+ * @buffer: The ring buffer.
+ */
+unsigned long ring_buffer_size(struct ring_buffer *buffer)
+{
+	return BUF_PAGE_SIZE * buffer->pages;
+}
+
+static void
+rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	cpu_buffer->head_page
+		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+	local_set(&cpu_buffer->head_page->write, 0);
+	local_set(&cpu_buffer->head_page->commit, 0);
+
+	cpu_buffer->head_page->read = 0;
+
+	cpu_buffer->tail_page = cpu_buffer->head_page;
+	cpu_buffer->commit_page = cpu_buffer->head_page;
+
+	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+	local_set(&cpu_buffer->reader_page->write, 0);
+	local_set(&cpu_buffer->reader_page->commit, 0);
+	cpu_buffer->reader_page->read = 0;
+
+	cpu_buffer->overrun = 0;
+	cpu_buffer->entries = 0;
+}
+
+/**
+ * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
+ * @buffer: The ring buffer to reset a per cpu buffer of
+ * @cpu: The CPU buffer to be reset
+ */
+void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	unsigned long flags;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return;
+
+	spin_lock_irqsave(&cpu_buffer->lock, flags);
+
+	rb_reset_cpu(cpu_buffer);
+
+	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+}
+
+/**
+ * ring_buffer_reset - reset a ring buffer
+ * @buffer: The ring buffer to reset all cpu buffers
+ */
+void ring_buffer_reset(struct ring_buffer *buffer)
+{
+	int cpu;
+
+	for_each_buffer_cpu(buffer, cpu)
+		ring_buffer_reset_cpu(buffer, cpu);
+}
+
+/**
+ * ring_buffer_empty - is the ring buffer empty?
+ * @buffer: The ring buffer to test
+ */
+int ring_buffer_empty(struct ring_buffer *buffer)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	int cpu;
+
+	/* yes this is racy, but if you don't like the race, lock the buffer */
+	for_each_buffer_cpu(buffer, cpu) {
+		cpu_buffer = buffer->buffers[cpu];
+		if (!rb_per_cpu_empty(cpu_buffer))
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
+ * @buffer: The ring buffer
+ * @cpu: The CPU buffer to test
+ */
+int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+
+	if (!cpu_isset(cpu, buffer->cpumask))
+		return 1;
+
+	cpu_buffer = buffer->buffers[cpu];
+	return rb_per_cpu_empty(cpu_buffer);
+}
+
+/**
+ * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
+ * @buffer_a: One buffer to swap with
+ * @buffer_b: The other buffer to swap with
+ * @cpu: The per CPU buffer to swap
+ *
+ * This function is useful for tracers that want to take a "snapshot"
+ * of a CPU buffer and have another backup buffer lying around.
+ * It is expected that the tracer handles the cpu buffer not being
+ * used at the moment.
+ */
+int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+			 struct ring_buffer *buffer_b, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer_a;
+	struct ring_buffer_per_cpu *cpu_buffer_b;
+
+	if (!cpu_isset(cpu, buffer_a->cpumask) ||
+	    !cpu_isset(cpu, buffer_b->cpumask))
+		return -EINVAL;
+
+	/* At least make sure the two buffers are somewhat the same */
+	if (buffer_a->size != buffer_b->size ||
+	    buffer_a->pages != buffer_b->pages)
+		return -EINVAL;
+
+	cpu_buffer_a = buffer_a->buffers[cpu];
+	cpu_buffer_b = buffer_b->buffers[cpu];
+
+	/*
+	 * We can't do a synchronize_sched here because this
+	 * function can be called in atomic context.
+	 * Normally this will be called from the same CPU as cpu.
+	 * If not it's up to the caller to protect this.
+	 */
+	atomic_inc(&cpu_buffer_a->record_disabled);
+	atomic_inc(&cpu_buffer_b->record_disabled);
+
+	buffer_a->buffers[cpu] = cpu_buffer_b;
+	buffer_b->buffers[cpu] = cpu_buffer_a;
+
+	cpu_buffer_b->buffer = buffer_a;
+	cpu_buffer_a->buffer = buffer_b;
+
+	atomic_dec(&cpu_buffer_a->record_disabled);
+	atomic_dec(&cpu_buffer_b->record_disabled);
+
+	return 0;
+}
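+
+/*
+ * Minimal usage sketch of a "max latency" style snapshot, mirroring
+ * what update_max_tr_single() in kernel/trace/trace.c does: reset the
+ * spare buffer, then swap the interesting CPU buffer into it so it can
+ * be read at leisure.  snapshot_rb, live_rb and read_snapshot() are
+ * made up names for the example:
+ *
+ *	ring_buffer_reset(snapshot_rb);
+ *	if (ring_buffer_swap_cpu(snapshot_rb, live_rb, cpu) == 0)
+ *		read_snapshot(snapshot_rb, cpu);
+ */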
+
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8f3fb3d..d345d64 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
 #include <linux/utsrelease.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/notifier.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -22,6 +23,7 @@
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -31,25 +33,36 @@
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
+#include <linux/ring_buffer.h>
 
 #include "trace.h"
 
+#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
+
 unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly	tracing_thresh;
 
-static unsigned long __read_mostly	tracing_nr_buffers;
+static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+
+static inline void ftrace_disable_cpu(void)
+{
+	preempt_disable();
+	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+}
+
+static inline void ftrace_enable_cpu(void)
+{
+	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	preempt_enable();
+}
+
 static cpumask_t __read_mostly		tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-static int trace_alloc_page(void);
-static int trace_free_page(void);
-
 static int tracing_disabled = 1;
 
-static unsigned long tracing_pages_allocated;
-
 long
 ns2usecs(cycle_t nsec)
 {
@@ -60,7 +73,9 @@
 
 cycle_t ftrace_now(int cpu)
 {
-	return cpu_clock(cpu);
+	u64 ts = ring_buffer_time_stamp(cpu);
+	ring_buffer_normalize_time_stamp(cpu, &ts);
+	return ts;
 }
 
 /*
@@ -100,11 +115,18 @@
 int				ftrace_function_enabled;
 
 /*
- * trace_nr_entries is the number of entries that is allocated
- * for a buffer. Note, the number of entries is always rounded
- * to ENTRIES_PER_PAGE.
+ * trace_buf_size is the size in bytes that is allocated
+ * for a buffer. Note, the number of bytes is always rounded
+ * to page size.
+ *
+ * This number is purposely set to a low default of 16384 entries.
+ * If a dump on oops happens, it is much appreciated not to have to
+ * wait for all that output.  In any case, this is configurable at
+ * boot time and at run time.
  */
-static unsigned long		trace_nr_entries = 65536UL;
+#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
+
+static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 
 /* trace_types holds a link list of available tracers. */
 static struct tracer		*trace_types __read_mostly;
@@ -133,24 +155,6 @@
 /* trace_flags holds iter_ctrl options */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
-static notrace void no_trace_init(struct trace_array *tr)
-{
-	int cpu;
-
-	ftrace_function_enabled = 0;
-	if(tr->ctrl)
-		for_each_online_cpu(cpu)
-			tracing_reset(tr->data[cpu]);
-	tracer_enabled = 0;
-}
-
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly = {
-	.name		= "none",
-	.init		= no_trace_init
-};
-
-
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -167,23 +171,21 @@
 		wake_up(&trace_wait);
 }
 
-#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
-
-static int __init set_nr_entries(char *str)
+static int __init set_buf_size(char *str)
 {
-	unsigned long nr_entries;
+	unsigned long buf_size;
 	int ret;
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &nr_entries);
+	ret = strict_strtoul(str, 0, &buf_size);
 	/* nr_entries can not be zero */
-	if (ret < 0 || nr_entries == 0)
+	if (ret < 0 || buf_size == 0)
 		return 0;
-	trace_nr_entries = nr_entries;
+	trace_buf_size = buf_size;
 	return 1;
 }
-__setup("trace_entries=", set_nr_entries);
+__setup("trace_buf_size=", set_buf_size);
 
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
@@ -191,21 +193,6 @@
 }
 
 /*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF	- interrupts were disabled
- *  NEED_RESCED - reschedule is requested
- *  HARDIRQ	- inside an interrupt handler
- *  SOFTIRQ	- inside a softirq handler
- */
-enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF		= 0x01,
-	TRACE_FLAG_NEED_RESCHED		= 0x02,
-	TRACE_FLAG_HARDIRQ		= 0x04,
-	TRACE_FLAG_SOFTIRQ		= 0x08,
-};
-
-/*
  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
  * control the output of kernel symbols.
  */
@@ -224,6 +211,7 @@
 	"block",
 	"stacktrace",
 	"sched-tree",
+	"ftrace_printk",
 	NULL
 };
 
@@ -266,54 +254,6 @@
 	tracing_record_cmdline(current);
 }
 
-#define CHECK_COND(cond)			\
-	if (unlikely(cond)) {			\
-		tracing_disabled = 1;		\
-		WARN_ON(1);			\
-		return -1;			\
-	}
-
-/**
- * check_pages - integrity check of trace buffers
- *
- * As a safty measure we check to make sure the data pages have not
- * been corrupted.
- */
-int check_pages(struct trace_array_cpu *data)
-{
-	struct page *page, *tmp;
-
-	CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
-	CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
-
-	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
-		CHECK_COND(page->lru.next->prev != &page->lru);
-		CHECK_COND(page->lru.prev->next != &page->lru);
-	}
-
-	return 0;
-}
-
-/**
- * head_page - page address of the first page in per_cpu buffer.
- *
- * head_page returns the page address of the first page in
- * a per_cpu buffer. This also preforms various consistency
- * checks to make sure the buffer has not been corrupted.
- */
-void *head_page(struct trace_array_cpu *data)
-{
-	struct page *page;
-
-	if (list_empty(&data->trace_pages))
-		return NULL;
-
-	page = list_entry(data->trace_pages.next, struct page, lru);
-	BUG_ON(&page->lru == &data->trace_pages);
-
-	return page_address(page);
-}
-
 /**
  * trace_seq_printf - sequence printing of trace information
  * @s: trace sequence descriptor
@@ -395,28 +335,23 @@
 	return len;
 }
 
-#define HEX_CHARS 17
-static const char hex2asc[] = "0123456789abcdef";
+#define MAX_MEMHEX_BYTES	8
+#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
 
 static int
 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 {
 	unsigned char hex[HEX_CHARS];
 	unsigned char *data = mem;
-	unsigned char byte;
 	int i, j;
 
-	BUG_ON(len >= HEX_CHARS);
-
 #ifdef __BIG_ENDIAN
 	for (i = 0, j = 0; i < len; i++) {
 #else
 	for (i = len-1, j = 0; i >= 0; i--) {
 #endif
-		byte = data[i];
-
-		hex[j++] = hex2asc[byte & 0x0f];
-		hex[j++] = hex2asc[byte >> 4];
+		hex[j++] = hex_asc_hi(data[i]);
+		hex[j++] = hex_asc_lo(data[i]);
 	}
 	hex[j++] = ' ';
 
@@ -460,34 +395,6 @@
 	trace_seq_reset(s);
 }
 
-/*
- * flip the trace buffers between two trace descriptors.
- * This usually is the buffers between the global_trace and
- * the max_tr to record a snapshot of a current trace.
- *
- * The ftrace_max_lock must be held.
- */
-static void
-flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
-{
-	struct list_head flip_pages;
-
-	INIT_LIST_HEAD(&flip_pages);
-
-	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
-		sizeof(struct trace_array_cpu) -
-		offsetof(struct trace_array_cpu, trace_head_idx));
-
-	check_pages(tr1);
-	check_pages(tr2);
-	list_splice_init(&tr1->trace_pages, &flip_pages);
-	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
-	list_splice_init(&flip_pages, &tr2->trace_pages);
-	BUG_ON(!list_empty(&flip_pages));
-	check_pages(tr1);
-	check_pages(tr2);
-}
-
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -500,17 +407,17 @@
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data;
-	int i;
+	struct ring_buffer *buf = tr->buffer;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
-	/* clear out all the previous traces */
-	for_each_tracing_cpu(i) {
-		data = tr->data[i];
-		flip_trace(max_tr.data[i], data);
-		tracing_reset(data);
-	}
+
+	tr->buffer = max_tr.buffer;
+	max_tr.buffer = buf;
+
+	ftrace_disable_cpu();
+	ring_buffer_reset(tr->buffer);
+	ftrace_enable_cpu();
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
@@ -527,16 +434,19 @@
 void
 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
-	int i;
+	int ret;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
-	for_each_tracing_cpu(i)
-		tracing_reset(max_tr.data[i]);
 
-	flip_trace(max_tr.data[cpu], data);
-	tracing_reset(data);
+	ftrace_disable_cpu();
+
+	ring_buffer_reset(max_tr.buffer);
+	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
+
+	ftrace_enable_cpu();
+
+	WARN_ON_ONCE(ret);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
@@ -573,7 +483,6 @@
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest) {
 		struct tracer *saved_tracer = current_trace;
-		struct trace_array_cpu *data;
 		struct trace_array *tr = &global_trace;
 		int saved_ctrl = tr->ctrl;
 		int i;
@@ -585,10 +494,7 @@
 		 * If we fail, we do not register this tracer.
 		 */
 		for_each_tracing_cpu(i) {
-			data = tr->data[i];
-			if (!head_page(data))
-				continue;
-			tracing_reset(data);
+			tracing_reset(tr, i);
 		}
 		current_trace = type;
 		tr->ctrl = 0;
@@ -604,10 +510,7 @@
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		for_each_tracing_cpu(i) {
-			data = tr->data[i];
-			if (!head_page(data))
-				continue;
-			tracing_reset(data);
+			tracing_reset(tr, i);
 		}
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -653,13 +556,11 @@
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array_cpu *data)
+void tracing_reset(struct trace_array *tr, int cpu)
 {
-	data->trace_idx = 0;
-	data->overrun = 0;
-	data->trace_head = data->trace_tail = head_page(data);
-	data->trace_head_idx = 0;
-	data->trace_tail_idx = 0;
+	ftrace_disable_cpu();
+	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ftrace_enable_cpu();
 }
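
tracing_reset brackets the per-CPU reset with ftrace_disable_cpu()/ftrace_enable_cpu() so the function tracer cannot recurse into the ring buffer while it is being cleared. A rough user-space sketch of that guard, using a thread-local flag as a stand-in for the per-CPU counter; all names below are made up for illustration.

#include <stdio.h>

/* Thread-local stand-in for the per-CPU "tracing disabled" counter. */
static _Thread_local int trace_blocked;

static void trace_event(const char *msg)
{
	if (trace_blocked)	/* inside buffer maintenance: drop the event */
		return;
	printf("trace: %s\n", msg);
}

static void reset_buffer(void)
{
	trace_blocked++;	/* nothing called below may add entries */
	/* ... clear the buffer state here ... */
	trace_event("dropped, not recorded");
	trace_blocked--;
}

int main(void)
{
	trace_event("recorded");
	reset_buffer();
	trace_event("recorded again");
	return 0;
}
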
 
 #define SAVED_CMDLINES 128
@@ -745,82 +646,16 @@
 	trace_save_cmdline(tsk);
 }
 
-static inline struct list_head *
-trace_next_list(struct trace_array_cpu *data, struct list_head *next)
-{
-	/*
-	 * Roundrobin - but skip the head (which is not a real page):
-	 */
-	next = next->next;
-	if (unlikely(next == &data->trace_pages))
-		next = next->next;
-	BUG_ON(next == &data->trace_pages);
-
-	return next;
-}
-
-static inline void *
-trace_next_page(struct trace_array_cpu *data, void *addr)
-{
-	struct list_head *next;
-	struct page *page;
-
-	page = virt_to_page(addr);
-
-	next = trace_next_list(data, &page->lru);
-	page = list_entry(next, struct page, lru);
-
-	return page_address(page);
-}
-
-static inline struct trace_entry *
-tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
-{
-	unsigned long idx, idx_next;
-	struct trace_entry *entry;
-
-	data->trace_idx++;
-	idx = data->trace_head_idx;
-	idx_next = idx + 1;
-
-	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
-
-	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
-
-	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
-		data->trace_head = trace_next_page(data, data->trace_head);
-		idx_next = 0;
-	}
-
-	if (data->trace_head == data->trace_tail &&
-	    idx_next == data->trace_tail_idx) {
-		/* overrun */
-		data->overrun++;
-		data->trace_tail_idx++;
-		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
-			data->trace_tail =
-				trace_next_page(data, data->trace_tail);
-			data->trace_tail_idx = 0;
-		}
-	}
-
-	data->trace_head_idx = idx_next;
-
-	return entry;
-}
-
-static inline void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+			     int pc)
 {
 	struct task_struct *tsk = current;
-	unsigned long pc;
 
-	pc = preempt_count();
-
-	entry->preempt_count	= pc & 0xff;
-	entry->pid		= (tsk) ? tsk->pid : 0;
-	entry->t		= ftrace_now(raw_smp_processor_id());
-	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+	entry->preempt_count		= pc & 0xff;
+	entry->pid			= (tsk) ? tsk->pid : 0;
+	entry->flags =
+		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -828,119 +663,110 @@
 
 void
 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
-	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+	       int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct ftrace_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type		= TRACE_FN;
-	entry->fn.ip		= ip;
-	entry->fn.parent_ip	= parent_ip;
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	/* If we are reading the ring buffer, don't trace */
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type			= TRACE_FN;
+	entry->ip			= ip;
+	entry->parent_ip		= parent_ip;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
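
trace_function now writes an event with a reserve / fill / commit sequence instead of taking a raw spinlock around a hand-rolled page walk, and simply bails out when no event can be reserved. A small user-space sketch of that two-phase write against a toy bounded log; the types and helpers are assumptions, not the ring_buffer interface.

#include <stdint.h>
#include <stdio.h>

struct event {
	uint64_t ip;
	uint64_t parent_ip;
};

#define NR_SLOTS 8

static struct event slots[NR_SLOTS];
static unsigned int reserve_idx;	/* next slot to hand out */
static unsigned int commit_idx;		/* slots visible to readers */

/* Phase 1: hand out the next free slot, or NULL when the log is full. */
static struct event *log_reserve(void)
{
	if (reserve_idx >= NR_SLOTS)
		return NULL;
	return &slots[reserve_idx++];
}

/* Phase 2: make everything reserved so far visible to readers. */
static void log_commit(void)
{
	commit_idx = reserve_idx;
}

/* Mirror of the "reserve, fill, commit, or silently bail out" pattern. */
static void trace_call(uint64_t ip, uint64_t parent_ip)
{
	struct event *e = log_reserve();

	if (!e)
		return;
	e->ip = ip;
	e->parent_ip = parent_ip;
	log_commit();
}

int main(void)
{
	trace_call(0x1000, 0x2000);
	printf("entries committed: %u\n", commit_idx);
	return 0;
}
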
 
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+       int pc)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags, pc);
 }
 
-#ifdef CONFIG_MMIOTRACE
-void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
-						struct mmiotrace_rw *rw)
+static void ftrace_trace_stack(struct trace_array *tr,
+			       struct trace_array_cpu *data,
+			       unsigned long flags,
+			       int skip, int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct stack_entry *entry;
+	struct stack_trace trace;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
 
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
-	entry->type		= TRACE_MMIO_RW;
-	entry->mmiorw		= *rw;
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type		= TRACE_STACK;
 
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	memset(&entry->caller, 0, sizeof(entry->caller));
 
-	trace_wake_up();
+	trace.nr_entries	= 0;
+	trace.max_entries	= FTRACE_STACK_ENTRIES;
+	trace.skip		= skip;
+	trace.entries		= entry->caller;
+
+	save_stack_trace(&trace);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
-						struct mmiotrace_map *map)
-{
-	struct trace_entry *entry;
-	unsigned long irq_flags;
-
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
-	entry->type		= TRACE_MMIO_MAP;
-	entry->mmiomap		= *map;
-
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
-
-	trace_wake_up();
-}
-#endif
-
 void __trace_stack(struct trace_array *tr,
 		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip)
 {
-	struct trace_entry *entry;
-	struct stack_trace trace;
+	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+}
 
-	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+static void
+ftrace_trace_special(void *__tr, void *__data,
+		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
+		     int pc)
+{
+	struct ring_buffer_event *event;
+	struct trace_array_cpu *data = __data;
+	struct trace_array *tr = __tr;
+	struct special_entry *entry;
+	unsigned long irq_flags;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
 		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, pc);
+	entry->ent.type			= TRACE_SPECIAL;
+	entry->arg1			= arg1;
+	entry->arg2			= arg2;
+	entry->arg3			= arg3;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
 
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type		= TRACE_STACK;
-
-	memset(&entry->stack, 0, sizeof(entry->stack));
-
-	trace.nr_entries	= 0;
-	trace.max_entries	= FTRACE_STACK_ENTRIES;
-	trace.skip		= skip;
-	trace.entries		= entry->stack.caller;
-
-	save_stack_trace(&trace);
+	trace_wake_up();
 }
 
 void
 __trace_special(void *__tr, void *__data,
 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	struct trace_array_cpu *data = __data;
-	struct trace_array *tr = __tr;
-	struct trace_entry *entry;
-	unsigned long irq_flags;
-
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
-	entry->type		= TRACE_SPECIAL;
-	entry->special.arg1	= arg1;
-	entry->special.arg2	= arg2;
-	entry->special.arg3	= arg3;
-	__trace_stack(tr, data, irq_flags, 4);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
-
-	trace_wake_up();
+	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
 }
 
 void
@@ -948,25 +774,28 @@
 			   struct trace_array_cpu *data,
 			   struct task_struct *prev,
 			   struct task_struct *next,
-			   unsigned long flags)
+			   unsigned long flags, int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type		= TRACE_CTX;
-	entry->ctx.prev_pid	= prev->pid;
-	entry->ctx.prev_prio	= prev->prio;
-	entry->ctx.prev_state	= prev->state;
-	entry->ctx.next_pid	= next->pid;
-	entry->ctx.next_prio	= next->prio;
-	entry->ctx.next_state	= next->state;
-	__trace_stack(tr, data, flags, 5);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					   &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type			= TRACE_CTX;
+	entry->prev_pid			= prev->pid;
+	entry->prev_prio		= prev->prio;
+	entry->prev_state		= prev->state;
+	entry->next_pid			= next->pid;
+	entry->next_prio		= next->prio;
+	entry->next_state		= next->state;
+	entry->next_cpu	= task_cpu(next);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ftrace_trace_stack(tr, data, flags, 5, pc);
 }
 
 void
@@ -974,25 +803,28 @@
 			   struct trace_array_cpu *data,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
-			   unsigned long flags)
+			   unsigned long flags, int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry			= tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type		= TRACE_WAKE;
-	entry->ctx.prev_pid	= curr->pid;
-	entry->ctx.prev_prio	= curr->prio;
-	entry->ctx.prev_state	= curr->state;
-	entry->ctx.next_pid	= wakee->pid;
-	entry->ctx.next_prio	= wakee->prio;
-	entry->ctx.next_state	= wakee->state;
-	__trace_stack(tr, data, flags, 6);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					   &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type			= TRACE_WAKE;
+	entry->prev_pid			= curr->pid;
+	entry->prev_prio		= curr->prio;
+	entry->prev_state		= curr->state;
+	entry->next_pid			= wakee->pid;
+	entry->next_prio		= wakee->prio;
+	entry->next_state		= wakee->state;
+	entry->next_cpu			= task_cpu(wakee);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ftrace_trace_stack(tr, data, flags, 6, pc);
 
 	trace_wake_up();
 }
@@ -1002,23 +834,21 @@
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
 	int cpu;
+	int pc;
 
-	if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
+	if (tracing_disabled || !tr->ctrl)
 		return;
 
-	local_irq_save(flags);
+	pc = preempt_count();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
-		__trace_special(tr, data, arg1, arg2, arg3);
+	if (likely(!atomic_read(&data->disabled)))
+		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	preempt_enable_notrace();
 }
 
 #ifdef CONFIG_FTRACE
@@ -1029,7 +859,8 @@
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int cpu;
+	int cpu, resched;
+	int pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
@@ -1037,16 +868,22 @@
 	if (skip_trace(ip))
 		return;
 
-	local_irq_save(flags);
+	pc = preempt_count();
+	resched = need_resched();
+	preempt_disable_notrace();
+	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -1073,111 +910,96 @@
 	TRACE_FILE_LAT_FMT	= 1,
 };
 
-static struct trace_entry *
-trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
-		struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
 {
-	struct page *page;
-	struct trace_entry *array;
+	/* Don't allow ftrace to trace into the ring buffers */
+	ftrace_disable_cpu();
 
-	if (iter->next_idx[cpu] >= tr->entries ||
-	    iter->next_idx[cpu] >= data->trace_idx ||
-	    (data->trace_head == data->trace_tail &&
-	     data->trace_head_idx == data->trace_tail_idx))
-		return NULL;
+	iter->idx++;
+	if (iter->buffer_iter[iter->cpu])
+		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
 
-	if (!iter->next_page[cpu]) {
-		/* Initialize the iterator for this cpu trace buffer */
-		WARN_ON(!data->trace_tail);
-		page = virt_to_page(data->trace_tail);
-		iter->next_page[cpu] = &page->lru;
-		iter->next_page_idx[cpu] = data->trace_tail_idx;
-	}
-
-	page = list_entry(iter->next_page[cpu], struct page, lru);
-	BUG_ON(&data->trace_pages == &page->lru);
-
-	array = page_address(page);
-
-	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
-	return &array[iter->next_page_idx[cpu]];
+	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
-find_next_entry(struct trace_iterator *iter, int *ent_cpu)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 {
-	struct trace_array *tr = iter->tr;
+	struct ring_buffer_event *event;
+	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
+
+	/* Don't allow ftrace to trace into the ring buffers */
+	ftrace_disable_cpu();
+
+	if (buf_iter)
+		event = ring_buffer_iter_peek(buf_iter, ts);
+	else
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+
+	ftrace_enable_cpu();
+
+	return event ? ring_buffer_event_data(event) : NULL;
+}
+
+static struct trace_entry *
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+{
+	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	u64 next_ts = 0, ts;
 	int next_cpu = -1;
 	int cpu;
 
 	for_each_tracing_cpu(cpu) {
-		if (!head_page(tr->data[cpu]))
+
+		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
-		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
+
+		ent = peek_next_entry(iter, cpu, &ts);
+
 		/*
 		 * Pick the entry with the smallest timestamp:
 		 */
-		if (ent && (!next || ent->t < next->t)) {
+		if (ent && (!next || ts < next_ts)) {
 			next = ent;
 			next_cpu = cpu;
+			next_ts = ts;
 		}
 	}
 
 	if (ent_cpu)
 		*ent_cpu = next_cpu;
 
+	if (ent_ts)
+		*ent_ts = next_ts;
+
 	return next;
 }
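
__find_next_entry merges the per-CPU streams by peeking at each non-empty buffer and returning the entry with the smallest timestamp. The same peek-and-pick loop in a self-contained user-space form, over plain arrays with sentinel-terminated streams; everything here is illustrative.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 3

struct entry {
	uint64_t ts;	/* 0 terminates a stream */
};

/* Pre-sorted per-CPU streams plus a read cursor for each. */
static struct entry stream[NR_CPUS][4] = {
	{ {10}, {40}, {70}, {0} },
	{ {20}, {30}, { 0}, {0} },
	{ {50}, { 0}, { 0}, {0} },
};
static int cursor[NR_CPUS];

/* Peek the next entry of one stream without consuming it. */
static struct entry *peek(int cpu)
{
	struct entry *e = &stream[cpu][cursor[cpu]];

	return e->ts ? e : NULL;
}

/* Pick the entry with the smallest timestamp across all streams. */
static struct entry *find_next(int *ent_cpu)
{
	struct entry *ent, *next = NULL;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ent = peek(cpu);
		if (ent && (!next || ent->ts < next->ts)) {
			next = ent;
			*ent_cpu = cpu;
		}
	}
	return next;
}

int main(void)
{
	struct entry *e;
	int cpu;

	while ((e = find_next(&cpu)) != NULL) {
		printf("ts=%llu cpu=%d\n", (unsigned long long)e->ts, cpu);
		cursor[cpu]++;	/* consume it, like the _inc variant */
	}
	return 0;
}
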
 
-static void trace_iterator_increment(struct trace_iterator *iter)
+/* Find the next real entry, without updating the iterator itself */
+static struct trace_entry *
+find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 {
-	iter->idx++;
-	iter->next_idx[iter->cpu]++;
-	iter->next_page_idx[iter->cpu]++;
+	return __find_next_entry(iter, ent_cpu, ent_ts);
+}
 
-	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
-		struct trace_array_cpu *data = iter->tr->data[iter->cpu];
+/* Find the next real entry, and increment the iterator to the next entry */
+static void *find_next_entry_inc(struct trace_iterator *iter)
+{
+	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
-		iter->next_page_idx[iter->cpu] = 0;
-		iter->next_page[iter->cpu] =
-			trace_next_list(data, iter->next_page[iter->cpu]);
-	}
+	if (iter->ent)
+		trace_iterator_increment(iter, iter->cpu);
+
+	return iter->ent ? iter : NULL;
 }
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	struct trace_array_cpu *data = iter->tr->data[iter->cpu];
-
-	data->trace_tail_idx++;
-	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
-		data->trace_tail = trace_next_page(data, data->trace_tail);
-		data->trace_tail_idx = 0;
-	}
-
-	/* Check if we empty it, then reset the index */
-	if (data->trace_head == data->trace_tail &&
-	    data->trace_head_idx == data->trace_tail_idx)
-		data->trace_idx = 0;
-}
-
-static void *find_next_entry_inc(struct trace_iterator *iter)
-{
-	struct trace_entry *next;
-	int next_cpu = -1;
-
-	next = find_next_entry(iter, &next_cpu);
-
-	iter->prev_ent = iter->ent;
-	iter->prev_cpu = iter->cpu;
-
-	iter->ent = next;
-	iter->cpu = next_cpu;
-
-	if (next)
-		trace_iterator_increment(iter);
-
-	return next ? iter : NULL;
+	/* Don't allow ftrace to trace into the ring buffers */
+	ftrace_disable_cpu();
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1210,7 +1032,7 @@
 	struct trace_iterator *iter = m->private;
 	void *p = NULL;
 	loff_t l = 0;
-	int i;
+	int cpu;
 
 	mutex_lock(&trace_types_lock);
 
@@ -1229,14 +1051,15 @@
 		iter->ent = NULL;
 		iter->cpu = 0;
 		iter->idx = -1;
-		iter->prev_ent = NULL;
-		iter->prev_cpu = -1;
 
-		for_each_tracing_cpu(i) {
-			iter->next_idx[i] = 0;
-			iter->next_page[i] = NULL;
+		ftrace_disable_cpu();
+
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
 		}
 
+		ftrace_enable_cpu();
+
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
 
@@ -1330,21 +1153,21 @@
 
 static void print_lat_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#                _------=> CPU#            \n");
-	seq_puts(m, "#               / _-----=> irqs-off        \n");
-	seq_puts(m, "#              | / _----=> need-resched    \n");
-	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
-	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
-	seq_puts(m, "#              |||| /                      \n");
-	seq_puts(m, "#              |||||     delay             \n");
-	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
-	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
+	seq_puts(m, "#                  _------=> CPU#            \n");
+	seq_puts(m, "#                 / _-----=> irqs-off        \n");
+	seq_puts(m, "#                | / _----=> need-resched    \n");
+	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
+	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
+	seq_puts(m, "#                |||| /                      \n");
+	seq_puts(m, "#                |||||     delay             \n");
+	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
+	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
-	seq_puts(m, "#              | |      |          |         |\n");
+	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |       |          |         |\n");
 }
 
 
@@ -1355,23 +1178,16 @@
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long total   = 0;
-	unsigned long entries = 0;
-	int cpu;
+	unsigned long total;
+	unsigned long entries;
 	const char *name = "preemption";
 
 	if (type)
 		name = type->name;
 
-	for_each_tracing_cpu(cpu) {
-		if (head_page(tr->data[cpu])) {
-			total += tr->data[cpu]->trace_idx;
-			if (tr->data[cpu]->trace_idx > tr->entries)
-				entries += tr->entries;
-			else
-				entries += tr->data[cpu]->trace_idx;
-		}
-	}
+	entries = ring_buffer_entries(iter->tr->buffer);
+	total = entries +
+		ring_buffer_overruns(iter->tr->buffer);
 
 	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -1428,7 +1244,7 @@
 	comm = trace_find_cmdline(entry->pid);
 
 	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
-	trace_seq_printf(s, "%d", cpu);
+	trace_seq_printf(s, "%3d", cpu);
 	trace_seq_printf(s, "%c%c",
 			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
 			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
@@ -1457,7 +1273,7 @@
 unsigned long preempt_mark_thresh = 100;
 
 static void
-lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
+lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
 		    unsigned long rel_usecs)
 {
 	trace_seq_printf(s, " %4lldus", abs_usecs);
@@ -1471,34 +1287,76 @@
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
-static int
+/*
+ * The message is supposed to contain an ending newline.
+ * If the printing stops prematurely, try to add a newline of our own.
+ */
+void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
+{
+	struct trace_entry *ent;
+	struct trace_field_cont *cont;
+	bool ok = true;
+
+	ent = peek_next_entry(iter, iter->cpu, NULL);
+	if (!ent || ent->type != TRACE_CONT) {
+		trace_seq_putc(s, '\n');
+		return;
+	}
+
+	do {
+		cont = (struct trace_field_cont *)ent;
+		if (ok)
+			ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
+
+		ftrace_disable_cpu();
+
+		if (iter->buffer_iter[iter->cpu])
+			ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+		else
+			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+
+		ftrace_enable_cpu();
+
+		ent = peek_next_entry(iter, iter->cpu, NULL);
+	} while (ent && ent->type == TRACE_CONT);
+
+	if (!ok)
+		trace_seq_putc(s, '\n');
+}
+
+static enum print_line_t
 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
-	struct trace_entry *next_entry = find_next_entry(iter, NULL);
+	struct trace_entry *next_entry;
 	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
 	struct trace_entry *entry = iter->ent;
 	unsigned long abs_usecs;
 	unsigned long rel_usecs;
+	u64 next_ts;
 	char *comm;
 	int S, T;
 	int i;
 	unsigned state;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
+	next_entry = find_next_entry(iter, NULL, &next_ts);
 	if (!next_entry)
-		next_entry = entry;
-	rel_usecs = ns2usecs(next_entry->t - entry->t);
-	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
+		next_ts = iter->ts;
+	rel_usecs = ns2usecs(next_ts - iter->ts);
+	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 
 	if (verbose) {
 		comm = trace_find_cmdline(entry->pid);
-		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
+		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
 				 " %ld.%03ldms (+%ld.%03ldms): ",
 				 comm,
 				 entry->pid, cpu, entry->flags,
 				 entry->preempt_count, trace_idx,
-				 ns2usecs(entry->t),
+				 ns2usecs(iter->ts),
 				 abs_usecs/1000,
 				 abs_usecs % 1000, rel_usecs/1000,
 				 rel_usecs % 1000);
@@ -1507,52 +1365,85 @@
 		lat_print_timestamp(s, abs_usecs, rel_usecs);
 	}
 	switch (entry->type) {
-	case TRACE_FN:
-		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_ip_sym(s, field->ip, sym_flags);
 		trace_seq_puts(s, " (");
-		if (kretprobed(entry->fn.parent_ip))
+		if (kretprobed(field->parent_ip))
 			trace_seq_puts(s, KRETPROBE_MSG);
 		else
-			seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+			seq_print_ip_sym(s, field->parent_ip, sym_flags);
 		trace_seq_puts(s, ")\n");
 		break;
+	}
 	case TRACE_CTX:
-	case TRACE_WAKE:
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
 
-		state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+		trace_assign_type(field, entry);
+
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
+
+		state = field->prev_state ?
+			__ffs(field->prev_state) + 1 : 0;
 		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
-		comm = trace_find_cmdline(entry->ctx.next_pid);
-		trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
-				 entry->ctx.prev_pid,
-				 entry->ctx.prev_prio,
+		comm = trace_find_cmdline(field->next_pid);
+		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+				 field->prev_pid,
+				 field->prev_prio,
 				 S, entry->type == TRACE_CTX ? "==>" : "  +",
-				 entry->ctx.next_pid,
-				 entry->ctx.next_prio,
+				 field->next_cpu,
+				 field->next_pid,
+				 field->next_prio,
 				 T, comm);
 		break;
-	case TRACE_SPECIAL:
+	}
+	case TRACE_SPECIAL: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
 		trace_seq_printf(s, "# %ld %ld %ld\n",
-				 entry->special.arg1,
-				 entry->special.arg2,
-				 entry->special.arg3);
+				 field->arg1,
+				 field->arg2,
+				 field->arg3);
 		break;
-	case TRACE_STACK:
+	}
+	case TRACE_STACK: {
+		struct stack_entry *field;
+
+		trace_assign_type(field, entry);
+
 		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
 			if (i)
 				trace_seq_puts(s, " <= ");
-			seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
+			seq_print_ip_sym(s, field->caller[i], sym_flags);
 		}
 		trace_seq_puts(s, "\n");
 		break;
+	}
+	case TRACE_PRINT: {
+		struct print_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_ip_sym(s, field->ip, sym_flags);
+		trace_seq_printf(s, ": %s", field->buf);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
 	default:
 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
 	}
-	return 1;
+	return TRACE_TYPE_HANDLED;
 }
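
The printers now cast the generic trace_entry to its concrete record per case via trace_assign_type. A short sketch of the common-header pattern that makes this safe: every concrete record embeds the generic header as its first member, so a header pointer can be converted back once the type field has been checked. The types below are toy versions, not the kernel structures.

#include <stdio.h>

enum entry_type { ENTRY_FN, ENTRY_CTX };

/* Common header shared by every concrete record. */
struct entry {
	enum entry_type type;
};

/* Concrete records embed the header as their first member. */
struct fn_entry {
	struct entry ent;
	unsigned long ip;
};

struct ctx_entry {
	struct entry ent;
	int prev_pid, next_pid;
};

static void print_entry(struct entry *ent)
{
	switch (ent->type) {
	case ENTRY_FN: {
		/* Safe: the header is the first member of fn_entry. */
		struct fn_entry *field = (struct fn_entry *)ent;

		printf("fn: ip=%#lx\n", field->ip);
		break;
	}
	case ENTRY_CTX: {
		struct ctx_entry *field = (struct ctx_entry *)ent;

		printf("ctx: %d ==> %d\n", field->prev_pid, field->next_pid);
		break;
	}
	}
}

int main(void)
{
	struct fn_entry fn = { { ENTRY_FN }, 0xc0100000UL };
	struct ctx_entry ctx = { { ENTRY_CTX }, 1, 42 };

	print_entry(&fn.ent);
	print_entry(&ctx.ent);
	return 0;
}
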
 
-static int print_trace_fmt(struct trace_iterator *iter)
+static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1567,90 +1458,126 @@
 
 	entry = iter->ent;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
 	comm = trace_find_cmdline(iter->ent->pid);
 
-	t = ns2usecs(entry->t);
+	t = ns2usecs(iter->ts);
 	usec_rem = do_div(t, 1000000ULL);
 	secs = (unsigned long)t;
 
 	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 	if (!ret)
-		return 0;
-	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+		return TRACE_TYPE_PARTIAL_LINE;
+	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
 	if (!ret)
-		return 0;
+		return TRACE_TYPE_PARTIAL_LINE;
 	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
 	if (!ret)
-		return 0;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	switch (entry->type) {
-	case TRACE_FN:
-		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
+		ret = seq_print_ip_sym(s, field->ip, sym_flags);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
-						entry->fn.parent_ip) {
+						field->parent_ip) {
 			ret = trace_seq_printf(s, " <-");
 			if (!ret)
-				return 0;
-			if (kretprobed(entry->fn.parent_ip))
+				return TRACE_TYPE_PARTIAL_LINE;
+			if (kretprobed(field->parent_ip))
 				ret = trace_seq_puts(s, KRETPROBE_MSG);
 			else
-				ret = seq_print_ip_sym(s, entry->fn.parent_ip,
+				ret = seq_print_ip_sym(s,
+						       field->parent_ip,
 						       sym_flags);
 			if (!ret)
-				return 0;
+				return TRACE_TYPE_PARTIAL_LINE;
 		}
 		ret = trace_seq_printf(s, "\n");
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
+	}
 	case TRACE_CTX:
-	case TRACE_WAKE:
-		S = entry->ctx.prev_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.prev_state] : 'X';
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
-		ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
-				       entry->ctx.prev_pid,
-				       entry->ctx.prev_prio,
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
+
+		trace_assign_type(field, entry);
+
+		S = field->prev_state < sizeof(state_to_char) ?
+			state_to_char[field->prev_state] : 'X';
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
+		ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
+				       field->prev_pid,
+				       field->prev_prio,
 				       S,
 				       entry->type == TRACE_CTX ? "==>" : "  +",
-				       entry->ctx.next_pid,
-				       entry->ctx.next_prio,
+				       field->next_cpu,
+				       field->next_pid,
+				       field->next_prio,
 				       T);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
-	case TRACE_SPECIAL:
+	}
+	case TRACE_SPECIAL: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
 		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-				 entry->special.arg1,
-				 entry->special.arg2,
-				 entry->special.arg3);
+				 field->arg1,
+				 field->arg2,
+				 field->arg3);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
-	case TRACE_STACK:
+	}
+	case TRACE_STACK: {
+		struct stack_entry *field;
+
+		trace_assign_type(field, entry);
+
 		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
 			if (i) {
 				ret = trace_seq_puts(s, " <= ");
 				if (!ret)
-					return 0;
+					return TRACE_TYPE_PARTIAL_LINE;
 			}
-			ret = seq_print_ip_sym(s, entry->stack.caller[i],
+			ret = seq_print_ip_sym(s, field->caller[i],
 					       sym_flags);
 			if (!ret)
-				return 0;
+				return TRACE_TYPE_PARTIAL_LINE;
 		}
 		ret = trace_seq_puts(s, "\n");
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
 	}
-	return 1;
+	case TRACE_PRINT: {
+		struct print_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_ip_sym(s, field->ip, sym_flags);
+		trace_seq_printf(s, ": %s", field->buf);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
+	}
+	return TRACE_TYPE_HANDLED;
 }
 
-static int print_raw_fmt(struct trace_iterator *iter)
+static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry;
@@ -1659,47 +1586,77 @@
 
 	entry = iter->ent;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
 	ret = trace_seq_printf(s, "%d %d %llu ",
-		entry->pid, iter->cpu, entry->t);
+		entry->pid, iter->cpu, iter->ts);
 	if (!ret)
-		return 0;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	switch (entry->type) {
-	case TRACE_FN:
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
 		ret = trace_seq_printf(s, "%x %x\n",
-					entry->fn.ip, entry->fn.parent_ip);
+					field->ip,
+					field->parent_ip);
 		if (!ret)
-			return 0;
-		break;
-	case TRACE_CTX:
-	case TRACE_WAKE:
-		S = entry->ctx.prev_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.prev_state] : 'X';
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
-		if (entry->type == TRACE_WAKE)
-			S = '+';
-		ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
-				       entry->ctx.prev_pid,
-				       entry->ctx.prev_prio,
-				       S,
-				       entry->ctx.next_pid,
-				       entry->ctx.next_prio,
-				       T);
-		if (!ret)
-			return 0;
-		break;
-	case TRACE_SPECIAL:
-	case TRACE_STACK:
-		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-				 entry->special.arg1,
-				 entry->special.arg2,
-				 entry->special.arg3);
-		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
 	}
-	return 1;
+	case TRACE_CTX:
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
+
+		trace_assign_type(field, entry);
+
+		S = field->prev_state < sizeof(state_to_char) ?
+			state_to_char[field->prev_state] : 'X';
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
+		if (entry->type == TRACE_WAKE)
+			S = '+';
+		ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
+				       field->prev_pid,
+				       field->prev_prio,
+				       S,
+				       field->next_cpu,
+				       field->next_pid,
+				       field->next_prio,
+				       T);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		break;
+	}
+	case TRACE_SPECIAL:
+	case TRACE_STACK: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
+		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
+				 field->arg1,
+				 field->arg2,
+				 field->arg3);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		break;
+	}
+	case TRACE_PRINT: {
+		struct print_entry *field;
+
+		trace_assign_type(field, entry);
+
+		trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
+	}
+	return TRACE_TYPE_HANDLED;
 }
 
 #define SEQ_PUT_FIELD_RET(s, x)				\
@@ -1710,11 +1667,12 @@
 
 #define SEQ_PUT_HEX_FIELD_RET(s, x)			\
 do {							\
+	BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);	\
 	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
 		return 0;				\
 } while (0)
 
-static int print_hex_fmt(struct trace_iterator *iter)
+static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned char newline = '\n';
@@ -1723,97 +1681,139 @@
 
 	entry = iter->ent;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
 	SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
 	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
-	SEQ_PUT_HEX_FIELD_RET(s, entry->t);
+	SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
 
 	switch (entry->type) {
-	case TRACE_FN:
-		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
+		SEQ_PUT_HEX_FIELD_RET(s, field->ip);
+		SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
 		break;
+	}
 	case TRACE_CTX:
-	case TRACE_WAKE:
-		S = entry->ctx.prev_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.prev_state] : 'X';
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
+
+		trace_assign_type(field, entry);
+
+		S = field->prev_state < sizeof(state_to_char) ?
+			state_to_char[field->prev_state] : 'X';
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
 		if (entry->type == TRACE_WAKE)
 			S = '+';
-		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
+		SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
+		SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
 		SEQ_PUT_HEX_FIELD_RET(s, S);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+		SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
+		SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
+		SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
 		SEQ_PUT_HEX_FIELD_RET(s, T);
 		break;
+	}
 	case TRACE_SPECIAL:
-	case TRACE_STACK:
-		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
-		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
+	case TRACE_STACK: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
+		SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
+		SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
+		SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
 		break;
 	}
+	}
 	SEQ_PUT_FIELD_RET(s, newline);
 
-	return 1;
+	return TRACE_TYPE_HANDLED;
 }
 
-static int print_bin_fmt(struct trace_iterator *iter)
+static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry;
 
 	entry = iter->ent;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
 	SEQ_PUT_FIELD_RET(s, entry->pid);
-	SEQ_PUT_FIELD_RET(s, entry->cpu);
-	SEQ_PUT_FIELD_RET(s, entry->t);
+	SEQ_PUT_FIELD_RET(s, iter->cpu);
+	SEQ_PUT_FIELD_RET(s, iter->ts);
 
 	switch (entry->type) {
-	case TRACE_FN:
-		SEQ_PUT_FIELD_RET(s, entry->fn.ip);
-		SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
+		SEQ_PUT_FIELD_RET(s, field->ip);
+		SEQ_PUT_FIELD_RET(s, field->parent_ip);
 		break;
-	case TRACE_CTX:
-		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
-		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
-		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
-		SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
-		SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
-		SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
+	}
+	case TRACE_CTX: {
+		struct ctx_switch_entry *field;
+
+		trace_assign_type(field, entry);
+
+		SEQ_PUT_FIELD_RET(s, field->prev_pid);
+		SEQ_PUT_FIELD_RET(s, field->prev_prio);
+		SEQ_PUT_FIELD_RET(s, field->prev_state);
+		SEQ_PUT_FIELD_RET(s, field->next_pid);
+		SEQ_PUT_FIELD_RET(s, field->next_prio);
+		SEQ_PUT_FIELD_RET(s, field->next_state);
 		break;
+	}
 	case TRACE_SPECIAL:
-	case TRACE_STACK:
-		SEQ_PUT_FIELD_RET(s, entry->special.arg1);
-		SEQ_PUT_FIELD_RET(s, entry->special.arg2);
-		SEQ_PUT_FIELD_RET(s, entry->special.arg3);
+	case TRACE_STACK: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
+		SEQ_PUT_FIELD_RET(s, field->arg1);
+		SEQ_PUT_FIELD_RET(s, field->arg2);
+		SEQ_PUT_FIELD_RET(s, field->arg3);
 		break;
 	}
+	}
 	return 1;
 }
 
 static int trace_empty(struct trace_iterator *iter)
 {
-	struct trace_array_cpu *data;
 	int cpu;
 
 	for_each_tracing_cpu(cpu) {
-		data = iter->tr->data[cpu];
-
-		if (head_page(data) && data->trace_idx &&
-		    (data->trace_tail != data->trace_head ||
-		     data->trace_tail_idx != data->trace_head_idx))
-			return 0;
+		if (iter->buffer_iter[cpu]) {
+			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+				return 0;
+		} else {
+			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+				return 0;
+		}
 	}
+
 	return 1;
 }
 
-static int print_trace_line(struct trace_iterator *iter)
+static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
-	if (iter->trace && iter->trace->print_line)
-		return iter->trace->print_line(iter);
+	enum print_line_t ret;
+
+	if (iter->trace && iter->trace->print_line) {
+		ret = iter->trace->print_line(iter);
+		if (ret != TRACE_TYPE_UNHANDLED)
+			return ret;
+	}
 
 	if (trace_flags & TRACE_ITER_BIN)
 		return print_bin_fmt(iter);
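
print_trace_line now asks the tracer's own print_line hook first and only falls back to the generic formats when the hook reports TRACE_TYPE_UNHANDLED. A compact sketch of that "plugin first, generic fallback" dispatch; the enum values and hooks below are illustrative.

#include <stdio.h>

enum print_line_t {
	PRINT_UNHANDLED,
	PRINT_HANDLED,
	PRINT_PARTIAL,
};

/* Optional per-tracer hook; it may decline by returning PRINT_UNHANDLED. */
typedef enum print_line_t (*print_line_fn)(const char *msg);

static enum print_line_t generic_print(const char *msg)
{
	printf("generic: %s\n", msg);
	return PRINT_HANDLED;
}

static enum print_line_t print_line(print_line_fn hook, const char *msg)
{
	if (hook) {
		enum print_line_t ret = hook(msg);

		if (ret != PRINT_UNHANDLED)
			return ret;	/* the tracer formatted it itself */
	}
	return generic_print(msg);	/* fall back to the default format */
}

static enum print_line_t fancy_print(const char *msg)
{
	if (msg[0] != '#')
		return PRINT_UNHANDLED;	/* not ours, let the generic code run */
	printf("fancy: %s\n", msg);
	return PRINT_HANDLED;
}

int main(void)
{
	print_line(fancy_print, "# special line");
	print_line(fancy_print, "ordinary line");
	return 0;
}
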
@@ -1869,6 +1869,8 @@
 __tracing_open(struct inode *inode, struct file *file, int *ret)
 {
 	struct trace_iterator *iter;
+	struct seq_file *m;
+	int cpu;
 
 	if (tracing_disabled) {
 		*ret = -ENODEV;
@@ -1889,28 +1891,45 @@
 	iter->trace = current_trace;
 	iter->pos = -1;
 
+	for_each_tracing_cpu(cpu) {
+
+		iter->buffer_iter[cpu] =
+			ring_buffer_read_start(iter->tr->buffer, cpu);
+
+		if (!iter->buffer_iter[cpu])
+			goto fail_buffer;
+	}
+
 	/* TODO stop tracer */
 	*ret = seq_open(file, &tracer_seq_ops);
-	if (!*ret) {
-		struct seq_file *m = file->private_data;
-		m->private = iter;
+	if (*ret)
+		goto fail_buffer;
 
-		/* stop the trace while dumping */
-		if (iter->tr->ctrl) {
-			tracer_enabled = 0;
-			ftrace_function_enabled = 0;
-		}
+	m = file->private_data;
+	m->private = iter;
 
-		if (iter->trace && iter->trace->open)
-			iter->trace->open(iter);
-	} else {
-		kfree(iter);
-		iter = NULL;
+	/* stop the trace while dumping */
+	if (iter->tr->ctrl) {
+		tracer_enabled = 0;
+		ftrace_function_enabled = 0;
 	}
+
+	if (iter->trace && iter->trace->open)
+		iter->trace->open(iter);
+
 	mutex_unlock(&trace_types_lock);
 
  out:
 	return iter;
+
+ fail_buffer:
+	for_each_tracing_cpu(cpu) {
+		if (iter->buffer_iter[cpu])
+			ring_buffer_read_finish(iter->buffer_iter[cpu]);
+	}
+	mutex_unlock(&trace_types_lock);
+
+	return ERR_PTR(-ENOMEM);
 }
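
__tracing_open now starts a reader iterator per CPU and, if any of them fails, unwinds the ones already created before returning an error. A sketch of that allocate-all-or-unwind pattern in user space; the helpers are placeholders for the per-CPU iterator setup.

#include <stdlib.h>
#include <stdio.h>

#define NR_CPUS 4

struct reader {
	void *iter[NR_CPUS];
};

static struct reader *reader_open(void)
{
	struct reader *r = calloc(1, sizeof(*r));
	int cpu;

	if (!r)
		return NULL;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		r->iter[cpu] = malloc(64);	/* stand-in for a per-CPU iterator */
		if (!r->iter[cpu])
			goto fail;
	}
	return r;

fail:
	/* Unwind whatever was set up before the failure. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		free(r->iter[cpu]);	/* free(NULL) is a no-op */
	free(r);
	return NULL;
}

int main(void)
{
	struct reader *r = reader_open();
	int cpu;

	if (!r) {
		fprintf(stderr, "open failed\n");
		return 1;
	}
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		free(r->iter[cpu]);
	free(r);
	return 0;
}
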
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -1926,8 +1945,14 @@
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
 	struct trace_iterator *iter = m->private;
+	int cpu;
 
 	mutex_lock(&trace_types_lock);
+	for_each_tracing_cpu(cpu) {
+		if (iter->buffer_iter[cpu])
+			ring_buffer_read_finish(iter->buffer_iter[cpu]);
+	}
+
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
@@ -2352,9 +2377,11 @@
 	struct tracer *t;
 	char buf[max_tracer_type_len+1];
 	int i;
+	size_t ret;
 
 	if (cnt > max_tracer_type_len)
 		cnt = max_tracer_type_len;
+	ret = cnt;
 
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
@@ -2370,7 +2397,11 @@
 		if (strcmp(t->name, buf) == 0)
 			break;
 	}
-	if (!t || t == current_trace)
+	if (!t) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (t == current_trace)
 		goto out;
 
 	if (current_trace && current_trace->reset)
@@ -2383,9 +2414,10 @@
  out:
 	mutex_unlock(&trace_types_lock);
 
-	filp->f_pos += cnt;
+	if (ret == cnt)
+		filp->f_pos += cnt;
 
-	return cnt;
+	return ret;
 }
 
 static ssize_t
@@ -2500,20 +2532,12 @@
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
-	struct trace_array_cpu *data;
-	static cpumask_t mask;
-	unsigned long flags;
-#ifdef CONFIG_FTRACE
-	int ftrace_save;
-#endif
-	int cpu;
 	ssize_t sret;
 
 	/* return any leftover data */
 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 	if (sret != -EBUSY)
 		return sret;
-	sret = 0;
 
 	trace_seq_reset(&iter->seq);
 
@@ -2524,6 +2548,8 @@
 			goto out;
 	}
 
+waitagain:
+	sret = 0;
 	while (trace_empty(iter)) {
 
 		if ((filp->f_flags & O_NONBLOCK)) {
@@ -2588,46 +2614,12 @@
 	       offsetof(struct trace_iterator, seq));
 	iter->pos = -1;
 
-	/*
-	 * We need to stop all tracing on all CPUS to read the
-	 * the next buffer. This is a bit expensive, but is
-	 * not done often. We fill all what we can read,
-	 * and then release the locks again.
-	 */
-
-	cpus_clear(mask);
-	local_irq_save(flags);
-#ifdef CONFIG_FTRACE
-	ftrace_save = ftrace_enabled;
-	ftrace_enabled = 0;
-#endif
-	smp_wmb();
-	for_each_tracing_cpu(cpu) {
-		data = iter->tr->data[cpu];
-
-		if (!head_page(data) || !data->trace_idx)
-			continue;
-
-		atomic_inc(&data->disabled);
-		cpu_set(cpu, mask);
-	}
-
-	for_each_cpu_mask(cpu, mask) {
-		data = iter->tr->data[cpu];
-		__raw_spin_lock(&data->lock);
-
-		if (data->overrun > iter->last_overrun[cpu])
-			iter->overrun[cpu] +=
-				data->overrun - iter->last_overrun[cpu];
-		iter->last_overrun[cpu] = data->overrun;
-	}
-
 	while (find_next_entry_inc(iter) != NULL) {
-		int ret;
+		enum print_line_t ret;
 		int len = iter->seq.len;
 
 		ret = print_trace_line(iter);
-		if (!ret) {
+		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 			/* don't print partial lines */
 			iter->seq.len = len;
 			break;
@@ -2639,26 +2631,17 @@
 			break;
 	}
 
-	for_each_cpu_mask(cpu, mask) {
-		data = iter->tr->data[cpu];
-		__raw_spin_unlock(&data->lock);
-	}
-
-	for_each_cpu_mask(cpu, mask) {
-		data = iter->tr->data[cpu];
-		atomic_dec(&data->disabled);
-	}
-#ifdef CONFIG_FTRACE
-	ftrace_enabled = ftrace_save;
-#endif
-	local_irq_restore(flags);
-
 	/* Now copy what we have to the user */
 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 	if (iter->seq.readpos >= iter->seq.len)
 		trace_seq_reset(&iter->seq);
+
+	/*
+	 * If there was nothing to send to the user, in spite of consuming
+	 * trace entries, go back and wait for more entries.
+	 */
 	if (sret == -EBUSY)
-		sret = 0;
+		goto waitagain;
 
 out:
 	mutex_unlock(&trace_types_lock);
@@ -2684,7 +2667,8 @@
 {
 	unsigned long val;
 	char buf[64];
-	int i, ret;
+	int ret;
+	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2704,59 +2688,38 @@
 
 	mutex_lock(&trace_types_lock);
 
-	if (current_trace != &no_tracer) {
+	if (tr->ctrl) {
 		cnt = -EBUSY;
-		pr_info("ftrace: set current_tracer to none"
+		pr_info("ftrace: please disable tracing"
 			" before modifying buffer size\n");
 		goto out;
 	}
 
-	if (val > global_trace.entries) {
-		long pages_requested;
-		unsigned long freeable_pages;
-
-		/* make sure we have enough memory before mapping */
-		pages_requested =
-			(val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
-
-		/* account for each buffer (and max_tr) */
-		pages_requested *= tracing_nr_buffers * 2;
-
-		/* Check for overflow */
-		if (pages_requested < 0) {
-			cnt = -ENOMEM;
+	if (val != global_trace.entries) {
+		ret = ring_buffer_resize(global_trace.buffer, val);
+		if (ret < 0) {
+			cnt = ret;
 			goto out;
 		}
 
-		freeable_pages = determine_dirtyable_memory();
-
-		/* we only allow to request 1/4 of useable memory */
-		if (pages_requested >
-		    ((freeable_pages + tracing_pages_allocated) / 4)) {
-			cnt = -ENOMEM;
-			goto out;
-		}
-
-		while (global_trace.entries < val) {
-			if (trace_alloc_page()) {
-				cnt = -ENOMEM;
-				goto out;
+		ret = ring_buffer_resize(max_tr.buffer, val);
+		if (ret < 0) {
+			int r;
+			cnt = ret;
+			r = ring_buffer_resize(global_trace.buffer,
+					       global_trace.entries);
+			if (r < 0) {
+				/* AARGH! We are left with a
+				 * different-sized max buffer! */
+				WARN_ON(1);
+				tracing_disabled = 1;
 			}
-			/* double check that we don't go over the known pages */
-			if (tracing_pages_allocated > pages_requested)
-				break;
+			goto out;
 		}
 
-	} else {
-		/* include the number of entries in val (inc of page entries) */
-		while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
-			trace_free_page();
+		global_trace.entries = val;
 	}
 
-	/* check integrity */
-	for_each_tracing_cpu(i)
-		check_pages(global_trace.data[i]);
-
 	filp->f_pos += cnt;
 
 	/* If check pages failed, return ENOMEM */
@@ -2769,6 +2732,52 @@
 	return cnt;
 }
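
tracing_entries_write resizes the live buffer and the max buffer together; if the second resize fails it tries to shrink the first one back, and only if even that fails does it give up and disable tracing. A user-space sketch of the pairwise-resize-with-rollback idea under assumed helper names, not the ring_buffer_resize API.

#include <stdlib.h>
#include <stdio.h>

struct buf {
	char *data;
	size_t size;
};

/* realloc-based stand-in for a buffer resize; returns 0 or -1. */
static int buf_resize(struct buf *b, size_t size)
{
	char *p = realloc(b->data, size);

	if (!p)
		return -1;
	b->data = p;
	b->size = size;
	return 0;
}

/* Resize both buffers to the same size, or leave them both as they were. */
static int resize_pair(struct buf *live, struct buf *max, size_t size)
{
	size_t old = live->size;

	if (buf_resize(live, size) < 0)
		return -1;

	if (buf_resize(max, size) < 0) {
		if (buf_resize(live, old) < 0) {
			/* Left with mismatched sizes: report and bail out. */
			fprintf(stderr, "buffers out of sync, disabling\n");
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	struct buf live = { NULL, 0 }, max = { NULL, 0 };

	if (resize_pair(&live, &max, 4096) == 0)
		printf("resized to %zu bytes each\n", live.size);
	free(live.data);
	free(max.data);
	return 0;
}
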
 
+static int mark_printk(const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+	va_start(args, fmt);
+	ret = trace_vprintk(0, fmt, args);
+	va_end(args);
+	return ret;
+}
+
+static ssize_t
+tracing_mark_write(struct file *filp, const char __user *ubuf,
+					size_t cnt, loff_t *fpos)
+{
+	char *buf;
+	char *end;
+	struct trace_array *tr = &global_trace;
+
+	if (!tr->ctrl || tracing_disabled)
+		return -EINVAL;
+
+	if (cnt > TRACE_BUF_SIZE)
+		cnt = TRACE_BUF_SIZE;
+
+	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	/* Terminate the buffer and cut it at the first newline. */
+	buf[cnt] = '\0';
+	end = strchr(buf, '\n');
+	if (end)
+		*end = '\0';
+
+	cnt = mark_printk("%s\n", buf);
+	kfree(buf);
+	*fpos += cnt;
+
+	return cnt;
+}
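
tracing_mark_write copies the user string, terminates it, cuts it at the first newline, and hands it to mark_printk(), which is just a varargs shim over trace_vprintk(). A user-space sketch of the same cut-and-forward flow; the function names are stand-ins and the "trace" is simply stdout.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the vprintf-style tracing core. */
static int mark_vprintk(const char *fmt, va_list args)
{
	return vprintf(fmt, args);
}

/* Varargs shim, like mark_printk() in the patch. */
static int mark_printk(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = mark_vprintk(fmt, args);
	va_end(args);
	return ret;
}

/* Terminate the buffer, cut it at the first newline, then record it. */
static int mark_write(char *buf, size_t cnt)
{
	char *end;

	buf[cnt] = '\0';
	end = strchr(buf, '\n');
	if (end)
		*end = '\0';

	return mark_printk("%s\n", buf);
}

int main(void)
{
	char msg[32] = "hello marker\ntrailing junk";

	mark_write(msg, strlen(msg));
	return 0;
}
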
+
 static struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -2800,6 +2809,11 @@
 	.write		= tracing_entries_write,
 };
 
+static struct file_operations tracing_mark_fops = {
+	.open		= tracing_open_generic,
+	.write		= tracing_mark_write,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static ssize_t
@@ -2846,7 +2860,7 @@
 #include "trace_selftest.c"
 #endif
 
-static __init void tracer_init_debugfs(void)
+static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
 	struct dentry *entry;
@@ -2881,12 +2895,12 @@
 	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
 				    &global_trace, &show_traces_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs 'trace' entry\n");
+		pr_warning("Could not create debugfs 'available_tracers' entry\n");
 
 	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
 				    &global_trace, &set_tracer_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs 'trace' entry\n");
+		pr_warning("Could not create debugfs 'current_tracer' entry\n");
 
 	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
 				    &tracing_max_latency,
@@ -2899,7 +2913,7 @@
 				    &tracing_thresh, &tracing_max_lat_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
-			   "'tracing_threash' entry\n");
+			   "'tracing_thresh' entry\n");
 	entry = debugfs_create_file("README", 0644, d_tracer,
 				    NULL, &tracing_readme_fops);
 	if (!entry)
@@ -2909,13 +2923,19 @@
 				    NULL, &tracing_pipe_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
-			   "'tracing_threash' entry\n");
+			   "'trace_pipe' entry\n");
 
 	entry = debugfs_create_file("trace_entries", 0644, d_tracer,
 				    &global_trace, &tracing_entries_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
-			   "'tracing_threash' entry\n");
+			   "'trace_entries' entry\n");
+
+	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
+				    NULL, &tracing_mark_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'trace_marker' entry\n");
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
@@ -2928,230 +2948,263 @@
 #ifdef CONFIG_SYSPROF_TRACER
 	init_tracer_sysprof_debugfs(d_tracer);
 #endif
-}
-
-static int trace_alloc_page(void)
-{
-	struct trace_array_cpu *data;
-	struct page *page, *tmp;
-	LIST_HEAD(pages);
-	void *array;
-	unsigned pages_allocated = 0;
-	int i;
-
-	/* first allocate a page for each CPU */
-	for_each_tracing_cpu(i) {
-		array = (void *)__get_free_page(GFP_KERNEL);
-		if (array == NULL) {
-			printk(KERN_ERR "tracer: failed to allocate page"
-			       "for trace buffer!\n");
-			goto free_pages;
-		}
-
-		pages_allocated++;
-		page = virt_to_page(array);
-		list_add(&page->lru, &pages);
-
-/* Only allocate if we are actually using the max trace */
-#ifdef CONFIG_TRACER_MAX_TRACE
-		array = (void *)__get_free_page(GFP_KERNEL);
-		if (array == NULL) {
-			printk(KERN_ERR "tracer: failed to allocate page"
-			       "for trace buffer!\n");
-			goto free_pages;
-		}
-		pages_allocated++;
-		page = virt_to_page(array);
-		list_add(&page->lru, &pages);
-#endif
-	}
-
-	/* Now that we successfully allocate a page per CPU, add them */
-	for_each_tracing_cpu(i) {
-		data = global_trace.data[i];
-		page = list_entry(pages.next, struct page, lru);
-		list_del_init(&page->lru);
-		list_add_tail(&page->lru, &data->trace_pages);
-		ClearPageLRU(page);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-		data = max_tr.data[i];
-		page = list_entry(pages.next, struct page, lru);
-		list_del_init(&page->lru);
-		list_add_tail(&page->lru, &data->trace_pages);
-		SetPageLRU(page);
-#endif
-	}
-	tracing_pages_allocated += pages_allocated;
-	global_trace.entries += ENTRIES_PER_PAGE;
-
 	return 0;
-
- free_pages:
-	list_for_each_entry_safe(page, tmp, &pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
-	return -ENOMEM;
 }
 
-static int trace_free_page(void)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
+	static DEFINE_SPINLOCK(trace_buf_lock);
+	static char trace_buf[TRACE_BUF_SIZE];
+
+	struct ring_buffer_event *event;
+	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	struct page *page;
-	struct list_head *p;
-	int i;
-	int ret = 0;
+	struct print_entry *entry;
+	unsigned long flags, irq_flags;
+	int cpu, len = 0, size, pc;
 
-	/* free one page from each buffer */
-	for_each_tracing_cpu(i) {
-		data = global_trace.data[i];
-		p = data->trace_pages.next;
-		if (p == &data->trace_pages) {
-			/* should never happen */
-			WARN_ON(1);
-			tracing_disabled = 1;
-			ret = -1;
-			break;
-		}
-		page = list_entry(p, struct page, lru);
-		ClearPageLRU(page);
-		list_del(&page->lru);
-		tracing_pages_allocated--;
-		tracing_pages_allocated--;
-		__free_page(page);
+	if (!tr->ctrl || tracing_disabled)
+		return 0;
 
-		tracing_reset(data);
+	pc = preempt_count();
+	preempt_disable_notrace();
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-		data = max_tr.data[i];
-		p = data->trace_pages.next;
-		if (p == &data->trace_pages) {
-			/* should never happen */
-			WARN_ON(1);
-			tracing_disabled = 1;
-			ret = -1;
-			break;
-		}
-		page = list_entry(p, struct page, lru);
-		ClearPageLRU(page);
-		list_del(&page->lru);
-		__free_page(page);
+	if (unlikely(atomic_read(&data->disabled)))
+		goto out;
 
-		tracing_reset(data);
-#endif
-	}
-	global_trace.entries -= ENTRIES_PER_PAGE;
+	spin_lock_irqsave(&trace_buf_lock, flags);
+	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
+	len = min(len, TRACE_BUF_SIZE-1);
+	trace_buf[len] = 0;
+
+	size = sizeof(*entry) + len + 1;
+	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type			= TRACE_PRINT;
+	entry->ip			= ip;
+
+	memcpy(&entry->buf, trace_buf, len);
+	entry->buf[len] = 0;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+ out_unlock:
+	spin_unlock_irqrestore(&trace_buf_lock, flags);
+
+ out:
+	preempt_enable_notrace();
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_vprintk);
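
trace_vprintk formats into a static scratch buffer under a spinlock, clamps the length, and copies the result plus its terminator into a variably sized event. A single-threaded user-space sketch of the clamp-and-copy step; the event below is a plain struct and the names are assumptions, not the kernel types.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define TRACE_BUF_SIZE 128

struct print_event {
	unsigned long ip;
	char buf[TRACE_BUF_SIZE];
};

static int record_vprintk(struct print_event *ev, unsigned long ip,
			  const char *fmt, va_list args)
{
	static char scratch[TRACE_BUF_SIZE];
	int len;

	/* vsnprintf may report more than it wrote; clamp to the buffer. */
	len = vsnprintf(scratch, sizeof(scratch), fmt, args);
	if (len < 0)
		return len;
	if (len > TRACE_BUF_SIZE - 1)
		len = TRACE_BUF_SIZE - 1;
	scratch[len] = '\0';

	ev->ip = ip;
	memcpy(ev->buf, scratch, len + 1);	/* copy the terminator too */
	return len;
}

static int record_printk(struct print_event *ev, unsigned long ip,
			 const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = record_vprintk(ev, ip, fmt, args);
	va_end(args);
	return len;
}

int main(void)
{
	struct print_event ev;

	record_printk(&ev, 0xdeadbeefUL, "value=%d\n", 42);
	printf("%s", ev.buf);
	return 0;
}
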
+
+int __ftrace_printk(unsigned long ip, const char *fmt, ...)
+{
+	int ret;
+	va_list ap;
+
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	va_start(ap, fmt);
+	ret = trace_vprintk(ip, fmt, ap);
+	va_end(ap);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(__ftrace_printk);
+
+static int trace_panic_handler(struct notifier_block *this,
+			       unsigned long event, void *unused)
+{
+	ftrace_dump();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block trace_panic_notifier = {
+	.notifier_call  = trace_panic_handler,
+	.next           = NULL,
+	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
+};
+
+static int trace_die_handler(struct notifier_block *self,
+			     unsigned long val,
+			     void *data)
+{
+	switch (val) {
+	case DIE_OOPS:
+		ftrace_dump();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block trace_die_notifier = {
+	.notifier_call = trace_die_handler,
+	.priority = 200
+};
+
+/*
+ * printk is limited to a max of 1024, but we really don't need it that big.
+ * Nothing should be printing 1000 characters anyway.
+ */
+#define TRACE_MAX_PRINT		1000
+
+/*
+ * Define KERN_TRACE here so that we have one place to modify
+ * it if we decide to change what log level the ftrace dump
+ * should be at.
+ */
+#define KERN_TRACE		KERN_INFO
+
+static void
+trace_printk_seq(struct trace_seq *s)
+{
+	/* Probably should print a warning here. */
+	if (s->len >= TRACE_MAX_PRINT)
+		s->len = TRACE_MAX_PRINT;
+
+	/* should be zero terminated, but we are paranoid. */
+	s->buffer[s->len] = 0;
+
+	printk(KERN_TRACE "%s", s->buffer);
+
+	trace_seq_reset(s);
+}
+
+void ftrace_dump(void)
+{
+	static DEFINE_SPINLOCK(ftrace_dump_lock);
+	/* use static because iter can be a bit big for the stack */
+	static struct trace_iterator iter;
+	static cpumask_t mask;
+	static int dump_ran;
+	unsigned long flags;
+	int cnt = 0, cpu;
+
+	/* only one dump */
+	spin_lock_irqsave(&ftrace_dump_lock, flags);
+	if (dump_ran)
+		goto out;
+
+	dump_ran = 1;
+
+	/* No turning back! */
+	ftrace_kill_atomic();
+
+	for_each_tracing_cpu(cpu) {
+		atomic_inc(&global_trace.data[cpu]->disabled);
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
+
+	iter.tr = &global_trace;
+	iter.trace = current_trace;
+
+	/*
+	 * We need to stop all tracing on all CPUs to read
+	 * the next buffer. This is a bit expensive, but it is
+	 * not done often. We fill in all that we can read,
+	 * and then release the locks again.
+	 */
+
+	cpus_clear(mask);
+
+	while (!trace_empty(&iter)) {
+
+		if (!cnt)
+			printk(KERN_TRACE "---------------------------------\n");
+
+		cnt++;
+
+		/* reset all but tr, trace, and overruns */
+		memset(&iter.seq, 0,
+		       sizeof(struct trace_iterator) -
+		       offsetof(struct trace_iterator, seq));
+		iter.iter_flags |= TRACE_FILE_LAT_FMT;
+		iter.pos = -1;
+
+		if (find_next_entry_inc(&iter) != NULL) {
+			print_trace_line(&iter);
+			trace_consume(&iter);
+		}
+
+		trace_printk_seq(&iter.seq);
+	}
+
+	if (!cnt)
+		printk(KERN_TRACE "   (ftrace buffer empty)\n");
+	else
+		printk(KERN_TRACE "---------------------------------\n");
+
+ out:
+	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
+}
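
ftrace_dump takes a spinlock and a dump_ran flag so the crash-time dump runs at most once, however many paths reach it. A minimal sketch of the same once-only guard using a mutex in user space; the names and the body of the dump are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dump_lock = PTHREAD_MUTEX_INITIALIZER;
static int dump_ran;

/* Run the expensive dump at most once, no matter how often it is called. */
static void crash_dump(void)
{
	pthread_mutex_lock(&dump_lock);
	if (dump_ran)
		goto out;
	dump_ran = 1;

	printf("Dumping buffer:\n");
	printf("---------------------------------\n");
	/* ... walk and print the entries here ... */
	printf("---------------------------------\n");
out:
	pthread_mutex_unlock(&dump_lock);
}

int main(void)
{
	crash_dump();
	crash_dump();	/* second call returns immediately */
	return 0;
}
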
 
 __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
-	void *array;
-	struct page *page;
-	int pages = 0;
-	int ret = -ENOMEM;
 	int i;
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_nr_buffers = num_possible_cpus();
 	tracing_buffer_mask = cpu_possible_map;
 
+	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+						   TRACE_BUFFER_FLAGS);
+	if (!global_trace.buffer) {
+		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
+		WARN_ON(1);
+		return 0;
+	}
+	global_trace.entries = ring_buffer_size(global_trace.buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+					     TRACE_BUFFER_FLAGS);
+	if (!max_tr.buffer) {
+		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
+		WARN_ON(1);
+		ring_buffer_free(global_trace.buffer);
+		return 0;
+	}
+	max_tr.entries = ring_buffer_size(max_tr.buffer);
+	WARN_ON(max_tr.entries != global_trace.entries);
+#endif
+
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
 		max_tr.data[i] = &per_cpu(max_data, i);
-
-		array = (void *)__get_free_page(GFP_KERNEL);
-		if (array == NULL) {
-			printk(KERN_ERR "tracer: failed to allocate page"
-			       "for trace buffer!\n");
-			goto free_buffers;
-		}
-
-		/* set the array to the list */
-		INIT_LIST_HEAD(&data->trace_pages);
-		page = virt_to_page(array);
-		list_add(&page->lru, &data->trace_pages);
-		/* use the LRU flag to differentiate the two buffers */
-		ClearPageLRU(page);
-
-		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-		max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/* Only allocate if we are actually using the max trace */
-#ifdef CONFIG_TRACER_MAX_TRACE
-		array = (void *)__get_free_page(GFP_KERNEL);
-		if (array == NULL) {
-			printk(KERN_ERR "tracer: failed to allocate page"
-			       "for trace buffer!\n");
-			goto free_buffers;
-		}
-
-		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
-		page = virt_to_page(array);
-		list_add(&page->lru, &max_tr.data[i]->trace_pages);
-		SetPageLRU(page);
-#endif
 	}
 
-	/*
-	 * Since we allocate by orders of pages, we may be able to
-	 * round up a bit.
-	 */
-	global_trace.entries = ENTRIES_PER_PAGE;
-	pages++;
-
-	while (global_trace.entries < trace_nr_entries) {
-		if (trace_alloc_page())
-			break;
-		pages++;
-	}
-	max_tr.entries = global_trace.entries;
-
-	pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n",
-		pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE);
-	pr_info("   actual entries %ld\n", global_trace.entries);
-
-	tracer_init_debugfs();
-
 	trace_init_cmdlines();
 
-	register_tracer(&no_tracer);
-	current_trace = &no_tracer;
+	register_tracer(&nop_trace);
+#ifdef CONFIG_BOOT_TRACER
+	register_tracer(&boot_tracer);
+	current_trace = &boot_tracer;
+	current_trace->init(&global_trace);
+#else
+	current_trace = &nop_trace;
+#endif
 
 	/* All seems OK, enable tracing */
 	global_trace.ctrl = tracer_enabled;
 	tracing_disabled = 0;
 
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &trace_panic_notifier);
+
+	register_die_notifier(&trace_die_notifier);
+
 	return 0;
-
- free_buffers:
-	for (i-- ; i >= 0; i--) {
-		struct page *page, *tmp;
-		struct trace_array_cpu *data = global_trace.data[i];
-
-		if (data) {
-			list_for_each_entry_safe(page, tmp,
-						 &data->trace_pages, lru) {
-				list_del_init(&page->lru);
-				__free_page(page);
-			}
-		}
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-		data = max_tr.data[i];
-		if (data) {
-			list_for_each_entry_safe(page, tmp,
-						 &data->trace_pages, lru) {
-				list_del_init(&page->lru);
-				__free_page(page);
-			}
-		}
-#endif
-	}
-	return ret;
 }
-fs_initcall(tracer_alloc_buffers);
+early_initcall(tracer_alloc_buffers);
+fs_initcall(tracer_init_debugfs);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f69f867..f1f9957 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -5,7 +5,9 @@
 #include <asm/atomic.h>
 #include <linux/sched.h>
 #include <linux/clocksource.h>
+#include <linux/ring_buffer.h>
 #include <linux/mmiotrace.h>
+#include <linux/ftrace.h>
 
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
@@ -13,38 +15,60 @@
 	TRACE_FN,
 	TRACE_CTX,
 	TRACE_WAKE,
+	TRACE_CONT,
 	TRACE_STACK,
+	TRACE_PRINT,
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
+	TRACE_BOOT,
 
 	__TRACE_LAST_TYPE
 };
 
 /*
+ * The trace entry - the most basic unit of tracing. This is what
+ * is printed in the end as a single line in the trace output, such as:
+ *
+ *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
+ */
+struct trace_entry {
+	unsigned char		type;
+	unsigned char		cpu;
+	unsigned char		flags;
+	unsigned char		preempt_count;
+	int			pid;
+};
+
+/*
 * Function trace entry - function address and parent function address:
  */
 struct ftrace_entry {
+	struct trace_entry	ent;
 	unsigned long		ip;
 	unsigned long		parent_ip;
 };
+extern struct tracer boot_tracer;
 
 /*
  * Context switch trace entry - which task (and prio) we switched from/to:
  */
 struct ctx_switch_entry {
+	struct trace_entry	ent;
 	unsigned int		prev_pid;
 	unsigned char		prev_prio;
 	unsigned char		prev_state;
 	unsigned int		next_pid;
 	unsigned char		next_prio;
 	unsigned char		next_state;
+	unsigned int		next_cpu;
 };
 
 /*
  * Special (free-form) trace entry:
  */
 struct special_entry {
+	struct trace_entry	ent;
 	unsigned long		arg1;
 	unsigned long		arg2;
 	unsigned long		arg3;
@@ -57,33 +81,60 @@
 #define FTRACE_STACK_ENTRIES	8
 
 struct stack_entry {
+	struct trace_entry	ent;
 	unsigned long		caller[FTRACE_STACK_ENTRIES];
 };
 
 /*
- * The trace entry - the most basic unit of tracing. This is what
- * is printed in the end as a single line in the trace output, such as:
- *
- *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
+ * ftrace_printk entry:
  */
-struct trace_entry {
-	char			type;
-	char			cpu;
-	char			flags;
-	char			preempt_count;
-	int			pid;
-	cycle_t			t;
-	union {
-		struct ftrace_entry		fn;
-		struct ctx_switch_entry		ctx;
-		struct special_entry		special;
-		struct stack_entry		stack;
-		struct mmiotrace_rw		mmiorw;
-		struct mmiotrace_map		mmiomap;
-	};
+struct print_entry {
+	struct trace_entry	ent;
+	unsigned long		ip;
+	char			buf[];
 };
 
-#define TRACE_ENTRY_SIZE	sizeof(struct trace_entry)
+#define TRACE_OLD_SIZE		88
+
+struct trace_field_cont {
+	unsigned char		type;
+	/* Temporary till we get rid of this completely */
+	char			buf[TRACE_OLD_SIZE - 1];
+};
+
+struct trace_mmiotrace_rw {
+	struct trace_entry	ent;
+	struct mmiotrace_rw	rw;
+};
+
+struct trace_mmiotrace_map {
+	struct trace_entry	ent;
+	struct mmiotrace_map	map;
+};
+
+struct trace_boot {
+	struct trace_entry	ent;
+	struct boot_trace	initcall;
+};
+
+/*
+ * trace_flag_type is an enumeration that holds different
+ * states when a trace occurs. These are:
+ *  IRQS_OFF	- interrupts were disabled
+ *  NEED_RESCHED - reschedule is requested
+ *  HARDIRQ	- inside an interrupt handler
+ *  SOFTIRQ	- inside a softirq handler
+ *  CONT	- multiple entries hold the trace item
+ */
+enum trace_flag_type {
+	TRACE_FLAG_IRQS_OFF		= 0x01,
+	TRACE_FLAG_NEED_RESCHED		= 0x02,
+	TRACE_FLAG_HARDIRQ		= 0x04,
+	TRACE_FLAG_SOFTIRQ		= 0x08,
+	TRACE_FLAG_CONT			= 0x10,
+};
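+
+/*
+ * Illustrative use only (this mirrors what the output code elsewhere in
+ * this series does, e.g. mmio_print_mark()): output routines test these
+ * bits on entry->flags and, for TRACE_FLAG_CONT, pull in the
+ * continuation records that follow:
+ *
+ *	if (entry->flags & TRACE_FLAG_CONT)
+ *		trace_seq_print_cont(s, iter);
+ */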
+
+#define TRACE_BUF_SIZE		1024
 
 /*
  * The CPU trace array - it consists of thousands of trace entries
@@ -91,16 +142,9 @@
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct list_head	trace_pages;
 	atomic_t		disabled;
-	raw_spinlock_t		lock;
-	struct lock_class_key	lock_key;
 
 	/* these fields get copied into max-trace: */
-	unsigned		trace_head_idx;
-	unsigned		trace_tail_idx;
-	void			*trace_head; /* producer */
-	void			*trace_tail; /* consumer */
 	unsigned long		trace_idx;
 	unsigned long		overrun;
 	unsigned long		saved_latency;
@@ -124,6 +168,7 @@
  * They have on/off state as well:
  */
 struct trace_array {
+	struct ring_buffer	*buffer;
 	unsigned long		entries;
 	long			ctrl;
 	int			cpu;
@@ -132,6 +177,56 @@
 	struct trace_array_cpu	*data[NR_CPUS];
 };
 
+#define FTRACE_CMP_TYPE(var, type) \
+	__builtin_types_compatible_p(typeof(var), type *)
+
+#undef IF_ASSIGN
+#define IF_ASSIGN(var, entry, etype, id)		\
+	if (FTRACE_CMP_TYPE(var, etype)) {		\
+		var = (typeof(var))(entry);		\
+		WARN_ON(id && (entry)->type != id);	\
+		break;					\
+	}
+
+/* Will cause compile errors if type is not found. */
+extern void __ftrace_bad_type(void);
+
+/*
+ * The trace_assign_type() macro verifies that the entry type is
+ * the same as the type being assigned. To add new types, simply
+ * add a line with the following format:
+ *
+ * IF_ASSIGN(var, ent, type, id);
+ *
+ *  Here "type" is the trace type that includes the trace_entry
+ *  as its "ent" member, and "id" is the trace identifier used
+ *  in the trace_type enum.
+ *
+ *  If the type can have more than one id, then use zero.
+ */
+#define trace_assign_type(var, ent)					\
+	do {								\
+		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
+		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
+		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
+		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
+		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
+		IF_ASSIGN(var, ent, struct special_entry, 0);		\
+		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
+			  TRACE_MMIO_RW);				\
+		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
+			  TRACE_MMIO_MAP);				\
+		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
+		__ftrace_bad_type();					\
+	} while (0)
+
+/* Return values for print_line callback */
+enum print_line_t {
+	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
+	TRACE_TYPE_HANDLED	= 1,
+	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
+};
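+
+/*
+ * Illustrative sketch only (not part of this change): a tracer's
+ * print_line() handler is expected to let trace_assign_type() verify
+ * the entry type and perform the cast before it touches type-specific
+ * fields, and to report its progress with the enum above. The handler
+ * below is hypothetical; it only uses types and helpers declared in
+ * this header:
+ *
+ *	static enum print_line_t example_print_line(struct trace_iterator *iter)
+ *	{
+ *		struct ftrace_entry *field;
+ *
+ *		if (iter->ent->type != TRACE_FN)
+ *			return TRACE_TYPE_UNHANDLED;
+ *
+ *		trace_assign_type(field, iter->ent);
+ *		if (!trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
+ *				      field->ip, field->parent_ip))
+ *			return TRACE_TYPE_PARTIAL_LINE;
+ *
+ *		return TRACE_TYPE_HANDLED;
+ *	}
+ */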
+
 /*
  * A specific tracer, represented by methods that operate on a trace array:
  */
@@ -152,7 +247,7 @@
 	int			(*selftest)(struct tracer *trace,
 					    struct trace_array *tr);
 #endif
-	int			(*print_line)(struct trace_iterator *iter);
+	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 	struct tracer		*next;
 	int			print_max;
 };
@@ -171,57 +266,58 @@
 	struct trace_array	*tr;
 	struct tracer		*trace;
 	void			*private;
-	long			last_overrun[NR_CPUS];
-	long			overrun[NR_CPUS];
+	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
 
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
 	int			cpu;
-
-	struct trace_entry	*prev_ent;
-	int			prev_cpu;
+	u64			ts;
 
 	unsigned long		iter_flags;
 	loff_t			pos;
-	unsigned long		next_idx[NR_CPUS];
-	struct list_head	*next_page[NR_CPUS];
-	unsigned		next_page_idx[NR_CPUS];
 	long			idx;
 };
 
-void tracing_reset(struct trace_array_cpu *data);
+void trace_wake_up(void);
+void tracing_reset(struct trace_array *tr, int cpu);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
+struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
+						struct trace_array_cpu *data);
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
+
 void ftrace(struct trace_array *tr,
 			    struct trace_array_cpu *data,
 			    unsigned long ip,
 			    unsigned long parent_ip,
-			    unsigned long flags);
+			    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
-				unsigned long flags);
+				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
-				unsigned long flags);
+				unsigned long flags, int pc);
 void trace_special(struct trace_array *tr,
 		   struct trace_array_cpu *data,
 		   unsigned long arg1,
 		   unsigned long arg2,
-		   unsigned long arg3);
+		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
 		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned long flags);
+		    unsigned long flags, int pc);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -268,51 +364,33 @@
 extern int DYN_FTRACE_TEST_NAME(void);
 #endif
 
-#ifdef CONFIG_MMIOTRACE
-extern void __trace_mmiotrace_rw(struct trace_array *tr,
-				struct trace_array_cpu *data,
-				struct mmiotrace_rw *rw);
-extern void __trace_mmiotrace_map(struct trace_array *tr,
-				struct trace_array_cpu *data,
-				struct mmiotrace_map *map);
-#endif
-
 #ifdef CONFIG_FTRACE_STARTUP_TEST
-#ifdef CONFIG_FTRACE
 extern int trace_selftest_startup_function(struct tracer *trace,
 					   struct trace_array *tr);
-#endif
-#ifdef CONFIG_IRQSOFF_TRACER
 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 					  struct trace_array *tr);
-#endif
-#ifdef CONFIG_PREEMPT_TRACER
 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
 					     struct trace_array *tr);
-#endif
-#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
 						 struct trace_array *tr);
-#endif
-#ifdef CONFIG_SCHED_TRACER
 extern int trace_selftest_startup_wakeup(struct tracer *trace,
 					 struct trace_array *tr);
-#endif
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+extern int trace_selftest_startup_nop(struct tracer *trace,
+					 struct trace_array *tr);
 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
-#endif
-#ifdef CONFIG_SYSPROF_TRACER
 extern int trace_selftest_startup_sysprof(struct tracer *trace,
 					       struct trace_array *tr);
-#endif
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
+extern void trace_seq_print_cont(struct trace_seq *s,
+				 struct trace_iterator *iter);
 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
 				 size_t cnt);
 extern long ns2usecs(cycle_t nsec);
+extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
 
@@ -334,6 +412,9 @@
 	TRACE_ITER_BLOCK		= 0x80,
 	TRACE_ITER_STACKTRACE		= 0x100,
 	TRACE_ITER_SCHED_TREE		= 0x200,
+	TRACE_ITER_PRINTK		= 0x400,
 };
 
+extern struct tracer nop_trace;
+
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
new file mode 100644
index 0000000..d0a5e50
--- /dev/null
+++ b/kernel/trace/trace_boot.c
@@ -0,0 +1,126 @@
+/*
+ * ring buffer based initcalls tracer
+ *
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/kallsyms.h>
+
+#include "trace.h"
+
+static struct trace_array *boot_trace;
+static int trace_boot_enabled;
+
+
+/* Should be started after do_pre_smp_initcalls() in init/main.c */
+void start_boot_trace(void)
+{
+	trace_boot_enabled = 1;
+}
+
+void stop_boot_trace(void)
+{
+	trace_boot_enabled = 0;
+}
+
+void reset_boot_trace(struct trace_array *tr)
+{
+	stop_boot_trace();
+}
+
+static void boot_trace_init(struct trace_array *tr)
+{
+	int cpu;
+	boot_trace = tr;
+
+	trace_boot_enabled = 0;
+
+	for_each_cpu_mask(cpu, cpu_possible_map)
+		tracing_reset(tr, cpu);
+}
+
+static void boot_trace_ctrl_update(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		start_boot_trace();
+	else
+		stop_boot_trace();
+}
+
+static enum print_line_t initcall_print_line(struct trace_iterator *iter)
+{
+	int ret;
+	struct trace_entry *entry = iter->ent;
+	struct trace_boot *field = (struct trace_boot *)entry;
+	struct boot_trace *it = &field->initcall;
+	struct trace_seq *s = &iter->seq;
+	struct timespec calltime = ktime_to_timespec(it->calltime);
+	struct timespec rettime = ktime_to_timespec(it->rettime);
+
+	if (entry->type == TRACE_BOOT) {
+		ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
+					  calltime.tv_sec,
+					  calltime.tv_nsec,
+					  it->func, it->caller);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
+					  "returned %d after %lld msecs\n",
+					  rettime.tv_sec,
+					  rettime.tv_nsec,
+					  it->func, it->result, it->duration);
+
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_HANDLED;
+	}
+	return TRACE_TYPE_UNHANDLED;
+}
+
+struct tracer boot_tracer __read_mostly =
+{
+	.name		= "initcall",
+	.init		= boot_trace_init,
+	.reset		= reset_boot_trace,
+	.ctrl_update	= boot_trace_ctrl_update,
+	.print_line	= initcall_print_line,
+};
+
+void trace_boot(struct boot_trace *it, initcall_t fn)
+{
+	struct ring_buffer_event *event;
+	struct trace_boot *entry;
+	struct trace_array_cpu *data;
+	unsigned long irq_flags;
+	struct trace_array *tr = boot_trace;
+
+	if (!trace_boot_enabled)
+		return;
+
+	/*
+	 * Get its name now since this function could disappear
+	 * because it is in the .init section.
+	 */
+	sprint_symbol(it->func, (unsigned long)fn);
+	preempt_disable();
+	data = tr->data[smp_processor_id()];
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		goto out;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+	entry->ent.type = TRACE_BOOT;
+	entry->initcall = *it;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+	trace_wake_up();
+
+ out:
+	preempt_enable();
+}
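+
+/*
+ * Illustrative sketch only: the hook that feeds this tracer is expected to
+ * live in init/main.c (outside this file) and to wrap each initcall roughly
+ * as below; the field names are those of struct boot_trace, everything else
+ * is hypothetical:
+ *
+ *	struct boot_trace it;
+ *
+ *	it.caller   = task_pid_nr(current);
+ *	it.calltime = ktime_get();
+ *	it.result   = fn();
+ *	it.rettime  = ktime_get();
+ *	it.duration = ktime_to_ms(ktime_sub(it.rettime, it.calltime));
+ *	trace_boot(&it, fn);
+ *
+ * trace_boot() resolves the symbol name of fn itself, so the caller does not
+ * need to fill in it.func.
+ */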
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3121448..e90eb0c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -23,7 +23,7 @@
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void start_function_trace(struct trace_array *tr)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index ece6cfb..a7db7f0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -130,6 +130,7 @@
 	unsigned long latency, t0, t1;
 	cycle_t T0, T1, delta;
 	unsigned long flags;
+	int pc;
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -141,6 +142,8 @@
 
 	local_save_flags(flags);
 
+	pc = preempt_count();
+
 	if (!report_latency(delta))
 		goto out;
 
@@ -150,7 +153,7 @@
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
 
 	latency = nsecs_to_usecs(delta);
 
@@ -173,8 +176,8 @@
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_reset(data);
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
+	tracing_reset(tr, cpu);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -203,11 +206,11 @@
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
-	tracing_reset(data);
+	tracing_reset(tr, cpu);
 
 	local_save_flags(flags);
 
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -234,14 +237,14 @@
 
 	data = tr->data[cpu];
 
-	if (unlikely(!data) || unlikely(!head_page(data)) ||
+	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
 		return;
 
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b13dc19..f284846 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -27,7 +27,7 @@
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void mmio_trace_init(struct trace_array *tr)
@@ -130,10 +130,14 @@
 {
 	int cpu;
 	unsigned long cnt = 0;
+/* FIXME: */
+#if 0
 	for_each_online_cpu(cpu) {
 		cnt += iter->overrun[cpu];
 		iter->overrun[cpu] = 0;
 	}
+#endif
+	(void)cpu;
 	return cnt;
 }
 
@@ -171,17 +175,21 @@
 	return (ret == -EBUSY) ? 0 : ret;
 }
 
-static int mmio_print_rw(struct trace_iterator *iter)
+static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
-	struct mmiotrace_rw *rw	= &entry->mmiorw;
+	struct trace_mmiotrace_rw *field;
+	struct mmiotrace_rw *rw;
 	struct trace_seq *s	= &iter->seq;
-	unsigned long long t	= ns2usecs(entry->t);
+	unsigned long long t	= ns2usecs(iter->ts);
 	unsigned long usec_rem	= do_div(t, 1000000ULL);
 	unsigned secs		= (unsigned long)t;
 	int ret = 1;
 
-	switch (entry->mmiorw.opcode) {
+	trace_assign_type(field, entry);
+	rw = &field->rw;
+
+	switch (rw->opcode) {
 	case MMIO_READ:
 		ret = trace_seq_printf(s,
 			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
@@ -209,21 +217,25 @@
 		break;
 	}
 	if (ret)
-		return 1;
-	return 0;
+		return TRACE_TYPE_HANDLED;
+	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-static int mmio_print_map(struct trace_iterator *iter)
+static enum print_line_t mmio_print_map(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
-	struct mmiotrace_map *m	= &entry->mmiomap;
+	struct trace_mmiotrace_map *field;
+	struct mmiotrace_map *m;
 	struct trace_seq *s	= &iter->seq;
-	unsigned long long t	= ns2usecs(entry->t);
+	unsigned long long t	= ns2usecs(iter->ts);
 	unsigned long usec_rem	= do_div(t, 1000000ULL);
 	unsigned secs		= (unsigned long)t;
-	int ret = 1;
+	int ret;
 
-	switch (entry->mmiorw.opcode) {
+	trace_assign_type(field, entry);
+	m = &field->map;
+
+	switch (m->opcode) {
 	case MMIO_PROBE:
 		ret = trace_seq_printf(s,
 			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
@@ -241,20 +253,43 @@
 		break;
 	}
 	if (ret)
-		return 1;
-	return 0;
+		return TRACE_TYPE_HANDLED;
+	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-/* return 0 to abort printing without consuming current entry in pipe mode */
-static int mmio_print_line(struct trace_iterator *iter)
+static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
+{
+	struct trace_entry *entry = iter->ent;
+	struct print_entry *print = (struct print_entry *)entry;
+	const char *msg		= print->buf;
+	struct trace_seq *s	= &iter->seq;
+	unsigned long long t	= ns2usecs(iter->ts);
+	unsigned long usec_rem	= do_div(t, 1000000ULL);
+	unsigned secs		= (unsigned long)t;
+	int ret;
+
+	/* The trailing newline must be in the message. */
+	ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (entry->flags & TRACE_FLAG_CONT)
+		trace_seq_print_cont(s, iter);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t mmio_print_line(struct trace_iterator *iter)
 {
 	switch (iter->ent->type) {
 	case TRACE_MMIO_RW:
 		return mmio_print_rw(iter);
 	case TRACE_MMIO_MAP:
 		return mmio_print_map(iter);
+	case TRACE_PRINT:
+		return mmio_print_mark(iter);
 	default:
-		return 1; /* ignore unknown entries */
+		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
 	}
 }
 
@@ -276,6 +311,27 @@
 }
 device_initcall(init_mmio_trace);
 
+static void __trace_mmiotrace_rw(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct mmiotrace_rw *rw)
+{
+	struct ring_buffer_event *event;
+	struct trace_mmiotrace_rw *entry;
+	unsigned long irq_flags;
+
+	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					   &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
+	entry->ent.type			= TRACE_MMIO_RW;
+	entry->rw			= *rw;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+	trace_wake_up();
+}
+
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
@@ -283,6 +339,27 @@
 	__trace_mmiotrace_rw(tr, data, rw);
 }
 
+static void __trace_mmiotrace_map(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct mmiotrace_map *map)
+{
+	struct ring_buffer_event *event;
+	struct trace_mmiotrace_map *entry;
+	unsigned long irq_flags;
+
+	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					   &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
+	entry->ent.type			= TRACE_MMIO_MAP;
+	entry->map			= *map;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+	trace_wake_up();
+}
+
 void mmio_trace_mapping(struct mmiotrace_map *map)
 {
 	struct trace_array *tr = mmio_trace_array;
@@ -293,3 +370,8 @@
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
+
+int mmio_trace_printk(const char *fmt, va_list args)
+{
+	return trace_vprintk(0, fmt, args);
+}
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
new file mode 100644
index 0000000..4592b48
--- /dev/null
+++ b/kernel/trace/trace_nop.c
@@ -0,0 +1,64 @@
+/*
+ * nop tracer
+ *
+ * Copyright (C) 2008 Steven Noonan <steven@uplinklabs.net>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+
+#include "trace.h"
+
+static struct trace_array	*ctx_trace;
+
+static void start_nop_trace(struct trace_array *tr)
+{
+	/* Nothing to do! */
+}
+
+static void stop_nop_trace(struct trace_array *tr)
+{
+	/* Nothing to do! */
+}
+
+static void nop_trace_init(struct trace_array *tr)
+{
+	int cpu;
+	ctx_trace = tr;
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+
+	if (tr->ctrl)
+		start_nop_trace(tr);
+}
+
+static void nop_trace_reset(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		stop_nop_trace(tr);
+}
+
+static void nop_trace_ctrl_update(struct trace_array *tr)
+{
+	/* When starting a new trace, reset the buffers */
+	if (tr->ctrl)
+		start_nop_trace(tr);
+	else
+		stop_nop_trace(tr);
+}
+
+struct tracer nop_trace __read_mostly =
+{
+	.name		= "nop",
+	.init		= nop_trace_init,
+	.reset		= nop_trace_reset,
+	.ctrl_update	= nop_trace_ctrl_update,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_nop,
+#endif
+};
+
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index cb817a2..b8f56be 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,8 +9,8 @@
 #include <linux/debugfs.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -19,15 +19,16 @@
 static atomic_t			sched_ref;
 
 static void
-sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 			struct task_struct *next)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
+	int pc;
+
+	if (!atomic_read(&sched_ref))
+		return;
 
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
@@ -35,97 +36,41 @@
 	if (!tracer_enabled)
 		return;
 
+	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	data = ctx_trace->data[cpu];
 
-	if (likely(disabled == 1))
-		tracing_sched_switch_trace(tr, data, prev, next, flags);
+	if (likely(!atomic_read(&data->disabled)))
+		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	if (!atomic_read(&sched_ref))
-		return;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	sched_switch_func(probe_data, __rq, prev, next);
-}
-
 static void
-wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
-			task_struct *curr)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu;
+	int cpu, pc;
 
-	if (!tracer_enabled)
+	if (!likely(tracer_enabled))
 		return;
 
-	tracing_record_cmdline(curr);
+	pc = preempt_count();
+	tracing_record_cmdline(current);
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	data = ctx_trace->data[cpu];
 
-	if (likely(disabled == 1))
-		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+	if (likely(!atomic_read(&data->disabled)))
+		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_func(probe_data, __rq, task, curr);
-}
-
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
@@ -133,67 +78,47 @@
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static int tracing_sched_register(void)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup\n");
 		return ret;
 	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup_new\n");
 		goto fail_deprobe;
 	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		sched_switch_callback,
-		&ctx_trace);
+	ret = register_trace_sched_switch(probe_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_schedule\n");
 		goto fail_deprobe_wake_new;
 	}
 
 	return ret;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
 fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 	return ret;
 }
 
 static void tracing_sched_unregister(void)
 {
-	marker_probe_unregister("kernel_sched_schedule",
-				sched_switch_callback,
-				&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_switch(probe_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 }
 
 static void tracing_start_sched_switch(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e303ccb..fe4a252 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,7 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/marker.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -44,10 +44,12 @@
 	long disabled;
 	int resched;
 	int cpu;
+	int pc;
 
 	if (likely(!wakeup_task))
 		return;
 
+	pc = preempt_count();
 	resched = need_resched();
 	preempt_disable_notrace();
 
@@ -70,7 +72,7 @@
 	if (task_cpu(wakeup_task) != cpu)
 		goto unlock;
 
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, pc);
 
  unlock:
 	__raw_spin_unlock(&wakeup_lock);
@@ -112,17 +114,18 @@
 }
 
 static void notrace
-wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
+probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	struct task_struct *next)
 {
 	unsigned long latency = 0, t0 = 0, t1 = 0;
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	cycle_t T0, T1, delta;
 	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
+
+	tracing_record_cmdline(prev);
 
 	if (unlikely(!tracer_enabled))
 		return;
@@ -139,12 +142,14 @@
 	if (next != wakeup_task)
 		return;
 
+	pc = preempt_count();
+
 	/* The task we are waiting for is waking up */
-	data = tr->data[wakeup_cpu];
+	data = wakeup_trace->data[wakeup_cpu];
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -155,7 +160,7 @@
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -174,39 +179,14 @@
 	t0 = nsecs_to_usecs(T0);
 	t1 = nsecs_to_usecs(T1);
 
-	update_max_tr(tr, wakeup_task, wakeup_cpu);
+	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 
 out_unlock:
-	__wakeup_reset(tr);
+	__wakeup_reset(wakeup_trace);
 	__raw_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	tracing_record_cmdline(prev);
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	wakeup_sched_switch(probe_data, __rq, prev, next);
+	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -216,7 +196,7 @@
 
 	for_each_possible_cpu(cpu) {
 		data = tr->data[cpu];
-		tracing_reset(data);
+		tracing_reset(tr, cpu);
 	}
 
 	wakeup_cpu = -1;
@@ -240,19 +220,26 @@
 }
 
 static void
-wakeup_check_start(struct trace_array *tr, struct task_struct *p,
-		   struct task_struct *curr)
+probe_wakeup(struct rq *rq, struct task_struct *p)
 {
 	int cpu = smp_processor_id();
 	unsigned long flags;
 	long disabled;
+	int pc;
+
+	if (likely(!tracer_enabled))
+		return;
+
+	tracing_record_cmdline(p);
+	tracing_record_cmdline(current);
 
 	if (likely(!rt_task(p)) ||
 			p->prio >= wakeup_prio ||
-			p->prio >= curr->prio)
+			p->prio >= current->prio)
 		return;
 
-	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+	pc = preempt_count();
+	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -264,7 +251,7 @@
 		goto out_locked;
 
 	/* reset the trace */
-	__wakeup_reset(tr);
+	__wakeup_reset(wakeup_trace);
 
 	wakeup_cpu = task_cpu(p);
 	wakeup_prio = p->prio;
@@ -274,74 +261,37 @@
 
 	local_save_flags(flags);
 
-	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-	trace_function(tr, tr->data[wakeup_cpu],
-		       CALLER_ADDR1, CALLER_ADDR2, flags);
+	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
+	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
+		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct trace_array **ptr = probe_data;
-	struct trace_array *tr = *ptr;
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_check_start(tr, task, curr);
+	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_wakeup(probe_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup\n");
 		return;
 	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_wakeup_new(probe_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup_new\n");
 		goto fail_deprobe;
 	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		sched_switch_callback,
-		&wakeup_trace);
+	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_schedule\n");
 		goto fail_deprobe_wake_new;
 	}
@@ -363,28 +313,18 @@
 
 	return;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&wakeup_trace);
+	unregister_trace_sched_wakeup_new(probe_wakeup);
 fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&wakeup_trace);
+	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
-	marker_probe_unregister("kernel_sched_schedule",
-				sched_switch_callback,
-				&wakeup_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&wakeup_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&wakeup_trace);
+	unregister_trace_sched_switch(probe_wakeup_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_wakeup);
+	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void wakeup_tracer_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0911b7e..09cf230 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -9,65 +9,29 @@
 	case TRACE_FN:
 	case TRACE_CTX:
 	case TRACE_WAKE:
+	case TRACE_CONT:
 	case TRACE_STACK:
+	case TRACE_PRINT:
 	case TRACE_SPECIAL:
 		return 1;
 	}
 	return 0;
 }
 
-static int
-trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
+static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
-	struct trace_entry *entries;
-	struct page *page;
-	int idx = 0;
-	int i;
+	struct ring_buffer_event *event;
+	struct trace_entry *entry;
 
-	BUG_ON(list_empty(&data->trace_pages));
-	page = list_entry(data->trace_pages.next, struct page, lru);
-	entries = page_address(page);
+	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+		entry = ring_buffer_event_data(event);
 
-	check_pages(data);
-	if (head_page(data) != entries)
-		goto failed;
-
-	/*
-	 * The starting trace buffer always has valid elements,
-	 * if any element exists.
-	 */
-	entries = head_page(data);
-
-	for (i = 0; i < tr->entries; i++) {
-
-		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
+		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
-				entries[idx].type);
+				entry->type);
 			goto failed;
 		}
-
-		idx++;
-		if (idx >= ENTRIES_PER_PAGE) {
-			page = virt_to_page(entries);
-			if (page->lru.next == &data->trace_pages) {
-				if (i != tr->entries - 1) {
-					printk(KERN_CONT ".. entries buffer mismatch");
-					goto failed;
-				}
-			} else {
-				page = list_entry(page->lru.next, struct page, lru);
-				entries = page_address(page);
-			}
-			idx = 0;
-		}
 	}
-
-	page = virt_to_page(entries);
-	if (page->lru.next != &data->trace_pages) {
-		printk(KERN_CONT ".. too many entries");
-		goto failed;
-	}
-
 	return 0;
 
  failed:
@@ -89,13 +53,11 @@
 	/* Don't allow flipping of max traces now */
 	raw_local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
+
+	cnt = ring_buffer_entries(tr->buffer);
+
 	for_each_possible_cpu(cpu) {
-		if (!head_page(tr->data[cpu]))
-			continue;
-
-		cnt += tr->data[cpu]->trace_idx;
-
-		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
+		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
@@ -120,11 +82,11 @@
 					   struct trace_array *tr,
 					   int (*func)(void))
 {
-	unsigned long count;
-	int ret;
 	int save_ftrace_enabled = ftrace_enabled;
 	int save_tracer_enabled = tracer_enabled;
+	unsigned long count;
 	char *func_name;
+	int ret;
 
 	/* The ftrace test PASSED */
 	printk(KERN_CONT "PASSED\n");
@@ -157,6 +119,7 @@
 	/* enable tracing */
 	tr->ctrl = 1;
 	trace->init(tr);
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 
@@ -212,10 +175,10 @@
 int
 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long count;
-	int ret;
 	int save_ftrace_enabled = ftrace_enabled;
 	int save_tracer_enabled = tracer_enabled;
+	unsigned long count;
+	int ret;
 
 	/* make sure msleep has been recorded */
 	msleep(1);
@@ -415,6 +378,15 @@
 }
 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
 
+#ifdef CONFIG_NOP_TRACER
+int
+trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
+{
+	/* What could possibly go wrong? */
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SCHED_TRACER
 static int trace_wakeup_test_thread(void *data)
 {
@@ -486,6 +458,9 @@
 
 	wake_up_process(p);
 
+	/* give a little time to let the thread wake up */
+	msleep(100);
+
 	/* stop the tracing. */
 	tr->ctrl = 0;
 	trace->ctrl_update(tr);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
new file mode 100644
index 0000000..74c5d9a
--- /dev/null
+++ b/kernel/trace/trace_stack.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include "trace.h"
+
+#define STACK_TRACE_ENTRIES 500
+
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
+	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
+static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+
+static struct stack_trace max_stack_trace = {
+	.max_entries		= STACK_TRACE_ENTRIES,
+	.entries		= stack_dump_trace,
+};
+
+static unsigned long max_stack_size;
+static raw_spinlock_t max_stack_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int stack_trace_disabled __read_mostly;
+static DEFINE_PER_CPU(int, trace_active);
+
+static inline void check_stack(void)
+{
+	unsigned long this_size, flags;
+	unsigned long *p, *top, *start;
+	int i;
+
+	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = THREAD_SIZE - this_size;
+
+	if (this_size <= max_stack_size)
+		return;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&max_stack_lock);
+
+	/* a race could have already updated it */
+	if (this_size <= max_stack_size)
+		goto out;
+
+	max_stack_size = this_size;
+
+	max_stack_trace.nr_entries	= 0;
+	max_stack_trace.skip		= 3;
+
+	save_stack_trace(&max_stack_trace);
+
+	/*
+	 * Now find where in the stack these are.
+	 */
+	i = 0;
+	start = &this_size;
+	top = (unsigned long *)
+		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
+
+	/*
+	 * Loop through all the entries. Some of the entries may,
+	 * for whatever reason, be missing from the stack, so we
+	 * have to account for that. If they are all there, this
+	 * loop will only run once. This code only runs when a new
+	 * max is hit, so it is far from a fast path.
+	 */
+	while (i < max_stack_trace.nr_entries) {
+
+		stack_dump_index[i] = this_size;
+		p = start;
+
+		for (; p < top && i < max_stack_trace.nr_entries; p++) {
+			if (*p == stack_dump_trace[i]) {
+				this_size = stack_dump_index[i++] =
+					(top - p) * sizeof(unsigned long);
+				/* Start the search from here */
+				start = p + 1;
+			}
+		}
+
+		i++;
+	}
+
+ out:
+	__raw_spin_unlock(&max_stack_lock);
+	raw_local_irq_restore(flags);
+}
+
+static void
+stack_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	int cpu, resched;
+
+	if (unlikely(!ftrace_enabled || stack_trace_disabled))
+		return;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+	/* no atomic needed, this variable is only modified by this CPU */
+	if (per_cpu(trace_active, cpu)++ != 0)
+		goto out;
+
+	check_stack();
+
+ out:
+	per_cpu(trace_active, cpu)--;
+	/* prevent recursion in schedule */
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = stack_trace_call,
+};
+
+static ssize_t
+stack_max_size_read(struct file *filp, char __user *ubuf,
+		    size_t count, loff_t *ppos)
+{
+	unsigned long *ptr = filp->private_data;
+	char buf[64];
+	int r;
+
+	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
+	if (r > sizeof(buf))
+		r = sizeof(buf);
+	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
+}
+
+static ssize_t
+stack_max_size_write(struct file *filp, const char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	long *ptr = filp->private_data;
+	unsigned long val, flags;
+	char buf[64];
+	int ret;
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&max_stack_lock);
+	*ptr = val;
+	__raw_spin_unlock(&max_stack_lock);
+	raw_local_irq_restore(flags);
+
+	return count;
+}
+
+static struct file_operations stack_max_size_fops = {
+	.open		= tracing_open_generic,
+	.read		= stack_max_size_read,
+	.write		= stack_max_size_write,
+};
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	long i = (long)m->private;
+
+	(*pos)++;
+
+	i++;
+
+	if (i >= max_stack_trace.nr_entries ||
+	    stack_dump_trace[i] == ULONG_MAX)
+		return NULL;
+
+	m->private = (void *)i;
+
+	return &m->private;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+	void *t = &m->private;
+	loff_t l = 0;
+
+	local_irq_disable();
+	__raw_spin_lock(&max_stack_lock);
+
+	for (; t && l < *pos; t = t_next(m, t, &l))
+		;
+
+	return t;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+	__raw_spin_unlock(&max_stack_lock);
+	local_irq_enable();
+}
+
+static int trace_lookup_stack(struct seq_file *m, long i)
+{
+	unsigned long addr = stack_dump_trace[i];
+#ifdef CONFIG_KALLSYMS
+	char str[KSYM_SYMBOL_LEN];
+
+	sprint_symbol(str, addr);
+
+	return seq_printf(m, "%s\n", str);
+#else
+	return seq_printf(m, "%p\n", (void*)addr);
+#endif
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+	long i = *(long *)v;
+	int size;
+
+	if (i < 0) {
+		seq_printf(m, "        Depth   Size      Location"
+			   "    (%d entries)\n"
+			   "        -----   ----      --------\n",
+			   max_stack_trace.nr_entries);
+		return 0;
+	}
+
+	if (i >= max_stack_trace.nr_entries ||
+	    stack_dump_trace[i] == ULONG_MAX)
+		return 0;
+
+	if (i+1 == max_stack_trace.nr_entries ||
+	    stack_dump_trace[i+1] == ULONG_MAX)
+		size = stack_dump_index[i];
+	else
+		size = stack_dump_index[i] - stack_dump_index[i+1];
+
+	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
+
+	trace_lookup_stack(m, i);
+
+	return 0;
+}
+
+static struct seq_operations stack_trace_seq_ops = {
+	.start		= t_start,
+	.next		= t_next,
+	.stop		= t_stop,
+	.show		= t_show,
+};
+
+static int stack_trace_open(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	ret = seq_open(file, &stack_trace_seq_ops);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = (void *)-1;
+	}
+
+	return ret;
+}
+
+static struct file_operations stack_trace_fops = {
+	.open		= stack_trace_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+};
+
+static __init int stack_trace_init(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+
+	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
+				    &max_stack_size, &stack_max_size_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'stack_max_size' entry\n");
+
+	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
+				    NULL, &stack_trace_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'stack_trace' entry\n");
+
+	register_ftrace_function(&trace_ops);
+
+	return 0;
+}
+
+device_initcall(stack_trace_init);
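+
+/*
+ * Usage note (illustrative; the exact path depends on where debugfs is
+ * mounted): once this initcall has run, the largest stack usage seen so
+ * far can be read from the "stack_max_size" file and the backtrace that
+ * produced it from "stack_trace", both created in the tracing directory
+ * returned by tracing_init_dentry(). Writing a value (typically 0) to
+ * "stack_max_size" resets the recorded maximum so a new one can be
+ * captured.
+ */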
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index db58fb6..9587d3b 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -241,7 +241,7 @@
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static void start_stack_trace(struct trace_array *tr)
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
new file mode 100644
index 0000000..f2b7c28
--- /dev/null
+++ b/kernel/tracepoint.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2008 Mathieu Desnoyers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/tracepoint.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+extern struct tracepoint __start___tracepoints[];
+extern struct tracepoint __stop___tracepoints[];
+
+/* Set to 1 to enable tracepoint debug output */
+static const int tracepoint_debug;
+
+/*
+ * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
+ * builtin and module tracepoints and the hash table.
+ */
+static DEFINE_MUTEX(tracepoints_mutex);
+
+/*
+ * Tracepoint hash table, containing the active tracepoints.
+ * Protected by tracepoints_mutex.
+ */
+#define TRACEPOINT_HASH_BITS 6
+#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+
+/*
+ * Note about RCU:
+ * It is used to delay the freeing of a multiple-probes array until a
+ * quiescent state is reached.
+ * Modifications to tracepoint entries are protected by tracepoints_mutex.
+ */
+struct tracepoint_entry {
+	struct hlist_node hlist;
+	void **funcs;
+	int refcount;	/* Number of times armed. 0 if disarmed. */
+	struct rcu_head rcu;
+	void *oldptr;
+	unsigned char rcu_pending:1;
+	char name[0];
+};
+
+static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+
+static void free_old_closure(struct rcu_head *head)
+{
+	struct tracepoint_entry *entry = container_of(head,
+		struct tracepoint_entry, rcu);
+	kfree(entry->oldptr);
+	/* Make sure we free the data before setting the pending flag to 0 */
+	smp_wmb();
+	entry->rcu_pending = 0;
+}
+
+static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old)
+{
+	if (!old)
+		return;
+	entry->oldptr = old;
+	entry->rcu_pending = 1;
+	/* write rcu_pending before calling the RCU callback */
+	smp_wmb();
+	call_rcu_sched(&entry->rcu, free_old_closure);
+}
+
+static void debug_print_probes(struct tracepoint_entry *entry)
+{
+	int i;
+
+	if (!tracepoint_debug)
+		return;
+
+	for (i = 0; entry->funcs[i]; i++)
+		printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
+}
+
+static void *
+tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
+{
+	int nr_probes = 0;
+	void **old, **new;
+
+	WARN_ON(!probe);
+
+	debug_print_probes(entry);
+	old = entry->funcs;
+	if (old) {
+		/* (N -> N+1), (N != 0, 1) probes */
+		for (nr_probes = 0; old[nr_probes]; nr_probes++)
+			if (old[nr_probes] == probe)
+				return ERR_PTR(-EEXIST);
+	}
+	/* + 2 : one for new probe, one for NULL func */
+	new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL);
+	if (new == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (old)
+		memcpy(new, old, nr_probes * sizeof(void *));
+	new[nr_probes] = probe;
+	entry->refcount = nr_probes + 1;
+	entry->funcs = new;
+	debug_print_probes(entry);
+	return old;
+}
+
+static void *
+tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
+{
+	int nr_probes = 0, nr_del = 0, i;
+	void **old, **new;
+
+	old = entry->funcs;
+
+	debug_print_probes(entry);
+	/* (N -> M), (N > 1, M >= 0) probes */
+	for (nr_probes = 0; old[nr_probes]; nr_probes++) {
+		if ((!probe || old[nr_probes] == probe))
+			nr_del++;
+	}
+
+	if (nr_probes - nr_del == 0) {
+		/* N -> 0, (N > 1) */
+		entry->funcs = NULL;
+		entry->refcount = 0;
+		debug_print_probes(entry);
+		return old;
+	} else {
+		int j = 0;
+		/* N -> M, (N > 1, M > 0) */
+		/* + 1 for NULL */
+		new = kzalloc((nr_probes - nr_del + 1)
+			* sizeof(void *), GFP_KERNEL);
+		if (new == NULL)
+			return ERR_PTR(-ENOMEM);
+		for (i = 0; old[i]; i++)
+			if ((probe && old[i] != probe))
+				new[j++] = old[i];
+		entry->refcount = nr_probes - nr_del;
+		entry->funcs = new;
+	}
+	debug_print_probes(entry);
+	return old;
+}
+
+/*
+ * Get tracepoint if the tracepoint is present in the tracepoint hash table.
+ * Must be called with tracepoints_mutex held.
+ * Returns NULL if not present.
+ */
+static struct tracepoint_entry *get_tracepoint(const char *name)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tracepoint_entry *e;
+	u32 hash = jhash(name, strlen(name), 0);
+
+	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+	hlist_for_each_entry(e, node, head, hlist) {
+		if (!strcmp(name, e->name))
+			return e;
+	}
+	return NULL;
+}
+
+/*
+ * Add the tracepoint to the tracepoint hash table. Must be called with
+ * tracepoints_mutex held.
+ */
+static struct tracepoint_entry *add_tracepoint(const char *name)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tracepoint_entry *e;
+	size_t name_len = strlen(name) + 1;
+	u32 hash = jhash(name, name_len-1, 0);
+
+	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+	hlist_for_each_entry(e, node, head, hlist) {
+		if (!strcmp(name, e->name)) {
+			printk(KERN_NOTICE
+				"tracepoint %s busy\n", name);
+			return ERR_PTR(-EEXIST);	/* Already there */
+		}
+	}
+	/*
+	 * Using kmalloc here to allocate a variable length element. Could
+	 * cause some memory fragmentation if overused.
+	 */
+	e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
+	if (!e)
+		return ERR_PTR(-ENOMEM);
+	memcpy(&e->name[0], name, name_len);
+	e->funcs = NULL;
+	e->refcount = 0;
+	e->rcu_pending = 0;
+	hlist_add_head(&e->hlist, head);
+	return e;
+}
+
+/*
+ * Remove the tracepoint from the tracepoint hash table. Must be called with
+ * tracepoints_mutex held.
+ */
+static int remove_tracepoint(const char *name)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tracepoint_entry *e;
+	int found = 0;
+	size_t len = strlen(name) + 1;
+	u32 hash = jhash(name, len-1, 0);
+
+	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+	hlist_for_each_entry(e, node, head, hlist) {
+		if (!strcmp(name, e->name)) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found)
+		return -ENOENT;
+	if (e->refcount)
+		return -EBUSY;
+	hlist_del(&e->hlist);
+	/* Make sure the call_rcu_sched has been executed */
+	if (e->rcu_pending)
+		rcu_barrier_sched();
+	kfree(e);
+	return 0;
+}
+
+/*
+ * Sets the probe callback corresponding to one tracepoint.
+ */
+static void set_tracepoint(struct tracepoint_entry **entry,
+	struct tracepoint *elem, int active)
+{
+	WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+
+	/*
+	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
+	 * probe callbacks array is consistent before setting a pointer to it.
+	 * This array is referenced by __DO_TRACE from
+	 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
+	 * is used.
+	 */
+	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
+	elem->state = active;
+}
+
+/*
+ * Disable a tracepoint and its probe callback.
+ * Note: only waiting an RCU period after setting elem->call to the empty
+ * function ensures that the original callback is not used anymore. This is
+ * ensured by the preempt_disable around the call site.
+ */
+static void disable_tracepoint(struct tracepoint *elem)
+{
+	elem->state = 0;
+}
+
+/**
+ * tracepoint_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Updates the probe callback corresponding to a range of tracepoints.
+ */
+void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end)
+{
+	struct tracepoint *iter;
+	struct tracepoint_entry *mark_entry;
+
+	mutex_lock(&tracepoints_mutex);
+	for (iter = begin; iter < end; iter++) {
+		mark_entry = get_tracepoint(iter->name);
+		if (mark_entry) {
+			set_tracepoint(&mark_entry, iter,
+					!!mark_entry->refcount);
+		} else {
+			disable_tracepoint(iter);
+		}
+	}
+	mutex_unlock(&tracepoints_mutex);
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ */
+static void tracepoint_update_probes(void)
+{
+	/* Core kernel tracepoints */
+	tracepoint_update_probe_range(__start___tracepoints,
+		__stop___tracepoints);
+	/* tracepoints in modules. */
+	module_update_tracepoints();
+}
+
+/**
+ * tracepoint_probe_register -  Connect a probe to a tracepoint
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * Returns 0 if ok, error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
+ */
+int tracepoint_probe_register(const char *name, void *probe)
+{
+	struct tracepoint_entry *entry;
+	int ret = 0;
+	void *old;
+
+	mutex_lock(&tracepoints_mutex);
+	entry = get_tracepoint(name);
+	if (!entry) {
+		entry = add_tracepoint(name);
+		if (IS_ERR(entry)) {
+			ret = PTR_ERR(entry);
+			goto end;
+		}
+	}
+	/*
+	 * If we detect that a call_rcu_sched is pending for this tracepoint,
+	 * make sure it's executed now.
+	 */
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
+	old = tracepoint_entry_add_probe(entry, probe);
+	if (IS_ERR(old)) {
+		ret = PTR_ERR(old);
+		goto end;
+	}
+	mutex_unlock(&tracepoints_mutex);
+	tracepoint_update_probes();		/* may update entry */
+	mutex_lock(&tracepoints_mutex);
+	entry = get_tracepoint(name);
+	WARN_ON(!entry);
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
+	tracepoint_entry_free_old(entry, old);
+end:
+	mutex_unlock(&tracepoints_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_register);
+
+/**
+ * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled section
+ * has finished.
+ */
+int tracepoint_probe_unregister(const char *name, void *probe)
+{
+	struct tracepoint_entry *entry;
+	void *old;
+	int ret = -ENOENT;
+
+	mutex_lock(&tracepoints_mutex);
+	entry = get_tracepoint(name);
+	if (!entry)
+		goto end;
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
+	old = tracepoint_entry_remove_probe(entry, probe);
+	mutex_unlock(&tracepoints_mutex);
+	tracepoint_update_probes();		/* may update entry */
+	mutex_lock(&tracepoints_mutex);
+	entry = get_tracepoint(name);
+	if (!entry)
+		goto end;
+	if (entry->rcu_pending)
+		rcu_barrier_sched();
+	tracepoint_entry_free_old(entry, old);
+	remove_tracepoint(name);	/* Ignore busy error message */
+	ret = 0;
+end:
+	mutex_unlock(&tracepoints_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
+
+/**
+ * tracepoint_get_iter_range - Get the next tracepoint given a range.
+ * @tracepoint: current tracepoint (in), next tracepoint (out)
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Returns whether a next tracepoint has been found (1) or not (0).
+ * Will return the first tracepoint in the range if the input tracepoint is
+ * NULL.
+ */
+int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+	struct tracepoint *begin, struct tracepoint *end)
+{
+	if (!*tracepoint && begin != end) {
+		*tracepoint = begin;
+		return 1;
+	}
+	if (*tracepoint >= begin && *tracepoint < end)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
+
+static void tracepoint_get_iter(struct tracepoint_iter *iter)
+{
+	int found = 0;
+
+	/* Core kernel tracepoints */
+	if (!iter->module) {
+		found = tracepoint_get_iter_range(&iter->tracepoint,
+				__start___tracepoints, __stop___tracepoints);
+		if (found)
+			goto end;
+	}
+	/* tracepoints in modules. */
+	found = module_get_iter_tracepoints(iter);
+end:
+	if (!found)
+		tracepoint_iter_reset(iter);
+}
+
+void tracepoint_iter_start(struct tracepoint_iter *iter)
+{
+	tracepoint_get_iter(iter);
+}
+EXPORT_SYMBOL_GPL(tracepoint_iter_start);
+
+void tracepoint_iter_next(struct tracepoint_iter *iter)
+{
+	iter->tracepoint++;
+	/*
+	 * iter->tracepoint may be invalid because we blindly incremented it.
+	 * Make sure it is valid by checking it against the tracepoint ranges,
+	 * moving on to the tracepoints of the following modules if necessary.
+	 */
+	tracepoint_get_iter(iter);
+}
+EXPORT_SYMBOL_GPL(tracepoint_iter_next);
+
+void tracepoint_iter_stop(struct tracepoint_iter *iter)
+{
+}
+EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
+
+void tracepoint_iter_reset(struct tracepoint_iter *iter)
+{
+	iter->module = NULL;
+	iter->tracepoint = NULL;
+}
+EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
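
The file above is the core of the new tracepoint infrastructure: probes are attached to and detached from tracepoints by name, and the probe array is swapped under RCU. As a minimal sketch (not part of the patch), a module could hook a hypothetical tracepoint declared elsewhere as DEFINE_TRACE(subsys_event) taking one int argument; the "subsys_event" name and probe prototype are assumptions for illustration only:

#include <linux/module.h>
#include <linux/tracepoint.h>

/* Must match the prototype of the (hypothetical) subsys_event tracepoint. */
static void probe_subsys_event(int value)
{
	printk(KERN_INFO "subsys_event fired: %d\n", value);
}

static int __init tp_probe_init(void)
{
	/* Attach by name; returns -EEXIST if this probe is already registered. */
	return tracepoint_probe_register("subsys_event",
					 (void *)probe_subsys_event);
}

static void __exit tp_probe_exit(void)
{
	tracepoint_probe_unregister("subsys_event",
				    (void *)probe_subsys_event);
}

module_init(tp_probe_init);
module_exit(tp_probe_exit);
MODULE_LICENSE("GPL");

In practice a caller would usually go through the typed register_trace_<name>() helpers generated by DECLARE_TRACE(), which wrap these two calls.
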
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 714afad..f928f2a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -62,6 +62,7 @@
 	const char *name;
 	int singlethread;
 	int freezeable;		/* Freeze threads during suspend */
+	int rt;
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
 #endif
@@ -766,6 +767,7 @@
 
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	struct workqueue_struct *wq = cwq->wq;
 	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
 	struct task_struct *p;
@@ -781,7 +783,8 @@
 	 */
 	if (IS_ERR(p))
 		return PTR_ERR(p);
-
+	if (cwq->wq->rt)
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 	cwq->thread = p;
 
 	return 0;
@@ -801,6 +804,7 @@
 struct workqueue_struct *__create_workqueue_key(const char *name,
 						int singlethread,
 						int freezeable,
+						int rt,
 						struct lock_class_key *key,
 						const char *lock_name)
 {
@@ -822,6 +826,7 @@
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	wq->singlethread = singlethread;
 	wq->freezeable = freezeable;
+	wq->rt = rt;
 	INIT_LIST_HEAD(&wq->list);
 
 	if (singlethread) {
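
The workqueue hunk adds an rt flag so that a workqueue's kthreads run SCHED_FIFO at MAX_RT_PRIO-1. A minimal sketch, assuming the matching create_rt_workqueue() wrapper in <linux/workqueue.h> from the same series passes rt = 1 down to __create_workqueue_key(); all names below are illustrative:

#include <linux/workqueue.h>

static struct workqueue_struct *example_rt_wq;	/* hypothetical */
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* Runs from a SCHED_FIFO worker thread created by the hunk above. */
}

static int __init example_rt_wq_init(void)
{
	example_rt_wq = create_rt_workqueue("example_rt");
	if (!example_rt_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_rt_wq, &example_work);
	return 0;
}
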
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06fb57c..1338469 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,17 +316,6 @@
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
- * bitmap_scnprintf_len - return buffer length needed to convert
- * bitmap to an ASCII hex string
- * @nr_bits: number of bits to be converted
- */
-int bitmap_scnprintf_len(unsigned int nr_bits)
-{
-	unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;
-	return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
-}
-
-/**
  * __bitmap_parse - convert an ASCII hex string into a bitmap.
  * @buf: pointer to buffer containing string.
  * @buflen: buffer size in bytes.  If string is smaller than this
@@ -1007,3 +996,25 @@
 	return 0;
 }
 EXPORT_SYMBOL(bitmap_allocate_region);
+
+/**
+ * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
+ * @dst:   destination buffer
+ * @src:   bitmap to copy
+ * @nbits: number of bits in the bitmap
+ *
+ * Requires nbits % BITS_PER_LONG == 0.
+ */
+void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
+{
+	unsigned long *d = dst;
+	int i;
+
+	for (i = 0; i < nbits/BITS_PER_LONG; i++) {
+		if (BITS_PER_LONG == 64)
+			d[i] = cpu_to_le64(src[i]);
+		else
+			d[i] = cpu_to_le32(src[i]);
+	}
+}
+EXPORT_SYMBOL(bitmap_copy_le);
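
bitmap_copy_le() converts a CPU-native bitmap into a buffer whose words are little-endian, which is what some hardware descriptors expect. A small usage sketch (names are illustrative, not from the patch); per the comment above, nbits must be a multiple of BITS_PER_LONG:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_NBITS	(2 * BITS_PER_LONG)

static void example_fill_le_mask(void)
{
	DECLARE_BITMAP(channels, EXAMPLE_NBITS);
	unsigned long le_buf[BITS_TO_LONGS(EXAMPLE_NBITS)];

	bitmap_zero(channels, EXAMPLE_NBITS);
	__set_bit(0, channels);
	__set_bit(BITS_PER_LONG + 1, channels);

	/* le_buf can now be copied verbatim into a little-endian descriptor. */
	bitmap_copy_le(le_buf, channels, EXAMPLE_NBITS);
}
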
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index cceecb6..a013bbc 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
+#include <linux/ioport.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -550,18 +551,51 @@
 #endif
 }
 
+static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags)
+{
+#ifndef IO_RSRC_PRINTK_SIZE
+#define IO_RSRC_PRINTK_SIZE	4
+#endif
+
+#ifndef MEM_RSRC_PRINTK_SIZE
+#define MEM_RSRC_PRINTK_SIZE	8
+#endif
+
+	/* room for the actual numbers, the two "0x", -, [, ] and the final zero */
+	char sym[4*sizeof(resource_size_t) + 8];
+	char *p = sym, *pend = sym + sizeof(sym);
+	int size = -1;
+
+	if (res->flags & IORESOURCE_IO)
+		size = IO_RSRC_PRINTK_SIZE;
+	else if (res->flags & IORESOURCE_MEM)
+		size = MEM_RSRC_PRINTK_SIZE;
+
+	*p++ = '[';
+	p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
+	*p++ = '-';
+	p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
+	*p++ = ']';
+	*p = 0;
+
+	return string(buf, end, sym, field_width, precision, flags);
+}
+
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
  * by an extra set of alphanumeric characters that are extended format
  * specifiers.
  *
- * Right now we just handle 'F' (for symbolic Function descriptor pointers)
- * and 'S' (for Symbolic direct pointers), but this can easily be
- * extended in the future (network address types etc).
+ * Right now we handle:
  *
- * The difference between 'S' and 'F' is that on ia64 and ppc64 function
- * pointers are really function descriptors, which contain a pointer the
- * real address. 
+ * - 'F' For symbolic function descriptor pointers
+ * - 'S' For symbolic direct pointers
+ * - 'R' For a struct resource pointer, it prints the range of
+ *       addresses (not the name nor the flags)
+ *
+ * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
+ * function pointers are really function descriptors, which contain a
+ * pointer to the real address.
  */
 static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
 {
@@ -571,6 +605,8 @@
 		/* Fallthrough */
 	case 'S':
 		return symbol_string(buf, end, ptr, field_width, precision, flags);
+	case 'R':
+		return resource_string(buf, end, ptr, field_width, precision, flags);
 	}
 	flags |= SMALL;
 	if (field_width == -1) {
@@ -590,6 +626,7 @@
  * This function follows C99 vsnprintf, but has some extensions:
  * %pS output the name of a text symbol
  * %pF output the name of a function pointer
+ * %pR output the address range in a struct resource
  *
  * The return value is the number of characters which would
  * be generated for the given input, excluding the trailing
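
The new %pR extension prints the start-end range of a struct resource, zero-padded to an I/O- or memory-appropriate width. A quick sketch of how a driver might use it (the resource values are made up):

#include <linux/ioport.h>
#include <linux/kernel.h>

static void example_print_bar(void)
{
	struct resource res = {
		.start = 0xfebf0000,
		.end   = 0xfebf3fff,
		.flags = IORESOURCE_MEM,
	};

	/* Prints e.g. "BAR 0: [0xfebf0000-0xfebf3fff]" */
	printk(KERN_INFO "BAR 0: %pR\n", &res);
}
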
diff --git a/mm/Kconfig b/mm/Kconfig
index 1a501a4..5b5790f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -209,5 +209,16 @@
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
 
+config UNEVICTABLE_LRU
+	bool "Add LRU list to track non-evictable pages"
+	default y
+	depends on MMU
+	help
+	  Keeps unevictable pages off of the active and inactive pageout
+	  lists, so kswapd will not waste CPU time or have its balancing
+	  algorithms thrown off by scanning these pages.  Selecting this
+	  will use one page flag and increase the code size a little;
+	  say Y unless you know what you are doing.
+
 config MMU_NOTIFIER
 	bool
diff --git a/mm/Makefile b/mm/Makefile
index da4ccf0..c06b45a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -33,5 +33,4 @@
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o
-
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
diff --git a/mm/filemap.c b/mm/filemap.c
index 903bf31..ab85536 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
+#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include "internal.h"
 
 /*
@@ -115,12 +116,12 @@
 {
 	struct address_space *mapping = page->mapping;
 
-	mem_cgroup_uncharge_cache_page(page);
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	BUG_ON(page_mapped(page));
+	mem_cgroup_uncharge_cache_page(page);
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
@@ -492,9 +493,24 @@
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t offset, gfp_t gfp_mask)
 {
-	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-	if (ret == 0)
-		lru_cache_add(page);
+	int ret;
+
+	/*
+	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
+	 * before shmem_readpage has a chance to mark them as SwapBacked: they
+	 * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+	 * (called in add_to_page_cache) needs to know where they're going too.
+	 */
+	if (mapping_cap_swap_backed(mapping))
+		SetPageSwapBacked(page);
+
+	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+	if (ret == 0) {
+		if (page_is_file_cache(page))
+			lru_cache_add_file(page);
+		else
+			lru_cache_add_active_anon(page);
+	}
 	return ret;
 }
 
@@ -557,17 +573,14 @@
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The first mb is necessary to safely close the critical section opened by the
- * test_and_set_bit() to lock the page; the second mb is necessary to enforce
- * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
- * races with a parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
-	smp_mb__before_clear_bit();
-	if (!test_and_clear_bit(PG_locked, &page->flags))
-		BUG();
-	smp_mb__after_clear_bit(); 
+	VM_BUG_ON(!PageLocked(page));
+	clear_bit_unlock(PG_locked, &page->flags);
+	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
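
unlock_page() now asserts that the caller actually owns PG_locked and releases it with clear_bit_unlock(), whose release semantics replace the old smp_mb__before_clear_bit()/test_and_clear_bit() pair. The pairing it enforces looks like this sketch (not from the patch; helper name is illustrative):

#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_update_page(struct page *page)
{
	lock_page(page);		/* sets PG_locked, may sleep */

	/* ... modify page state while the page is locked ... */
	SetPageUptodate(page);

	unlock_page(page);		/* VM_BUG_ON() fires if we did not hold the lock */
	return 0;
}
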
diff --git a/mm/fremap.c b/mm/fremap.c
index 7881638..7d12ca7 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -21,6 +21,8 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
@@ -215,15 +217,31 @@
 		spin_unlock(&mapping->i_mmap_lock);
 	}
 
+	if (vma->vm_flags & VM_LOCKED) {
+		/*
+		 * drop the PG_mlocked flag for the over-mapped range
+		 */
+		unsigned int saved_flags = vma->vm_flags;
+		munlock_vma_pages_range(vma, start, start + size);
+		vma->vm_flags = saved_flags;
+	}
+
 	mmu_notifier_invalidate_range_start(mm, start, start + size);
 	err = populate_range(mm, vma, start, size, pgoff);
 	mmu_notifier_invalidate_range_end(mm, start, start + size);
 	if (!err && !(flags & MAP_NONBLOCK)) {
-		if (unlikely(has_write_lock)) {
-			downgrade_write(&mm->mmap_sem);
-			has_write_lock = 0;
+		if (vma->vm_flags & VM_LOCKED) {
+			/*
+			 * might be mapping a previously unmapped range of the file
+			 */
+			mlock_vma_pages_range(vma, start, start + size);
+		} else {
+			if (unlikely(has_write_lock)) {
+				downgrade_write(&mm->mmap_sem);
+				has_write_lock = 0;
+			}
+			make_pages_present(start, start+size);
 		}
-		make_pages_present(start, start+size);
 	}
 
 	/*
@@ -240,4 +258,3 @@
 
 	return err;
 }
-
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3863386..ce8cbb2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -262,7 +262,7 @@
 	struct list_head regions;
 };
 
-struct resv_map *resv_map_alloc(void)
+static struct resv_map *resv_map_alloc(void)
 {
 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
 	if (!resv_map)
@@ -274,7 +274,7 @@
 	return resv_map;
 }
 
-void resv_map_release(struct kref *ref)
+static void resv_map_release(struct kref *ref)
 {
 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 
@@ -289,7 +289,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return (struct resv_map *)(get_vma_private_data(vma) &
 							~HPAGE_RESV_MASK);
-	return 0;
+	return NULL;
 }
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
@@ -1459,11 +1459,11 @@
 {
 	struct hstate *h = &default_hstate;
 	return sprintf(buf,
-			"HugePages_Total: %5lu\n"
-			"HugePages_Free:  %5lu\n"
-			"HugePages_Rsvd:  %5lu\n"
-			"HugePages_Surp:  %5lu\n"
-			"Hugepagesize:    %5lu kB\n",
+			"HugePages_Total:   %5lu\n"
+			"HugePages_Free:    %5lu\n"
+			"HugePages_Rsvd:    %5lu\n"
+			"HugePages_Surp:    %5lu\n"
+			"Hugepagesize:   %8lu kB\n",
 			h->nr_huge_pages,
 			h->free_huge_pages,
 			h->resv_huge_pages,
@@ -1747,10 +1747,8 @@
  * from other VMAs and let the children be SIGKILLed if they are faulting the
  * same region.
  */
-int unmap_ref_private(struct mm_struct *mm,
-					struct vm_area_struct *vma,
-					struct page *page,
-					unsigned long address)
+static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+				struct page *page, unsigned long address)
 {
 	struct vm_area_struct *iter_vma;
 	struct address_space *mapping;
@@ -2073,6 +2071,14 @@
 	return NULL;
 }
 
+static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
+{
+	if (!ptep || write || shared)
+		return 0;
+	else
+		return huge_pte_none(huge_ptep_get(ptep));
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i,
@@ -2082,6 +2088,8 @@
 	unsigned long vaddr = *position;
 	int remainder = *length;
 	struct hstate *h = hstate_vma(vma);
+	int zeropage_ok = 0;
+	int shared = vma->vm_flags & VM_SHARED;
 
 	spin_lock(&mm->page_table_lock);
 	while (vaddr < vma->vm_end && remainder) {
@@ -2094,8 +2102,11 @@
 		 * first, for the page indexing below to work.
 		 */
 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+		if (huge_zeropage_ok(pte, write, shared))
+			zeropage_ok = 1;
 
-		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+		if (!pte ||
+		    (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
 		    (write && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
@@ -2115,8 +2126,11 @@
 		page = pte_page(huge_ptep_get(pte));
 same_page:
 		if (pages) {
-			get_page(page);
-			pages[i] = page + pfn_offset;
+			if (zeropage_ok)
+				pages[i] = ZERO_PAGE(0);
+			else
+				pages[i] = page + pfn_offset;
+			get_page(pages[i]);
 		}
 
 		if (vmas)
diff --git a/mm/internal.h b/mm/internal.h
index 1f43f74..e4e728b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,6 +39,15 @@
 	atomic_dec(&page->_count);
 }
 
+/*
+ * in mm/vmscan.c:
+ */
+extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
+
+/*
+ * in mm/page_alloc.c
+ */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 
 /*
@@ -52,6 +61,120 @@
 	return page_private(page);
 }
 
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * unevictable_migrate_page() is called only from migrate_page_copy() to
+ * migrate the unevictable flag to the new page.
+ * Note that the old page has been isolated from the LRU lists at this
+ * point so we don't need to worry about LRU statistics.
+ */
+static inline void unevictable_migrate_page(struct page *new, struct page *old)
+{
+	if (TestClearPageUnevictable(old))
+		SetPageUnevictable(new);
+}
+#else
+static inline void unevictable_migrate_page(struct page *new, struct page *old)
+{
+}
+#endif
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * Called only in fault path via page_evictable() for a new page
+ * to determine if it's being mapped into a LOCKED vma.
+ * If so, mark page as mlocked.
+ */
+static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+{
+	VM_BUG_ON(PageLRU(page));
+
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
+		return 0;
+
+	if (!TestSetPageMlocked(page)) {
+		inc_zone_page_state(page, NR_MLOCK);
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	return 1;
+}
+
+/*
+ * must be called with vma's mmap_sem held for read, and page locked.
+ */
+extern void mlock_vma_page(struct page *page);
+
+/*
+ * Clear the page's PageMlocked().  This can be useful in a situation where
+ * we want to unconditionally remove a page from the pagecache -- e.g.,
+ * on truncation or freeing.
+ *
+ * It is legal to call this function for any page, mlocked or not.
+ * If called for a page that is still mapped by mlocked vmas, all we do
+ * is revert to lazy LRU behaviour -- semantics are not broken.
+ */
+extern void __clear_page_mlock(struct page *page);
+static inline void clear_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page)))
+		__clear_page_mlock(page);
+}
+
+/*
+ * mlock_migrate_page - called only from migrate_page_copy() to
+ * migrate the Mlocked page flag; update statistics.
+ */
+static inline void mlock_migrate_page(struct page *newpage, struct page *page)
+{
+	if (TestClearPageMlocked(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		SetPageMlocked(newpage);
+		__inc_zone_page_state(newpage, NR_MLOCK);
+		local_irq_restore(flags);
+	}
+}
+
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page))) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		__count_vm_event(UNEVICTABLE_MLOCKFREED);
+		local_irq_restore(flags);
+	}
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+{
+	return 0;
+}
+static inline void clear_page_mlock(struct page *page) { }
+static inline void mlock_vma_page(struct page *page) { }
+static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
+
+#endif /* CONFIG_UNEVICTABLE_LRU */
+
 /*
  * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
  * so all functions starting at paging_init should be marked __init
@@ -120,4 +243,12 @@
 }
 #endif /* CONFIG_SPARSEMEM */
 
+#define GUP_FLAGS_WRITE                  0x1
+#define GUP_FLAGS_FORCE                  0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, int flags,
+		     struct page **pages, struct vm_area_struct **vmas);
+
 #endif
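
The new GUP_FLAGS_* bits and __get_user_pages() let mm-internal callers of the mlock/unevictable work fault pages in while optionally ignoring VMA protections. A hypothetical mm-internal caller might look like the sketch below; the helper name and the exact flag combination are illustrative, not taken from the patch:

#include <linux/mm.h>
#include <linux/sched.h>
#include "internal.h"

static long example_fault_in_locked_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int len = (end - start) >> PAGE_SHIFT;
	int flags = GUP_FLAGS_IGNORE_VMA_PERMISSIONS;

	if (vma->vm_flags & VM_WRITE)
		flags |= GUP_FLAGS_WRITE;

	/* Caller is assumed to hold down_read(&mm->mmap_sem). */
	return __get_user_pages(current, mm, start, len, flags, NULL, NULL);
}
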
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36896f3..866dcc7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -32,11 +32,12 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/mm_inline.h>
+#include <linux/page_cgroup.h>
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
-static struct kmem_cache *page_cgroup_cache __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 
 /*
@@ -65,11 +66,10 @@
 /*
  * For accounting under irq disable, no need for increment preempt count.
  */
-static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
+static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
 		enum mem_cgroup_stat_index idx, int val)
 {
-	int cpu = smp_processor_id();
-	stat->cpustat[cpu].count[idx] += val;
+	stat->count[idx] += val;
 }
 
 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
@@ -85,22 +85,13 @@
 /*
  * per-zone information in memory controller.
  */
-
-enum mem_cgroup_zstat_index {
-	MEM_CGROUP_ZSTAT_ACTIVE,
-	MEM_CGROUP_ZSTAT_INACTIVE,
-
-	NR_MEM_CGROUP_ZSTAT,
-};
-
 struct mem_cgroup_per_zone {
 	/*
 	 * spin_lock to protect the per cgroup LRU
 	 */
 	spinlock_t		lru_lock;
-	struct list_head	active_list;
-	struct list_head	inactive_list;
-	unsigned long count[NR_MEM_CGROUP_ZSTAT];
+	struct list_head	lists[NR_LRU_LISTS];
+	unsigned long		count[NR_LRU_LISTS];
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
@@ -144,69 +135,52 @@
 };
 static struct mem_cgroup init_mem_cgroup;
 
-/*
- * We use the lower bit of the page->page_cgroup pointer as a bit spin
- * lock.  We need to ensure that page->page_cgroup is at least two
- * byte aligned (based on comments from Nick Piggin).  But since
- * bit_spin_lock doesn't actually set that lock bit in a non-debug
- * uniprocessor kernel, we should avoid setting it here too.
- */
-#define PAGE_CGROUP_LOCK_BIT 	0x0
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
-#else
-#define PAGE_CGROUP_LOCK	0x0
-#endif
-
-/*
- * A page_cgroup page is associated with every page descriptor. The
- * page_cgroup helps us identify information about the cgroup
- */
-struct page_cgroup {
-	struct list_head lru;		/* per cgroup LRU list */
-	struct page *page;
-	struct mem_cgroup *mem_cgroup;
-	int flags;
-};
-#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
-#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */
-
-static int page_cgroup_nid(struct page_cgroup *pc)
-{
-	return page_to_nid(pc->page);
-}
-
-static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
-{
-	return page_zonenum(pc->page);
-}
-
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
+	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
+	NR_CHARGE_TYPE,
+};
+
+/* only for here (for easy reading.) */
+#define PCGF_CACHE	(1UL << PCG_CACHE)
+#define PCGF_USED	(1UL << PCG_USED)
+#define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
+#define PCGF_LOCK	(1UL << PCG_LOCK)
+#define PCGF_FILE	(1UL << PCG_FILE)
+static const unsigned long
+pcg_default_flags[NR_CHARGE_TYPE] = {
+	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
+	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
+	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
+	0, /* FORCE */
 };
 
 /*
  * Always modified under lru lock. Then, not necessary to preempt_disable()
  */
-static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
-					bool charge)
+static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
+					 struct page_cgroup *pc,
+					 bool charge)
 {
 	int val = (charge)? 1 : -1;
 	struct mem_cgroup_stat *stat = &mem->stat;
+	struct mem_cgroup_stat_cpu *cpustat;
 
 	VM_BUG_ON(!irqs_disabled());
-	if (flags & PAGE_CGROUP_FLAG_CACHE)
-		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
+
+	cpustat = &stat->cpustat[smp_processor_id()];
+	if (PageCgroupCache(pc))
+		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
 	else
-		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
+		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
 
 	if (charge)
-		__mem_cgroup_stat_add_safe(stat,
+		__mem_cgroup_stat_add_safe(cpustat,
 				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
 	else
-		__mem_cgroup_stat_add_safe(stat,
+		__mem_cgroup_stat_add_safe(cpustat,
 				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
 }
 
@@ -227,7 +201,7 @@
 }
 
 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
-					enum mem_cgroup_zstat_index idx)
+					enum lru_list idx)
 {
 	int nid, zid;
 	struct mem_cgroup_per_zone *mz;
@@ -262,85 +236,77 @@
 				struct mem_cgroup, css);
 }
 
-static inline int page_cgroup_locked(struct page *page)
-{
-	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
-static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
-{
-	VM_BUG_ON(!page_cgroup_locked(page));
-	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
-}
-
-struct page_cgroup *page_get_page_cgroup(struct page *page)
-{
-	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
-}
-
-static void lock_page_cgroup(struct page *page)
-{
-	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
-static int try_lock_page_cgroup(struct page *page)
-{
-	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
-static void unlock_page_cgroup(struct page *page)
-{
-	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
 static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 			struct page_cgroup *pc)
 {
-	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+	int lru = LRU_BASE;
 
-	if (from)
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-	else
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+	if (PageCgroupUnevictable(pc))
+		lru = LRU_UNEVICTABLE;
+	else {
+		if (PageCgroupActive(pc))
+			lru += LRU_ACTIVE;
+		if (PageCgroupFile(pc))
+			lru += LRU_FILE;
+	}
 
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+
+	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
 	list_del(&pc->lru);
 }
 
 static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
 				struct page_cgroup *pc)
 {
-	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+	int lru = LRU_BASE;
 
-	if (!to) {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-		list_add(&pc->lru, &mz->inactive_list);
-	} else {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-		list_add(&pc->lru, &mz->active_list);
+	if (PageCgroupUnevictable(pc))
+		lru = LRU_UNEVICTABLE;
+	else {
+		if (PageCgroupActive(pc))
+			lru += LRU_ACTIVE;
+		if (PageCgroupFile(pc))
+			lru += LRU_FILE;
 	}
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
+
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_add(&pc->lru, &mz->lists[lru]);
+
+	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
 }
 
-static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
 {
-	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
 	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+	int active    = PageCgroupActive(pc);
+	int file      = PageCgroupFile(pc);
+	int unevictable = PageCgroupUnevictable(pc);
+	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
+				(LRU_FILE * !!file + !!active);
 
-	if (from)
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-	else
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+	if (lru == from)
+		return;
 
-	if (active) {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &mz->active_list);
+	MEM_CGROUP_ZSTAT(mz, from) -= 1;
+	/*
+	 * Even though this is done under mz->lru_lock, other flags, which
+	 * are not related to the LRU, may be modified outside the lock.
+	 * We have to use atomic set/clear operations on the flags.
+	 */
+	if (is_unevictable_lru(lru)) {
+		ClearPageCgroupActive(pc);
+		SetPageCgroupUnevictable(pc);
 	} else {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &mz->inactive_list);
+		if (is_active_lru(lru))
+			SetPageCgroupActive(pc);
+		else
+			ClearPageCgroupActive(pc);
+		ClearPageCgroupUnevictable(pc);
 	}
+
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_move(&pc->lru, &mz->lists[lru]);
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -356,7 +322,7 @@
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page *page, bool active)
+void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
@@ -372,17 +338,16 @@
 	 * safely get to page_cgroup without it, so just try_lock it:
 	 * mem_cgroup_isolate_pages allows for page left on wrong list.
 	 */
-	if (!try_lock_page_cgroup(page))
+	pc = lookup_page_cgroup(page);
+	if (!trylock_page_cgroup(pc))
 		return;
-
-	pc = page_get_page_cgroup(page);
-	if (pc) {
+	if (pc && PageCgroupUsed(pc)) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_move_lists(pc, active);
+		__mem_cgroup_move_lists(pc, lru);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 	}
-	unlock_page_cgroup(page);
+	unlock_page_cgroup(pc);
 }
 
 /*
@@ -403,21 +368,6 @@
 }
 
 /*
- * This function is called from vmscan.c. In page reclaiming loop. balance
- * between active and inactive list is calculated. For memory controller
- * page reclaiming, we should use using mem_cgroup's imbalance rather than
- * zone's global lru imbalance.
- */
-long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
-{
-	unsigned long active, inactive;
-	/* active and inactive are the number of pages. 'long' is ok.*/
-	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
-	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
-	return (long) (active / (inactive + 1));
-}
-
-/*
  * prev_priority control...this will be used in memory reclaim path.
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
@@ -444,28 +394,17 @@
  * (see include/linux/mmzone.h)
  */
 
-long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-				   struct zone *zone, int priority)
+long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+					int priority, enum lru_list lru)
 {
-	long nr_active;
+	long nr_pages;
 	int nid = zone->zone_pgdat->node_id;
 	int zid = zone_idx(zone);
 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
-	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
-	return (nr_active >> priority);
-}
+	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
 
-long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-					struct zone *zone, int priority)
-{
-	long nr_inactive;
-	int nid = zone->zone_pgdat->node_id;
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
-
-	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-	return (nr_inactive >> priority);
+	return (nr_pages >> priority);
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -473,7 +412,7 @@
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
 					struct mem_cgroup *mem_cont,
-					int active)
+					int active, int file)
 {
 	unsigned long nr_taken = 0;
 	struct page *page;
@@ -484,38 +423,38 @@
 	int nid = z->zone_pgdat->node_id;
 	int zid = zone_idx(z);
 	struct mem_cgroup_per_zone *mz;
+	int lru = LRU_FILE * !!file + !!active;
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	if (active)
-		src = &mz->active_list;
-	else
-		src = &mz->inactive_list;
-
+	src = &mz->lists[lru];
 
 	spin_lock(&mz->lru_lock);
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
 		if (scan >= nr_to_scan)
 			break;
+		if (unlikely(!PageCgroupUsed(pc)))
+			continue;
 		page = pc->page;
 
 		if (unlikely(!PageLRU(page)))
 			continue;
 
-		if (PageActive(page) && !active) {
-			__mem_cgroup_move_lists(pc, true);
-			continue;
-		}
-		if (!PageActive(page) && active) {
-			__mem_cgroup_move_lists(pc, false);
+		/*
+		 * TODO: play better with lumpy reclaim, grabbing anything.
+		 */
+		if (PageUnevictable(page) ||
+		    (PageActive(page) && !active) ||
+		    (!PageActive(page) && active)) {
+			__mem_cgroup_move_lists(pc, page_lru(page));
 			continue;
 		}
 
 		scan++;
 		list_move(&pc->lru, &pc_list);
 
-		if (__isolate_lru_page(page, mode) == 0) {
+		if (__isolate_lru_page(page, mode, file) == 0) {
 			list_move(&page->lru, dst);
 			nr_taken++;
 		}
@@ -540,26 +479,27 @@
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
-	unsigned long flags;
 	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
 
-	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
-	if (unlikely(pc == NULL))
-		goto err;
-
+	pc = lookup_page_cgroup(page);
+	/* can happen at boot */
+	if (unlikely(!pc))
+		return 0;
+	prefetchw(pc);
 	/*
 	 * We always charge the cgroup the mm_struct belongs to.
 	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
+
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 		if (unlikely(!mem)) {
 			rcu_read_unlock();
-			kmem_cache_free(page_cgroup_cache, pc);
 			return 0;
 		}
 		/*
@@ -572,7 +512,7 @@
 		css_get(&memcg->css);
 	}
 
-	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
+	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
 		if (!(gfp_mask & __GFP_WAIT))
 			goto out;
 
@@ -595,39 +535,33 @@
 		}
 	}
 
+
+	lock_page_cgroup(pc);
+	if (unlikely(PageCgroupUsed(pc))) {
+		unlock_page_cgroup(pc);
+		res_counter_uncharge(&mem->res, PAGE_SIZE);
+		css_put(&mem->css);
+
+		goto done;
+	}
 	pc->mem_cgroup = mem;
-	pc->page = page;
 	/*
 	 * If a page is accounted as a page cache, insert to inactive list.
 	 * If anon, insert to active list.
 	 */
-	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
-		pc->flags = PAGE_CGROUP_FLAG_CACHE;
-	else
-		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
-
-	lock_page_cgroup(page);
-	if (unlikely(page_get_page_cgroup(page))) {
-		unlock_page_cgroup(page);
-		res_counter_uncharge(&mem->res, PAGE_SIZE);
-		css_put(&mem->css);
-		kmem_cache_free(page_cgroup_cache, pc);
-		goto done;
-	}
-	page_assign_page_cgroup(page, pc);
+	pc->flags = pcg_default_flags[ctype];
 
 	mz = page_cgroup_zoneinfo(pc);
+
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
+	unlock_page_cgroup(pc);
 
-	unlock_page_cgroup(page);
 done:
 	return 0;
 out:
 	css_put(&mem->css);
-	kmem_cache_free(page_cgroup_cache, pc);
-err:
 	return -ENOMEM;
 }
 
@@ -635,7 +569,8 @@
 {
 	if (mem_cgroup_subsys.disabled)
 		return 0;
-
+	if (PageCompound(page))
+		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -656,7 +591,8 @@
 {
 	if (mem_cgroup_subsys.disabled)
 		return 0;
-
+	if (PageCompound(page))
+		return 0;
 	/*
 	 * Corner case handling. This is called from add_to_page_cache()
 	 * in usual. But some FS (shmem) precharges this page before calling it
@@ -669,22 +605,27 @@
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
 
-		lock_page_cgroup(page);
-		pc = page_get_page_cgroup(page);
-		if (pc) {
-			VM_BUG_ON(pc->page != page);
-			VM_BUG_ON(!pc->mem_cgroup);
-			unlock_page_cgroup(page);
+
+		pc = lookup_page_cgroup(page);
+		if (!pc)
+			return 0;
+		lock_page_cgroup(pc);
+		if (PageCgroupUsed(pc)) {
+			unlock_page_cgroup(pc);
 			return 0;
 		}
-		unlock_page_cgroup(page);
+		unlock_page_cgroup(pc);
 	}
 
 	if (unlikely(!mm))
 		mm = &init_mm;
 
-	return mem_cgroup_charge_common(page, mm, gfp_mask,
+	if (page_is_file_cache(page))
+		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
+	else
+		return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
 /*
@@ -704,44 +645,46 @@
 	/*
 	 * Check if our page_cgroup is valid
 	 */
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	if (unlikely(!pc))
-		goto unlock;
+	pc = lookup_page_cgroup(page);
+	if (unlikely(!pc || !PageCgroupUsed(pc)))
+		return;
 
-	VM_BUG_ON(pc->page != page);
-
-	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
-	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
-		|| page_mapped(page)))
-		goto unlock;
+	lock_page_cgroup(pc);
+	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
+	     || !PageCgroupUsed(pc)) {
+		/* This can happen due to a race with zap_pte_range() and do_swap_page() */
+		unlock_page_cgroup(pc);
+		return;
+	}
+	ClearPageCgroupUsed(pc);
+	mem = pc->mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
+	unlock_page_cgroup(pc);
 
-	page_assign_page_cgroup(page, NULL);
-	unlock_page_cgroup(page);
-
-	mem = pc->mem_cgroup;
 	res_counter_uncharge(&mem->res, PAGE_SIZE);
 	css_put(&mem->css);
 
-	kmem_cache_free(page_cgroup_cache, pc);
 	return;
-unlock:
-	unlock_page_cgroup(page);
 }
 
 void mem_cgroup_uncharge_page(struct page *page)
 {
+	/* early check. */
+	if (page_mapped(page))
+		return;
+	if (page->mapping && !PageAnon(page))
+		return;
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
 }
 
 void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 	VM_BUG_ON(page_mapped(page));
+	VM_BUG_ON(page->mapping);
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
@@ -758,15 +701,19 @@
 	if (mem_cgroup_subsys.disabled)
 		return 0;
 
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	if (pc) {
+	pc = lookup_page_cgroup(page);
+	lock_page_cgroup(pc);
+	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
-		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
-			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+		if (PageCgroupCache(pc)) {
+			if (page_is_file_cache(page))
+				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+			else
+				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+		}
 	}
-	unlock_page_cgroup(page);
+	unlock_page_cgroup(pc);
 	if (mem) {
 		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
 			ctype, mem);
@@ -791,7 +738,7 @@
 	 */
 	if (!newpage->mapping)
 		__mem_cgroup_uncharge_common(newpage,
-					 MEM_CGROUP_CHARGE_TYPE_FORCE);
+				MEM_CGROUP_CHARGE_TYPE_FORCE);
 	else if (PageAnon(newpage))
 		mem_cgroup_uncharge_page(newpage);
 }
@@ -863,7 +810,7 @@
 #define FORCE_UNCHARGE_BATCH	(128)
 static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 			    struct mem_cgroup_per_zone *mz,
-			    int active)
+			    enum lru_list lru)
 {
 	struct page_cgroup *pc;
 	struct page *page;
@@ -871,15 +818,14 @@
 	unsigned long flags;
 	struct list_head *list;
 
-	if (active)
-		list = &mz->active_list;
-	else
-		list = &mz->inactive_list;
+	list = &mz->lists[lru];
 
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	while (!list_empty(list)) {
 		pc = list_entry(list->prev, struct page_cgroup, lru);
 		page = pc->page;
+		if (!PageCgroupUsed(pc))
+			break;
 		get_page(page);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 		/*
@@ -894,8 +840,10 @@
 				count = FORCE_UNCHARGE_BATCH;
 				cond_resched();
 			}
-		} else
-			cond_resched();
+		} else {
+			spin_lock_irqsave(&mz->lru_lock, flags);
+			break;
+		}
 		spin_lock_irqsave(&mz->lru_lock, flags);
 	}
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
@@ -919,15 +867,17 @@
 	while (mem->res.usage > 0) {
 		if (atomic_read(&mem->css.cgroup->count) > 0)
 			goto out;
+		/* This is to make sure all *used* pages are on the LRU. */
+		lru_add_drain_all();
 		for_each_node_state(node, N_POSSIBLE)
 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 				struct mem_cgroup_per_zone *mz;
+				enum lru_list l;
 				mz = mem_cgroup_zoneinfo(mem, node, zid);
-				/* drop all page_cgroup in active_list */
-				mem_cgroup_force_empty_list(mem, mz, 1);
-				/* drop all page_cgroup in inactive_list */
-				mem_cgroup_force_empty_list(mem, mz, 0);
+				for_each_lru(l)
+					mem_cgroup_force_empty_list(mem, mz, l);
 			}
+		cond_resched();
 	}
 	ret = 0;
 out:
@@ -1012,14 +962,27 @@
 	}
 	/* showing # of active pages */
 	{
-		unsigned long active, inactive;
+		unsigned long active_anon, inactive_anon;
+		unsigned long active_file, inactive_file;
+		unsigned long unevictable;
 
-		inactive = mem_cgroup_get_all_zonestat(mem_cont,
-						MEM_CGROUP_ZSTAT_INACTIVE);
-		active = mem_cgroup_get_all_zonestat(mem_cont,
-						MEM_CGROUP_ZSTAT_ACTIVE);
-		cb->fill(cb, "active", (active) * PAGE_SIZE);
-		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
+		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
+						LRU_INACTIVE_ANON);
+		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
+						LRU_ACTIVE_ANON);
+		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
+						LRU_INACTIVE_FILE);
+		active_file = mem_cgroup_get_all_zonestat(mem_cont,
+						LRU_ACTIVE_FILE);
+		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
+							LRU_UNEVICTABLE);
+
+		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
+		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
+		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
+		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
+		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+
 	}
 	return 0;
 }
@@ -1062,6 +1025,7 @@
 {
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup_per_zone *mz;
+	enum lru_list l;
 	int zone, tmp = node;
 	/*
 	 * This routine is called against possible nodes.
@@ -1082,9 +1046,9 @@
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		INIT_LIST_HEAD(&mz->active_list);
-		INIT_LIST_HEAD(&mz->inactive_list);
 		spin_lock_init(&mz->lru_lock);
+		for_each_lru(l)
+			INIT_LIST_HEAD(&mz->lists[l]);
 	}
 	return 0;
 }
@@ -1125,7 +1089,6 @@
 
 	if (unlikely((cont->parent) == NULL)) {
 		mem = &init_mem_cgroup;
-		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
 	} else {
 		mem = mem_cgroup_alloc();
 		if (!mem)
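
The memcontrol.c changes replace the per-zone active/inactive lists with the full NR_LRU_LISTS array and repeatedly derive the list index from the page_cgroup flags as LRU_FILE * !!file + !!active, with unevictable pages going to LRU_UNEVICTABLE. That mapping, pulled out into a standalone helper purely for illustration (no such helper exists in this patch), would be:

#include <linux/mmzone.h>
#include <linux/page_cgroup.h>

static enum lru_list example_page_cgroup_lru(struct page_cgroup *pc)
{
	if (PageCgroupUnevictable(pc))
		return LRU_UNEVICTABLE;

	/* LRU_FILE selects the file lists; adding LRU_ACTIVE (1) selects the active one. */
	return LRU_FILE * !!PageCgroupFile(pc) + !!PageCgroupActive(pc);
}
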
diff --git a/mm/memory.c b/mm/memory.c
index 1002f47..164951c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1129,12 +1129,17 @@
 	return !vma->vm_ops || !vma->vm_ops->fault;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, int flags,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
-	unsigned int vm_flags;
+	unsigned int vm_flags = 0;
+	int write = !!(flags & GUP_FLAGS_WRITE);
+	int force = !!(flags & GUP_FLAGS_FORCE);
+	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 
 	if (len <= 0)
 		return 0;
@@ -1158,7 +1163,9 @@
 			pud_t *pud;
 			pmd_t *pmd;
 			pte_t *pte;
-			if (write) /* user gate pages are read-only */
+
+			/* user gate pages are read-only */
+			if (!ignore && write)
 				return i ? : -EFAULT;
 			if (pg > TASK_SIZE)
 				pgd = pgd_offset_k(pg);
@@ -1190,8 +1197,9 @@
 			continue;
 		}
 
-		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-				|| !(vm_flags & vma->vm_flags))
+		if (!vma ||
+		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    (!ignore && !(vm_flags & vma->vm_flags)))
 			return i ? : -EFAULT;
 
 		if (is_vm_hugetlb_page(vma)) {
@@ -1266,6 +1274,23 @@
 	} while (len);
 	return i;
 }
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write, int force,
+		struct page **pages, struct vm_area_struct **vmas)
+{
+	int flags = 0;
+
+	if (write)
+		flags |= GUP_FLAGS_WRITE;
+	if (force)
+		flags |= GUP_FLAGS_FORCE;
+
+	return __get_user_pages(tsk, mm,
+				start, len, flags,
+				pages, vmas);
+}
+
 EXPORT_SYMBOL(get_user_pages);
 
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -1296,18 +1321,14 @@
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
-	if (retval)
-		goto out;
-
 	retval = -EINVAL;
 	if (PageAnon(page))
-		goto out_uncharge;
+		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
-		goto out_uncharge;
+		goto out;
 	retval = -EBUSY;
 	if (!pte_none(*pte))
 		goto out_unlock;
@@ -1323,8 +1344,6 @@
 	return retval;
 out_unlock:
 	pte_unmap_unlock(pte, ptl);
-out_uncharge:
-	mem_cgroup_uncharge_page(page);
 out:
 	return retval;
 }
@@ -1858,6 +1877,15 @@
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (!new_page)
 		goto oom;
+	/*
+	 * Don't let another task, with possibly unlocked vma,
+	 * keep the mlocked page.
+	 */
+	if (vma->vm_flags & VM_LOCKED) {
+		lock_page(old_page);	/* for LRU manipulation */
+		clear_page_mlock(old_page);
+		unlock_page(old_page);
+	}
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
@@ -1886,11 +1914,13 @@
 		 * thread doing COW.
 		 */
 		ptep_clear_flush_notify(vma, address, page_table);
-		set_pte_at(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
-		lru_cache_add_active(new_page);
+		SetPageSwapBacked(new_page);
+		lru_cache_add_active_or_unevictable(new_page, vma);
 		page_add_new_anon_rmap(new_page, vma, address);
 
+		/* TODO: is this safe?  do_anonymous_page() does it this way. */
+		set_pte_at(mm, address, page_table, entry);
+		update_mmu_cache(vma, address, entry);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2288,16 +2318,17 @@
 		count_vm_event(PGMAJFAULT);
 	}
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-		ret = VM_FAULT_OOM;
-		goto out;
-	}
-
 	mark_page_accessed(page);
+
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+		ret = VM_FAULT_OOM;
+		unlock_page(page);
+		goto out;
+	}
+
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
@@ -2324,7 +2355,7 @@
 	page_add_anon_rmap(page, vma, address);
 
 	swap_free(entry);
-	if (vm_swap_full())
+	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		remove_exclusive_swap_page(page);
 	unlock_page(page);
 
@@ -2382,7 +2413,8 @@
 	if (!pte_none(*page_table))
 		goto release;
 	inc_mm_counter(mm, anon_rss);
-	lru_cache_add_active(page);
+	SetPageSwapBacked(page);
+	lru_cache_add_active_or_unevictable(page, vma);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
 
@@ -2423,6 +2455,7 @@
 	struct page *page;
 	pte_t entry;
 	int anon = 0;
+	int charged = 0;
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
@@ -2463,6 +2496,18 @@
 				ret = VM_FAULT_OOM;
 				goto out;
 			}
+			if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+				ret = VM_FAULT_OOM;
+				page_cache_release(page);
+				goto out;
+			}
+			charged = 1;
+			/*
+			 * Don't let another task, with possibly unlocked vma,
+			 * keep the mlocked page.
+			 */
+			if (vma->vm_flags & VM_LOCKED)
+				clear_page_mlock(vmf.page);
 			copy_user_highpage(page, vmf.page, address, vma);
 			__SetPageUptodate(page);
 		} else {
@@ -2497,11 +2542,6 @@
 
 	}
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-		ret = VM_FAULT_OOM;
-		goto out;
-	}
-
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
 	/*
@@ -2520,11 +2560,11 @@
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		set_pte_at(mm, address, page_table, entry);
 		if (anon) {
-                        inc_mm_counter(mm, anon_rss);
-                        lru_cache_add_active(page);
-                        page_add_new_anon_rmap(page, vma, address);
+			inc_mm_counter(mm, anon_rss);
+			SetPageSwapBacked(page);
+			lru_cache_add_active_or_unevictable(page, vma);
+			page_add_new_anon_rmap(page, vma, address);
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(page);
@@ -2533,11 +2573,14 @@
 				get_page(dirty_page);
 			}
 		}
+		/* TODO: is this safe?  do_anonymous_page() does it this way. */
+		set_pte_at(mm, address, page_table, entry);
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, entry);
 	} else {
-		mem_cgroup_uncharge_page(page);
+		if (charged)
+			mem_cgroup_uncharge_page(page);
 		if (anon)
 			page_cache_release(page);
 		else
@@ -2772,19 +2815,9 @@
 	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
 	ret = get_user_pages(current, current->mm, addr,
 			len, write, 0, NULL, NULL);
-	if (ret < 0) {
-		/*
-		   SUS require strange return value to mlock
-		    - invalid addr generate to ENOMEM.
-		    - out of memory should generate EAGAIN.
-		*/
-		if (ret == -EFAULT)
-			ret = -ENOMEM;
-		else if (ret == -ENOMEM)
-			ret = -EAGAIN;
+	if (ret < 0)
 		return ret;
-	}
-	return ret == len ? 0 : -ENOMEM;
+	return ret == len ? 0 : -EFAULT;
 }
 
 #if !defined(__HAVE_ARCH_GATE_AREA)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 89fee2d..6837a10 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
+#include <linux/pfn.h>
 
 #include <asm/tlbflush.h>
 
@@ -323,11 +324,11 @@
 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
 	BUG_ON(nr_pages % PAGES_PER_SECTION);
 
-	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
-
 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
 	for (i = 0; i < sections_to_remove; i++) {
 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+		release_mem_region(pfn << PAGE_SHIFT,
+				   PAGES_PER_SECTION << PAGE_SHIFT);
 		ret = __remove_section(zone, __pfn_to_section(pfn));
 		if (ret)
 			break;
@@ -657,8 +658,9 @@
 		 * We can skip free pages. And we can only deal with pages on
 		 * LRU.
 		 */
-		ret = isolate_lru_page(page, &source);
+		ret = isolate_lru_page(page);
 		if (!ret) { /* Success */
+			list_add_tail(&page->lru, &source);
 			move_pages--;
 		} else {
 			/* Because we don't have big zone->lock, we should
@@ -849,10 +851,19 @@
 
 	return ret;
 }
+
+int remove_memory(u64 start, u64 size)
+{
+	unsigned long start_pfn, end_pfn;
+
+	start_pfn = PFN_DOWN(start);
+	end_pfn = start_pfn + PFN_DOWN(size);
+	return offline_pages(start_pfn, end_pfn, 120 * HZ);
+}
 #else
 int remove_memory(u64 start, u64 size)
 {
 	return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
+EXPORT_SYMBOL_GPL(remove_memory);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8336905..36f4257 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -93,6 +93,8 @@
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
 
+#include "internal.h"
+
 /* Internal flags */
 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
@@ -762,8 +764,11 @@
 	/*
 	 * Avoid migrating a page that is shared with others.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
-		isolate_lru_page(page, pagelist);
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
+		if (!isolate_lru_page(page)) {
+			list_add_tail(&page->lru, pagelist);
+		}
+	}
 }
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
@@ -2197,7 +2202,7 @@
 	if (PageSwapCache(page))
 		md->swapcache++;
 
-	if (PageActive(page))
+	if (PageActive(page) || PageUnevictable(page))
 		md->active++;
 
 	if (PageWriteback(page))
diff --git a/mm/migrate.c b/mm/migrate.c
index 2a80136..6602941b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -37,36 +37,6 @@
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 /*
- * Isolate one page from the LRU lists. If successful put it onto
- * the indicated list with elevated page count.
- *
- * Result:
- *  -EBUSY: page not on LRU list
- *  0: page removed from LRU list and added to the specified list.
- */
-int isolate_lru_page(struct page *page, struct list_head *pagelist)
-{
-	int ret = -EBUSY;
-
-	if (PageLRU(page)) {
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irq(&zone->lru_lock);
-		if (PageLRU(page) && get_page_unless_zero(page)) {
-			ret = 0;
-			ClearPageLRU(page);
-			if (PageActive(page))
-				del_page_from_active_list(zone, page);
-			else
-				del_page_from_inactive_list(zone, page);
-			list_add_tail(&page->lru, pagelist);
-		}
-		spin_unlock_irq(&zone->lru_lock);
-	}
-	return ret;
-}
-
-/*
  * migrate_prep() needs to be called before we start compiling a list of pages
  * to be migrated using isolate_lru_page().
  */
@@ -83,23 +53,9 @@
 	return 0;
 }
 
-static inline void move_to_lru(struct page *page)
-{
-	if (PageActive(page)) {
-		/*
-		 * lru_cache_add_active checks that
-		 * the PG_active bit is off.
-		 */
-		ClearPageActive(page);
-		lru_cache_add_active(page);
-	} else {
-		lru_cache_add(page);
-	}
-	put_page(page);
-}
-
 /*
- * Add isolated pages on the list back to the LRU.
+ * Add isolated pages on the list back to the LRU under page lock
+ * to avoid leaking evictable pages back onto the unevictable list.
  *
  * returns the number of pages put back.
  */
@@ -111,7 +67,7 @@
 
 	list_for_each_entry_safe(page, page2, l, lru) {
 		list_del(&page->lru);
-		move_to_lru(page);
+		putback_lru_page(page);
 		count++;
 	}
 	return count;
@@ -374,8 +330,6 @@
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
 
 	spin_unlock_irq(&mapping->tree_lock);
-	if (!PageSwapCache(newpage))
-		mem_cgroup_uncharge_cache_page(page);
 
 	return 0;
 }
@@ -385,6 +339,8 @@
  */
 static void migrate_page_copy(struct page *newpage, struct page *page)
 {
+	int anon;
+
 	copy_highpage(newpage, page);
 
 	if (PageError(page))
@@ -393,8 +349,11 @@
 		SetPageReferenced(newpage);
 	if (PageUptodate(page))
 		SetPageUptodate(newpage);
-	if (PageActive(page))
+	if (TestClearPageActive(page)) {
+		VM_BUG_ON(PageUnevictable(page));
 		SetPageActive(newpage);
+	} else
+		unevictable_migrate_page(newpage, page);
 	if (PageChecked(page))
 		SetPageChecked(newpage);
 	if (PageMappedToDisk(page))
@@ -412,14 +371,20 @@
 		__set_page_dirty_nobuffers(newpage);
  	}
 
+	mlock_migrate_page(newpage, page);
+
 #ifdef CONFIG_SWAP
 	ClearPageSwapCache(page);
 #endif
-	ClearPageActive(page);
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
+	/* page->mapping contains a flag for PageAnon() */
+	anon = PageAnon(page);
 	page->mapping = NULL;
 
+	if (!anon) /* This page was removed from radix-tree. */
+		mem_cgroup_uncharge_cache_page(page);
+
 	/*
 	 * If any waiters have accumulated on the new page then
 	 * wake them up.
@@ -594,6 +559,10 @@
  *
  * The new page will have replaced the old page if this function
  * is successful.
+ *
+ * Return value:
+ *   < 0 - error code
+ *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page)
 {
@@ -611,6 +580,8 @@
 	/* Prepare mapping for the new page.*/
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
+	if (PageSwapBacked(page))
+		SetPageSwapBacked(newpage);
 
 	mapping = page_mapping(page);
 	if (!mapping)
@@ -654,9 +625,10 @@
 	if (!newpage)
 		return -ENOMEM;
 
-	if (page_count(page) == 1)
+	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
 		goto move_newpage;
+	}
 
 	charge = mem_cgroup_prepare_migration(page, newpage);
 	if (charge == -ENOMEM) {
@@ -730,7 +702,6 @@
 		rcu_read_unlock();
 
 unlock:
-
 	unlock_page(page);
 
 	if (rc != -EAGAIN) {
@@ -741,17 +712,19 @@
  		 * restored.
  		 */
  		list_del(&page->lru);
- 		move_to_lru(page);
+		putback_lru_page(page);
 	}
 
 move_newpage:
 	if (!charge)
 		mem_cgroup_end_migration(newpage);
+
 	/*
 	 * Move the new page to the LRU. If migration was not successful
 	 * then this will free the page.
 	 */
-	move_to_lru(newpage);
+	putback_lru_page(newpage);
+
 	if (result) {
 		if (rc)
 			*result = rc;
@@ -858,9 +831,11 @@
  * Move a set of pages as indicated in the pm array. The addr
  * field must be set to the virtual address of the page to be moved
  * and the node number must contain a valid target node.
+ * The pm array ends with node = MAX_NUMNODES.
  */
-static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
-				int migrate_all)
+static int do_move_page_to_node_array(struct mm_struct *mm,
+				      struct page_to_node *pm,
+				      int migrate_all)
 {
 	int err;
 	struct page_to_node *pp;
@@ -914,7 +889,9 @@
 				!migrate_all)
 			goto put_and_set;
 
-		err = isolate_lru_page(page, &pagelist);
+		err = isolate_lru_page(page);
+		if (!err)
+			list_add_tail(&page->lru, &pagelist);
 put_and_set:
 		/*
 		 * Either remove the duplicate refcount from
@@ -926,36 +903,118 @@
 		pp->status = err;
 	}
 
+	err = 0;
 	if (!list_empty(&pagelist))
 		err = migrate_pages(&pagelist, new_page_node,
 				(unsigned long)pm);
-	else
-		err = -ENOENT;
 
 	up_read(&mm->mmap_sem);
 	return err;
 }
 
 /*
- * Determine the nodes of a list of pages. The addr in the pm array
- * must have been set to the virtual address of which we want to determine
- * the node number.
+ * Migrate an array of page addresses onto an array of nodes and fill
+ * the corresponding array of status.
  */
-static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
+static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
+			 unsigned long nr_pages,
+			 const void __user * __user *pages,
+			 const int __user *nodes,
+			 int __user *status, int flags)
 {
-	down_read(&mm->mmap_sem);
+	struct page_to_node *pm = NULL;
+	nodemask_t task_nodes;
+	int err = 0;
+	int i;
 
-	for ( ; pm->node != MAX_NUMNODES; pm++) {
-		struct vm_area_struct *vma;
-		struct page *page;
-		int err;
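+	/* Pages may only be moved to nodes in the target task's cpuset. */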
+	task_nodes = cpuset_mems_allowed(task);
+
+	/* Limit nr_pages so that the multiplication may not overflow */
+	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
+		err = -E2BIG;
+		goto out;
+	}
+
+	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
+	if (!pm) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * Get parameters from user space and initialize the pm
+	 * array. Return various errors if the user did something wrong.
+	 */
+	for (i = 0; i < nr_pages; i++) {
+		const void __user *p;
 
 		err = -EFAULT;
-		vma = find_vma(mm, pm->addr);
+		if (get_user(p, pages + i))
+			goto out_pm;
+
+		pm[i].addr = (unsigned long)p;
+		if (nodes) {
+			int node;
+
+			if (get_user(node, nodes + i))
+				goto out_pm;
+
+			err = -ENODEV;
+			if (!node_state(node, N_HIGH_MEMORY))
+				goto out_pm;
+
+			err = -EACCES;
+			if (!node_isset(node, task_nodes))
+				goto out_pm;
+
+			pm[i].node = node;
+		} else
+			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
+	}
+	/* End marker */
+	pm[nr_pages].node = MAX_NUMNODES;
+
+	err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
+	if (err >= 0)
+		/* Return status information */
+		for (i = 0; i < nr_pages; i++)
+			if (put_user(pm[i].status, status + i))
+				err = -EFAULT;
+
+out_pm:
+	vfree(pm);
+out:
+	return err;
+}
+
+/*
+ * Determine the nodes of an array of pages and store them in the status array.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+			 const void __user * __user *pages,
+			 int __user *status)
+{
+	unsigned long i;
+	int err;
+
+	down_read(&mm->mmap_sem);
+
+	for (i = 0; i < nr_pages; i++) {
+		const void __user *p;
+		unsigned long addr;
+		struct vm_area_struct *vma;
+		struct page *page;
+
+		err = -EFAULT;
+		if (get_user(p, pages+i))
+			goto out;
+		addr = (unsigned long) p;
+
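+		/* Walk the vma tree and page tables to find the mapped page. */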
+		vma = find_vma(mm, addr);
 		if (!vma)
 			goto set_status;
 
-		page = follow_page(vma, pm->addr, 0);
+		page = follow_page(vma, addr, 0);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -968,11 +1027,13 @@
 
 		err = page_to_nid(page);
 set_status:
-		pm->status = err;
+		put_user(err, status+i);
 	}
+	err = 0;
 
+out:
 	up_read(&mm->mmap_sem);
-	return 0;
+	return err;
 }
 
 /*
@@ -984,12 +1045,9 @@
 			const int __user *nodes,
 			int __user *status, int flags)
 {
-	int err = 0;
-	int i;
 	struct task_struct *task;
-	nodemask_t task_nodes;
 	struct mm_struct *mm;
-	struct page_to_node *pm = NULL;
+	int err;
 
 	/* Check flags */
 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
@@ -1021,75 +1079,21 @@
 	    (current->uid != task->suid) && (current->uid != task->uid) &&
 	    !capable(CAP_SYS_NICE)) {
 		err = -EPERM;
-		goto out2;
+		goto out;
 	}
 
  	err = security_task_movememory(task);
  	if (err)
- 		goto out2;
+		goto out;
 
-
-	task_nodes = cpuset_mems_allowed(task);
-
-	/* Limit nr_pages so that the multiplication may not overflow */
-	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
-		err = -E2BIG;
-		goto out2;
+	if (nodes) {
+		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
+				    flags);
+	} else {
+		err = do_pages_stat(mm, nr_pages, pages, status);
 	}
 
-	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
-	if (!pm) {
-		err = -ENOMEM;
-		goto out2;
-	}
-
-	/*
-	 * Get parameters from user space and initialize the pm
-	 * array. Return various errors if the user did something wrong.
-	 */
-	for (i = 0; i < nr_pages; i++) {
-		const void __user *p;
-
-		err = -EFAULT;
-		if (get_user(p, pages + i))
-			goto out;
-
-		pm[i].addr = (unsigned long)p;
-		if (nodes) {
-			int node;
-
-			if (get_user(node, nodes + i))
-				goto out;
-
-			err = -ENODEV;
-			if (!node_state(node, N_HIGH_MEMORY))
-				goto out;
-
-			err = -EACCES;
-			if (!node_isset(node, task_nodes))
-				goto out;
-
-			pm[i].node = node;
-		} else
-			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
-	}
-	/* End marker */
-	pm[nr_pages].node = MAX_NUMNODES;
-
-	if (nodes)
-		err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
-	else
-		err = do_pages_stat(mm, pm);
-
-	if (err >= 0)
-		/* Return status information */
-		for (i = 0; i < nr_pages; i++)
-			if (put_user(pm[i].status, status + i))
-				err = -EFAULT;
-
 out:
-	vfree(pm);
-out2:
 	mmput(mm);
 	return err;
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index 01fbe93..008ea70 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -8,10 +8,18 @@
 #include <linux/capability.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/rmap.h>
+#include <linux/mmzone.h>
+#include <linux/hugetlb.h>
+
+#include "internal.h"
 
 int can_do_mlock(void)
 {
@@ -23,17 +31,381 @@
 }
 EXPORT_SYMBOL(can_do_mlock);
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * Mlocked pages are marked with PageMlocked() flag for efficient testing
+ * in vmscan and, possibly, the fault path; and to support semi-accurate
+ * statistics.
+ *
+ * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
+ * be placed on the LRU "unevictable" list, rather than the [in]active lists.
+ * The unevictable list is an LRU sibling list to the [in]active lists.
+ * PageUnevictable is set to indicate the unevictable state.
+ *
+ * When lazy mlocking via vmscan, it is important to ensure that the
+ * vma's VM_LOCKED status is not concurrently being modified, otherwise we
+ * may have mlocked a page that is being munlocked. So lazy mlock must take
+ * the mmap_sem for read, and verify that the vma really is locked
+ * (see mm/rmap.c).
+ */
+
+/*
+ *  LRU accounting for clear_page_mlock()
+ */
+void __clear_page_mlock(struct page *page)
+{
+	VM_BUG_ON(!PageLocked(page));
+
+	if (!page->mapping) {	/* truncated ? */
+		return;
+	}
+
+	dec_zone_page_state(page, NR_MLOCK);
+	count_vm_event(UNEVICTABLE_PGCLEARED);
+	if (!isolate_lru_page(page)) {
+		putback_lru_page(page);
+	} else {
+		/*
+		 * Page not on the LRU yet.  Flush all pagevecs and retry.
+		 */
+		lru_add_drain_all();
+		if (!isolate_lru_page(page))
+			putback_lru_page(page);
+		else if (PageUnevictable(page))
+			count_vm_event(UNEVICTABLE_PGSTRANDED);
+
+	}
+}
+
+/*
+ * Mark page as mlocked if not already.
+ * If page on LRU, isolate and putback to move to unevictable list.
+ */
+void mlock_vma_page(struct page *page)
+{
+	BUG_ON(!PageLocked(page));
+
+	if (!TestSetPageMlocked(page)) {
+		inc_zone_page_state(page, NR_MLOCK);
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		if (!isolate_lru_page(page))
+			putback_lru_page(page);
+	}
+}
+
+/*
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ *
+ * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
+ * [in try_to_munlock()] and then attempt to isolate the page.  We must
+ * isolate the page to keep others from messing with its unevictable
+ * and mlocked state while trying to munlock.  However, we pre-clear the
+ * mlocked state anyway as we might lose the isolation race and we might
+ * not get another chance to clear PageMlocked.  If we successfully
+ * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
+ * mapping the page, it will restore the PageMlocked state, unless the page
+ * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
+ * perhaps redundantly.
+ * If we lose the isolation race, and the page is mapped by other VM_LOCKED
+ * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
+ * either of which will restore the PageMlocked state by calling
+ * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ */
+static void munlock_vma_page(struct page *page)
+{
+	BUG_ON(!PageLocked(page));
+
+	if (TestClearPageMlocked(page)) {
+		dec_zone_page_state(page, NR_MLOCK);
+		if (!isolate_lru_page(page)) {
+			int ret = try_to_munlock(page);
+			/*
+			 * did try_to_munlock() succeed or punt?
+			 */
+			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+
+			putback_lru_page(page);
+		} else {
+			/*
+			 * We lost the race.  Let try_to_unmap() deal
+			 * with it.  At least we get the page state and
+			 * mlock stats right.  However, the page is still on
+			 * the unevictable list.  We'll fix that up when
+			 * the page is eventually freed or we scan the
+			 * unevictable list.
+			 */
+			if (PageUnevictable(page))
+				count_vm_event(UNEVICTABLE_PGSTRANDED);
+			else
+				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		}
+	}
+}
+
+/**
+ * __mlock_vma_pages_range() -  mlock/munlock a range of pages in the vma.
+ * @vma:   target vma
+ * @start: start address
+ * @end:   end address
+ * @mlock: 0 indicates munlock, otherwise mlock.
+ *
+ * If @mlock == 0, unlock an mlocked range;
+ * else mlock the range of pages.  This takes care of making the pages present,
+ * too.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held for at least read.
+ */
+static long __mlock_vma_pages_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end,
+				   int mlock)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr = start;
+	struct page *pages[16]; /* 16 gives a reasonable batch */
+	int nr_pages = (end - start) / PAGE_SIZE;
+	int ret;
+	int gup_flags = 0;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(end   & ~PAGE_MASK);
+	VM_BUG_ON(start < vma->vm_start);
+	VM_BUG_ON(end   > vma->vm_end);
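+	/* mmap_sem must be held, unless the mm is already being torn down. */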
+	VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
+		  (atomic_read(&mm->mm_users) != 0));
+
+	/*
+	 * mlock:   don't populate pages if the vma has PROT_NONE permission.
+	 * munlock: always munlock the pages, even if they are mapped with
+	 *          PROT_NONE permission.
+	 */
+	if (!mlock)
+		gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS;
+
+	if (vma->vm_flags & VM_WRITE)
+		gup_flags |= GUP_FLAGS_WRITE;
+
+	lru_add_drain_all();	/* push cached pages to LRU */
+
+	while (nr_pages > 0) {
+		int i;
+
+		cond_resched();
+
+		/*
+		 * get_user_pages makes pages present if we are
+		 * setting mlock. and this extra reference count will
+		 * disable migration of this page.  However, page may
+		 * still be truncated out from under us.
+		 */
+		ret = __get_user_pages(current, mm, addr,
+				min_t(int, nr_pages, ARRAY_SIZE(pages)),
+				gup_flags, pages, NULL);
+		/*
+		 * This can happen for, e.g., VM_NONLINEAR regions before
+		 * a page has been allocated and mapped at a given offset,
+		 * or for addresses that map beyond end of a file.
+		 * We'll mlock the pages if/when they get faulted in.
+		 */
+		if (ret < 0)
+			break;
+		if (ret == 0) {
+			/*
+			 * We know the vma is there, so the only time
+			 * we cannot get a single page should be an
+			 * error (ret < 0) case.
+			 */
+			WARN_ON(1);
+			break;
+		}
+
+		lru_add_drain();	/* push cached pages to LRU */
+
+		for (i = 0; i < ret; i++) {
+			struct page *page = pages[i];
+
+			lock_page(page);
+			/*
+			 * Because we lock page here and migration is blocked
+			 * by the elevated reference, we need only check for
+			 * page truncation (file-cache only).
+			 */
+			if (page->mapping) {
+				if (mlock)
+					mlock_vma_page(page);
+				else
+					munlock_vma_page(page);
+			}
+			unlock_page(page);
+			put_page(page);		/* ref from get_user_pages() */
+
+			/*
+			 * here we assume that get_user_pages() has given us
+			 * a list of virtually contiguous pages.
+			 */
+			addr += PAGE_SIZE;	/* for next get_user_pages() */
+			nr_pages--;
+		}
+		ret = 0;
+	}
+
+	lru_add_drain_all();	/* to update stats */
+
+	return ret;	/* count entire vma as locked_vm */
+}
+
+/*
+ * convert get_user_pages() return value to posix mlock() error
+ */
+static int __mlock_posix_error_return(long retval)
+{
+	if (retval == -EFAULT)
+		retval = -ENOMEM;
+	else if (retval == -ENOMEM)
+		retval = -EAGAIN;
+	return retval;
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+
+/*
+ * Just make pages present if VM_LOCKED.  No-op if unlocking.
+ */
+static long __mlock_vma_pages_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end,
+				   int mlock)
+{
+	if (mlock && (vma->vm_flags & VM_LOCKED))
+		return make_pages_present(start, end);
+	return 0;
+}
+
+static inline int __mlock_posix_error_return(long retval)
+{
+	return 0;
+}
+
+#endif /* CONFIG_UNEVICTABLE_LRU */
+
+/**
+ * mlock_vma_pages_range() - mlock pages in specified vma range.
+ * @vma - the vma containing the specified address range
+ * @start - starting address in @vma to mlock
+ * @end   - end address [+1] in @vma to mlock
+ *
+ * For mmap()/mremap()/expansion of mlocked vma.
+ *
+ * return 0 on success for "normal" vmas.
+ *
+ * return number of pages [> 0] to be removed from locked_vm on success
+ * of "special" vmas.
+ *
+ * return negative error if vma spanning @start-@end disappears while
+ * mmap semaphore is dropped.  Unlikely?
+ */
+long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int nr_pages = (end - start) / PAGE_SIZE;
+	BUG_ON(!(vma->vm_flags & VM_LOCKED));
+
+	/*
+	 * filter unlockable vmas
+	 */
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+		goto no_mlock;
+
+	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+			is_vm_hugetlb_page(vma) ||
+			vma == get_gate_vma(current))) {
+		long error;
+		downgrade_write(&mm->mmap_sem);
+
+		error = __mlock_vma_pages_range(vma, start, end, 1);
+
+		up_read(&mm->mmap_sem);
+		/* vma can change or disappear */
+		down_write(&mm->mmap_sem);
+		vma = find_vma(mm, start);
+		/* non-NULL vma must contain @start, but need to check @end */
+		if (!vma ||  end > vma->vm_end)
+			return -ENOMEM;
+
+		return 0;	/* hide other errors from mmap(), et al */
+	}
+
+	/*
+	 * User mapped kernel pages or huge pages:
+	 * make these pages present to populate the ptes, but
+	 * fall thru' to reset VM_LOCKED--no need to unlock, and
+	 * return nr_pages so these don't get counted against task's
+	 * locked limit.  Huge pages are already counted against the
+	 * locked vm limit.
+	 */
+	make_pages_present(start, end);
+
+no_mlock:
+	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
+	return nr_pages;		/* error or pages NOT mlocked */
+}
+
+
+/*
+ * munlock_vma_pages_range() - munlock all pages in the vma range.
+ * @vma - vma containing range to be munlock()ed.
+ * @start - start address in @vma of the range
+ * @end - end of range in @vma.
+ *
+ *  For mremap(), munmap() and exit().
+ *
+ * Called with @vma VM_LOCKED.
+ *
+ * Returns with VM_LOCKED cleared.  Callers must be prepared to
+ * deal with this.
+ *
+ * We don't save and restore VM_LOCKED here because pages are
+ * still on lru.  In unmap path, pages might be scanned by reclaim
+ * and re-mlocked by try_to_{munlock|unmap} before we unmap and
+ * free them.  This will result in freeing mlocked pages.
+ */
+void munlock_vma_pages_range(struct vm_area_struct *vma,
+			   unsigned long start, unsigned long end)
+{
+	vma->vm_flags &= ~VM_LOCKED;
+	__mlock_vma_pages_range(vma, start, end, 0);
+}
+
+/*
+ * mlock_fixup  - handle mlock[all]/munlock[all] requests.
+ *
+ * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
+ * munlock is a no-op.  However, for some special vmas, we go ahead and
+ * populate the ptes via make_pages_present().
+ *
+ * For vmas that pass the filters, merge/split as appropriate.
+ */
 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	unsigned long start, unsigned long end, unsigned int newflags)
 {
-	struct mm_struct * mm = vma->vm_mm;
+	struct mm_struct *mm = vma->vm_mm;
 	pgoff_t pgoff;
-	int pages;
+	int nr_pages;
 	int ret = 0;
+	int lock = newflags & VM_LOCKED;
 
-	if (newflags == vma->vm_flags) {
-		*prev = vma;
-		goto out;
+	if (newflags == vma->vm_flags ||
+			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;	/* don't set VM_LOCKED,  don't count */
+
+	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+			is_vm_hugetlb_page(vma) ||
+			vma == get_gate_vma(current)) {
+		if (lock)
+			make_pages_present(start, end);
+		goto out;	/* don't set VM_LOCKED,  don't count */
 	}
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
@@ -44,8 +416,6 @@
 		goto success;
 	}
 
-	*prev = vma;
-
 	if (start != vma->vm_start) {
 		ret = split_vma(mm, vma, start, 1);
 		if (ret)
@@ -60,24 +430,61 @@
 
 success:
 	/*
+	 * Keep track of amount of locked VM.
+	 */
+	nr_pages = (end - start) >> PAGE_SHIFT;
+	if (!lock)
+		nr_pages = -nr_pages;
+	mm->locked_vm += nr_pages;
+
+	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 * It's okay if try_to_unmap_one unmaps a page just after we
-	 * set VM_LOCKED, make_pages_present below will bring it back.
+	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
 	 */
 	vma->vm_flags = newflags;
 
-	/*
-	 * Keep track of amount of locked VM.
-	 */
-	pages = (end - start) >> PAGE_SHIFT;
-	if (newflags & VM_LOCKED) {
-		pages = -pages;
-		if (!(newflags & VM_IO))
-			ret = make_pages_present(start, end);
+	if (lock) {
+		/*
+		 * mmap_sem is currently held for write.  Downgrade the write
+		 * lock to a read lock so that other faults, mmap scans, etc.
+		 * can proceed while we fault in all pages.
+		 */
+		downgrade_write(&mm->mmap_sem);
+
+		ret = __mlock_vma_pages_range(vma, start, end, 1);
+
+		/*
+		 * Need to reacquire mmap sem in write mode, as our callers
+		 * expect this.  We have no support for atomically upgrading
+		 * a sem to write, so we need to check for ranges while sem
+		 * is unlocked.
+		 */
+		up_read(&mm->mmap_sem);
+		/* vma can change or disappear */
+		down_write(&mm->mmap_sem);
+		*prev = find_vma(mm, start);
+		/* non-NULL *prev must contain @start, but need to check @end */
+		if (!(*prev) || end > (*prev)->vm_end)
+			ret = -ENOMEM;
+		else if (ret > 0) {
+			mm->locked_vm -= ret;
+			ret = 0;
+		} else
+			ret = __mlock_posix_error_return(ret); /* translate if needed */
+	} else {
+		/*
+		 * TODO:  for unlocking, pages will already be resident, so
+		 * we don't need to wait for allocations/reclaim/pagein, ...
+		 * However, unlocking a very large region can still take a
+		 * while.  Should we downgrade the semaphore for both lock
+		 * AND unlock ?
+		 */
+		__mlock_vma_pages_range(vma, start, end, 0);
 	}
 
-	mm->locked_vm -= pages;
 out:
+	*prev = vma;
 	return ret;
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index e7a5a68..74f4d15 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -410,7 +410,7 @@
 	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 }
 
-static inline void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma)
 {
 	struct file * file;
 
@@ -662,8 +662,6 @@
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
-
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
@@ -972,6 +970,7 @@
 			return -EPERM;
 		vm_flags |= VM_LOCKED;
 	}
+
 	/* mlock MCL_FUTURE? */
 	if (vm_flags & VM_LOCKED) {
 		unsigned long locked, lock_limit;
@@ -1139,10 +1138,12 @@
 	 * The VM_SHARED test is necessary because shmem_zero_setup
 	 * will create the file object for a shared anonymous map below.
 	 */
-	if (!file && !(vm_flags & VM_SHARED) &&
-	    vma_merge(mm, prev, addr, addr + len, vm_flags,
-					NULL, NULL, pgoff, NULL))
-		goto out;
+	if (!file && !(vm_flags & VM_SHARED)) {
+		vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+					NULL, NULL, pgoff, NULL);
+		if (vma)
+			goto out;
+	}
 
 	/*
 	 * Determine the object being mapped and call the appropriate
@@ -1224,10 +1225,14 @@
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		mm->locked_vm += len >> PAGE_SHIFT;
-		make_pages_present(addr, addr + len);
-	}
-	if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+		/*
+		 * makes pages present; downgrades, drops, reacquires mmap_sem
+		 */
+		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
+		if (nr_pages < 0)
+			return nr_pages;	/* vma gone! */
+		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 	return addr;
 
@@ -1586,7 +1591,7 @@
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
 #ifndef CONFIG_IA64
-static inline
+static
 #endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
@@ -1636,7 +1641,7 @@
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-static inline int expand_downwards(struct vm_area_struct *vma,
+static int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
 	int error;
@@ -1698,10 +1703,12 @@
 	vma = find_vma_prev(mm, addr, &prev);
 	if (vma && (vma->vm_start <= addr))
 		return vma;
-	if (!prev || expand_stack(prev, addr))
+	if (expand_stack(prev, addr))
 		return NULL;
-	if (prev->vm_flags & VM_LOCKED)
-		make_pages_present(addr, prev->vm_end);
+	if (prev->vm_flags & VM_LOCKED) {
+		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
+			return NULL;	/* vma gone! */
+	}
 	return prev;
 }
 #else
@@ -1727,8 +1734,10 @@
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
-	if (vma->vm_flags & VM_LOCKED)
-		make_pages_present(addr, start);
+	if (vma->vm_flags & VM_LOCKED) {
+		if (mlock_vma_pages_range(vma, addr, start) < 0)
+			return NULL;	/* vma gone! */
+	}
 	return vma;
 }
 #endif
@@ -1747,8 +1756,6 @@
 		long nrpages = vma_pages(vma);
 
 		mm->total_vm -= nrpages;
-		if (vma->vm_flags & VM_LOCKED)
-			mm->locked_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
@@ -1914,6 +1921,20 @@
 	vma = prev? prev->vm_next: mm->mmap;
 
 	/*
+	 * unlock any mlock()ed ranges before detaching vmas
+	 */
+	if (mm->locked_vm) {
+		struct vm_area_struct *tmp = vma;
+		while (tmp && tmp->vm_start < end) {
+			if (tmp->vm_flags & VM_LOCKED) {
+				mm->locked_vm -= vma_pages(tmp);
+				munlock_vma_pages_all(tmp);
+			}
+			tmp = tmp->vm_next;
+		}
+	}
+
+	/*
 	 * Remove the vma's, and unmap the actual pages
 	 */
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
@@ -2025,8 +2046,9 @@
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
-	if (vma_merge(mm, prev, addr, addr + len, flags,
-					NULL, NULL, pgoff, NULL))
+	vma = vma_merge(mm, prev, addr, addr + len, flags,
+					NULL, NULL, pgoff, NULL);
+	if (vma)
 		goto out;
 
 	/*
@@ -2048,8 +2070,8 @@
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
-		mm->locked_vm += len >> PAGE_SHIFT;
-		make_pages_present(addr, addr + len);
+		if (!mlock_vma_pages_range(vma, addr, addr + len))
+			mm->locked_vm += (len >> PAGE_SHIFT);
 	}
 	return addr;
 }
@@ -2060,7 +2082,7 @@
 void exit_mmap(struct mm_struct *mm)
 {
 	struct mmu_gather *tlb;
-	struct vm_area_struct *vma = mm->mmap;
+	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
@@ -2068,6 +2090,15 @@
 	arch_exit_mmap(mm);
 	mmu_notifier_release(mm);
 
+	if (mm->locked_vm) {
+		vma = mm->mmap;
+		while (vma) {
+			if (vma->vm_flags & VM_LOCKED)
+				munlock_vma_pages_all(vma);
+			vma = vma->vm_next;
+		}
+	}
+	vma = mm->mmap;
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
diff --git a/mm/mremap.c b/mm/mremap.c
index 1a77439..58a2908 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -24,6 +24,8 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -238,8 +240,8 @@
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += new_len >> PAGE_SHIFT;
 		if (new_len > old_len)
-			make_pages_present(new_addr + old_len,
-					   new_addr + new_len);
+			mlock_vma_pages_range(new_vma, new_addr + old_len,
+						       new_addr + new_len);
 	}
 
 	return new_addr;
@@ -379,7 +381,7 @@
 			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
 			if (vma->vm_flags & VM_LOCKED) {
 				mm->locked_vm += pages;
-				make_pages_present(addr + old_len,
+				mlock_vma_pages_range(vma, addr + old_len,
 						   addr + new_len);
 			}
 			ret = addr;
diff --git a/mm/nommu.c b/mm/nommu.c
index ed75bc9..2696b24 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -34,6 +34,8 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 void *high_memory;
 struct page *mem_map;
 unsigned long max_mapnr;
@@ -128,20 +130,16 @@
 	return PAGE_SIZE << compound_order(page);
 }
 
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- *   slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int len, int write, int force,
-	struct page **pages, struct vm_area_struct **vmas)
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, int flags,
+		struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
 	int i;
+	int write = !!(flags & GUP_FLAGS_WRITE);
+	int force = !!(flags & GUP_FLAGS_FORCE);
+	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
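+	/* 'ignore' (GUP_FLAGS_IGNORE_VMA_PERMISSIONS) skips the vma access check below. */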
 
 	/* calculate required read or write permissions.
 	 * - if 'force' is set, we only require the "MAY" flags.
@@ -156,7 +154,7 @@
 
 		/* protect what we can, including chardevs */
 		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
-		    !(vm_flags & vma->vm_flags))
+		    (!ignore && !(vm_flags & vma->vm_flags)))
 			goto finish_or_fault;
 
 		if (pages) {
@@ -174,6 +172,30 @@
 finish_or_fault:
 	return i ? : -EFAULT;
 }
+
+
+/*
+ * get a list of pages in an address range belonging to the specified process
+ * and indicate the VMA that covers each page
+ * - this is potentially dodgy as we may end up incrementing the page count of a
+ *   slab page or a secondary page from a compound page
+ * - don't permit access to VMAs that don't support it, such as I/O mappings
+ */
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+	unsigned long start, int len, int write, int force,
+	struct page **pages, struct vm_area_struct **vmas)
+{
+	int flags = 0;
+
+	if (write)
+		flags |= GUP_FLAGS_WRITE;
+	if (force)
+		flags |= GUP_FLAGS_FORCE;
+
+	return __get_user_pages(tsk, mm,
+				start, len, flags,
+				pages, vmas);
+}
 EXPORT_SYMBOL(get_user_pages);
 
 DEFINE_RWLOCK(vmlist_lock);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c130a13..2970e35 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -329,9 +329,7 @@
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES)
-			+ zone_page_state(z, NR_INACTIVE)
-			+ zone_page_state(z, NR_ACTIVE);
+		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -355,9 +353,7 @@
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES)
-		+ global_page_state(NR_INACTIVE)
-		+ global_page_state(NR_ACTIVE);
+	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
@@ -876,6 +872,7 @@
 	pgoff_t end;		/* Inclusive */
 	int scanned = 0;
 	int range_whole = 0;
+	long nr_to_write = wbc->nr_to_write;
 
 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 		wbc->encountered_congestion = 1;
@@ -939,7 +936,7 @@
 				unlock_page(page);
 				ret = 0;
 			}
-			if (ret || (--(wbc->nr_to_write) <= 0))
+			if (ret || (--nr_to_write <= 0))
 				done = 1;
 			if (wbc->nonblocking && bdi_write_congested(bdi)) {
 				wbc->encountered_congestion = 1;
@@ -958,11 +955,12 @@
 		index = 0;
 		goto retry;
 	}
-	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
-		mapping->writeback_index = index;
+	if (!wbc->no_nrwrite_index_update) {
+		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
+			mapping->writeback_index = index;
+		wbc->nr_to_write = nr_to_write;
+	}
 
-	if (wbc->range_cont)
-		wbc->range_start = index << PAGE_CACHE_SHIFT;
 	return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9eb9eb9..d0a240f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -44,7 +44,7 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
-#include <linux/memcontrol.h>
+#include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 
 #include <asm/tlbflush.h>
@@ -223,17 +223,12 @@
 
 static void bad_page(struct page *page)
 {
-	void *pc = page_get_page_cgroup(page);
-
 	printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
 		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
 		current->comm, page, (int)(2*sizeof(unsigned long)),
 		(unsigned long)page->flags, page->mapping,
 		page_mapcount(page), page_count(page));
-	if (pc) {
-		printk(KERN_EMERG "cgroup:%p\n", pc);
-		page_reset_bad_cgroup(page);
-	}
+
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 		KERN_EMERG "Backtrace:\n");
 	dump_stack();
@@ -454,14 +449,16 @@
 
 static inline int free_pages_check(struct page *page)
 {
+	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
-		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0)  |
 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
 		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
+	if (PageSwapBacked(page))
+		__ClearPageSwapBacked(page);
 	/*
 	 * For now, we report if PG_reserved was found set, but do not
 	 * clear it, and do not free the page.  But we shall soon need
@@ -600,7 +597,6 @@
 {
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
-		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0)  |
 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
 		bad_page(page);
@@ -614,7 +610,11 @@
 
 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
 			1 << PG_referenced | 1 << PG_arch_1 |
-			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
+			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk
+#ifdef CONFIG_UNEVICTABLE_LRU
+			| 1 << PG_mlocked
+#endif
+			);
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
@@ -1862,10 +1862,21 @@
 		}
 	}
 
-	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
+		" inactive_file:%lu"
+		/* TODO: check/adjust line lengths */
+#ifdef CONFIG_UNEVICTABLE_LRU
+		" unevictable:%lu"
+#endif
+		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
-		global_page_state(NR_ACTIVE),
-		global_page_state(NR_INACTIVE),
+		global_page_state(NR_ACTIVE_ANON),
+		global_page_state(NR_ACTIVE_FILE),
+		global_page_state(NR_INACTIVE_ANON),
+		global_page_state(NR_INACTIVE_FILE),
+#ifdef CONFIG_UNEVICTABLE_LRU
+		global_page_state(NR_UNEVICTABLE),
+#endif
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
@@ -1888,8 +1899,13 @@
 			" min:%lukB"
 			" low:%lukB"
 			" high:%lukB"
-			" active:%lukB"
-			" inactive:%lukB"
+			" active_anon:%lukB"
+			" inactive_anon:%lukB"
+			" active_file:%lukB"
+			" inactive_file:%lukB"
+#ifdef CONFIG_UNEVICTABLE_LRU
+			" unevictable:%lukB"
+#endif
 			" present:%lukB"
 			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
@@ -1899,8 +1915,13 @@
 			K(zone->pages_min),
 			K(zone->pages_low),
 			K(zone->pages_high),
-			K(zone_page_state(zone, NR_ACTIVE)),
-			K(zone_page_state(zone, NR_INACTIVE)),
+			K(zone_page_state(zone, NR_ACTIVE_ANON)),
+			K(zone_page_state(zone, NR_INACTIVE_ANON)),
+			K(zone_page_state(zone, NR_ACTIVE_FILE)),
+			K(zone_page_state(zone, NR_INACTIVE_FILE)),
+#ifdef CONFIG_UNEVICTABLE_LRU
+			K(zone_page_state(zone, NR_UNEVICTABLE)),
+#endif
 			K(zone->present_pages),
 			zone->pages_scanned,
 			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
@@ -3410,10 +3431,12 @@
 	pgdat->nr_zones = 0;
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	pgdat->kswapd_max_order = 0;
+	pgdat_page_cgroup_init(pgdat);
 	
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
+		enum lru_list l;
 
 		size = zone_spanned_pages_in_node(nid, j, zones_size);
 		realsize = size - zone_absent_pages_in_node(nid, j,
@@ -3428,8 +3451,8 @@
 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
-			mminit_dprintk(MMINIT_TRACE, "memmap_init",
-				"%s zone: %lu pages used for memmap\n",
+			printk(KERN_DEBUG
+				"  %s zone: %lu pages used for memmap\n",
 				zone_names[j], memmap_pages);
 		} else
 			printk(KERN_WARNING
@@ -3439,8 +3462,7 @@
 		/* Account for reserved pages */
 		if (j == 0 && realsize > dma_reserve) {
 			realsize -= dma_reserve;
-			mminit_dprintk(MMINIT_TRACE, "memmap_init",
-					"%s zone: %lu pages reserved\n",
+			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
 					zone_names[0], dma_reserve);
 		}
 
@@ -3465,10 +3487,14 @@
 		zone->prev_priority = DEF_PRIORITY;
 
 		zone_pcp_init(zone);
-		INIT_LIST_HEAD(&zone->active_list);
-		INIT_LIST_HEAD(&zone->inactive_list);
-		zone->nr_scan_active = 0;
-		zone->nr_scan_inactive = 0;
+		for_each_lru(l) {
+			INIT_LIST_HEAD(&zone->lru[l].list);
+			zone->lru[l].nr_scan = 0;
+		}
+		zone->recent_rotated[0] = 0;
+		zone->recent_rotated[1] = 0;
+		zone->recent_scanned[0] = 0;
+		zone->recent_scanned[1] = 0;
 		zap_zone_vm_stats(zone);
 		zone->flags = 0;
 		if (!size)
@@ -4210,7 +4236,7 @@
 	for_each_zone(zone) {
 		u64 tmp;
 
-		spin_lock_irqsave(&zone->lru_lock, flags);
+		spin_lock_irqsave(&zone->lock, flags);
 		tmp = (u64)pages_min * zone->present_pages;
 		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
@@ -4242,13 +4268,53 @@
 		zone->pages_low   = zone->pages_min + (tmp >> 2);
 		zone->pages_high  = zone->pages_min + (tmp >> 1);
 		setup_zone_migrate_reserve(zone);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 
 	/* update totalreserve_pages */
 	calculate_totalreserve_pages();
 }
 
+/**
+ * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
+ *
+ * The inactive anon list should be small enough that the VM never has to
+ * do too much work, but large enough that each inactive page has a chance
+ * to be referenced again before it is swapped out.
+ *
+ * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
+ * INACTIVE_ANON pages on this zone's LRU, maintained by the
+ * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
+ * the anonymous pages are kept on the inactive list.
+ *
+ * total     target    max
+ * memory    ratio     inactive anon
+ * -------------------------------------
+ *   10MB       1         5MB
+ *  100MB       1        50MB
+ *    1GB       3       250MB
+ *   10GB      10       0.9GB
+ *  100GB      31         3GB
+ *    1TB     101        10GB
+ *   10TB     320        32GB
+ */
+void setup_per_zone_inactive_ratio(void)
+{
+	struct zone *zone;
+
+	for_each_zone(zone) {
+		unsigned int gb, ratio;
+
+		/* Zone size in gigabytes */
+		gb = zone->present_pages >> (30 - PAGE_SHIFT);
+		ratio = int_sqrt(10 * gb);
+		if (!ratio)
+			ratio = 1;
+
+		zone->inactive_ratio = ratio;
+	}
+}
+
 /*
  * Initialise min_free_kbytes.
  *
@@ -4286,6 +4352,7 @@
 		min_free_kbytes = 65536;
 	setup_per_zone_pages_min();
 	setup_per_zone_lowmem_reserve();
+	setup_per_zone_inactive_ratio();
 	return 0;
 }
 module_init(init_per_zone_pages_min)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
new file mode 100644
index 0000000..f59d797
--- /dev/null
+++ b/mm/page_cgroup.c
@@ -0,0 +1,256 @@
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/bootmem.h>
+#include <linux/bit_spinlock.h>
+#include <linux/page_cgroup.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include <linux/memory.h>
+#include <linux/vmalloc.h>
+#include <linux/cgroup.h>
+
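+/* Initialize one page_cgroup: clear flags, no cgroup yet, back-pointer to the page. */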
+static void __meminit
+__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
+{
+	pc->flags = 0;
+	pc->mem_cgroup = NULL;
+	pc->page = pfn_to_page(pfn);
+}
+static unsigned long total_usage;
+
+#if !defined(CONFIG_SPARSEMEM)
+
+
+void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+{
+	pgdat->node_page_cgroup = NULL;
+}
+
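+/*
+ * Flat (non-SPARSEMEM) case: each node has one page_cgroup array, indexed
+ * by the pfn offset from the node's first pfn.
+ */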
+struct page_cgroup *lookup_page_cgroup(struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long offset;
+	struct page_cgroup *base;
+
+	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
+	if (unlikely(!base))
+		return NULL;
+
+	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
+	return base + offset;
+}
+
+static int __init alloc_node_page_cgroup(int nid)
+{
+	struct page_cgroup *base, *pc;
+	unsigned long table_size;
+	unsigned long start_pfn, nr_pages, index;
+
+	start_pfn = NODE_DATA(nid)->node_start_pfn;
+	nr_pages = NODE_DATA(nid)->node_spanned_pages;
+
+	table_size = sizeof(struct page_cgroup) * nr_pages;
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
+		return -ENOMEM;
+	for (index = 0; index < nr_pages; index++) {
+		pc = base + index;
+		__init_page_cgroup(pc, start_pfn + index);
+	}
+	NODE_DATA(nid)->node_page_cgroup = base;
+	total_usage += table_size;
+	return 0;
+}
+
+void __init page_cgroup_init(void)
+{
+
+	int nid, fail;
+
+	if (mem_cgroup_subsys.disabled)
+		return;
+
+	for_each_online_node(nid)  {
+		fail = alloc_node_page_cgroup(nid);
+		if (fail)
+			goto fail;
+	}
+	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
+	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
+	" don't want memory cgroups\n");
+	return;
+fail:
+	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
+	printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
+	panic("Out of memory");
+}
+
+#else /* CONFIG_FLAT_NODE_MEM_MAP */
+
+struct page_cgroup *lookup_page_cgroup(struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+	struct mem_section *section = __pfn_to_section(pfn);
+
+	return section->page_cgroup + pfn;
+}
+
+int __meminit init_section_page_cgroup(unsigned long pfn)
+{
+	struct mem_section *section;
+	struct page_cgroup *base, *pc;
+	unsigned long table_size;
+	int nid, index;
+
+	section = __pfn_to_section(pfn);
+
+	if (section->page_cgroup)
+		return 0;
+
+	nid = page_to_nid(pfn_to_page(pfn));
+
+	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+	if (slab_is_available()) {
+		base = kmalloc_node(table_size, GFP_KERNEL, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
+	} else {
+		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	}
+
+	if (!base) {
+		printk(KERN_ERR "page cgroup allocation failure\n");
+		return -ENOMEM;
+	}
+
+	for (index = 0; index < PAGES_PER_SECTION; index++) {
+		pc = base + index;
+		__init_page_cgroup(pc, pfn + index);
+	}
+
+	section = __pfn_to_section(pfn);
+	section->page_cgroup = base - pfn;
+	total_usage += table_size;
+	return 0;
+}
+#ifdef CONFIG_MEMORY_HOTPLUG
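+/*
+ * Free a section's page_cgroup array.  It may have come from vmalloc,
+ * kmalloc or bootmem; bootmem allocations are left in place.
+ */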
+void __free_page_cgroup(unsigned long pfn)
+{
+	struct mem_section *ms;
+	struct page_cgroup *base;
+
+	ms = __pfn_to_section(pfn);
+	if (!ms || !ms->page_cgroup)
+		return;
+	base = ms->page_cgroup + pfn;
+	if (is_vmalloc_addr(base)) {
+		vfree(base);
+		ms->page_cgroup = NULL;
+	} else {
+		struct page *page = virt_to_page(base);
+		if (!PageReserved(page)) { /* Is bootmem ? */
+			kfree(base);
+			ms->page_cgroup = NULL;
+		}
+	}
+}
+
+int online_page_cgroup(unsigned long start_pfn,
+			unsigned long nr_pages,
+			int nid)
+{
+	unsigned long start, end, pfn;
+	int fail = 0;
+
+	start = start_pfn & ~(PAGES_PER_SECTION - 1);
+	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
+
+	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
+		if (!pfn_present(pfn))
+			continue;
+		fail = init_section_page_cgroup(pfn);
+	}
+	if (!fail)
+		return 0;
+
+	/* rollback */
+	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
+		__free_page_cgroup(pfn);
+
+	return -ENOMEM;
+}
+
+int offline_page_cgroup(unsigned long start_pfn,
+		unsigned long nr_pages, int nid)
+{
+	unsigned long start, end, pfn;
+
+	start = start_pfn & ~(PAGES_PER_SECTION - 1);
+	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
+
+	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
+		__free_page_cgroup(pfn);
+	return 0;
+
+}
+
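+/*
+ * Memory hotplug notifier: allocate or free page_cgroup arrays as
+ * memory sections come and go.
+ */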
+static int page_cgroup_callback(struct notifier_block *self,
+			       unsigned long action, void *arg)
+{
+	struct memory_notify *mn = arg;
+	int ret = 0;
+	switch (action) {
+	case MEM_GOING_ONLINE:
+		ret = online_page_cgroup(mn->start_pfn,
+				   mn->nr_pages, mn->status_change_nid);
+		break;
+	case MEM_CANCEL_ONLINE:
+	case MEM_OFFLINE:
+		offline_page_cgroup(mn->start_pfn,
+				mn->nr_pages, mn->status_change_nid);
+		break;
+	case MEM_GOING_OFFLINE:
+		break;
+	case MEM_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	}
+	ret = notifier_from_errno(ret);
+	return ret;
+}
+
+#endif
+
+void __init page_cgroup_init(void)
+{
+	unsigned long pfn;
+	int fail = 0;
+
+	if (mem_cgroup_subsys.disabled)
+		return;
+
+	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
+		if (!pfn_present(pfn))
+			continue;
+		fail = init_section_page_cgroup(pfn);
+	}
+	if (fail) {
+		printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
+		panic("Out of memory");
+	} else {
+		hotplug_memory_notifier(page_cgroup_callback, 0);
+	}
+	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
+	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
+	" don't want memory cgroups\n");
+}
+
+void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+{
+	return;
+}
+
+#endif
diff --git a/mm/readahead.c b/mm/readahead.c
index 6cbd9a7..bec83c1 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -229,7 +229,7 @@
  */
 unsigned long max_sane_readahead(unsigned long nr)
 {
-	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
+	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
 		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 0383acf..1099394 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -53,9 +53,47 @@
 
 #include <asm/tlbflush.h>
 
-struct kmem_cache *anon_vma_cachep;
+#include "internal.h"
 
-/* This must be called under the mmap_sem. */
+static struct kmem_cache *anon_vma_cachep;
+
+static inline struct anon_vma *anon_vma_alloc(void)
+{
+	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+}
+
+static inline void anon_vma_free(struct anon_vma *anon_vma)
+{
+	kmem_cache_free(anon_vma_cachep, anon_vma);
+}
+
+/**
+ * anon_vma_prepare - attach an anon_vma to a memory region
+ * @vma: the memory region in question
+ *
+ * This makes sure the memory mapping described by 'vma' has
+ * an 'anon_vma' attached to it, so that we can associate the
+ * anonymous pages mapped into it with that anon_vma.
+ *
+ * The common case will be that we already have one, but if
+ * not we either need to find an adjacent mapping that we
+ * can re-use the anon_vma from (very common when the only
+ * reason for splitting a vma has been mprotect()), or we
+ * allocate a new one.
+ *
+ * Anon-vma allocations are very subtle, because we may have
+ * optimistically looked up an anon_vma in page_lock_anon_vma()
+ * and that may actually touch the spinlock even in the newly
+ * allocated vma (it depends on RCU to make sure that the
+ * anon_vma isn't actually destroyed).
+ *
+ * As a result, we need to do proper anon_vma locking even
+ * for the new allocation. At the same time, we do not want
+ * to do any locking for the common case of already having
+ * an anon_vma.
+ *
+ * This must be called with the mmap_sem held for reading.
+ */
 int anon_vma_prepare(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
@@ -63,20 +101,17 @@
 	might_sleep();
 	if (unlikely(!anon_vma)) {
 		struct mm_struct *mm = vma->vm_mm;
-		struct anon_vma *allocated, *locked;
+		struct anon_vma *allocated;
 
 		anon_vma = find_mergeable_anon_vma(vma);
-		if (anon_vma) {
-			allocated = NULL;
-			locked = anon_vma;
-			spin_lock(&locked->lock);
-		} else {
+		allocated = NULL;
+		if (!anon_vma) {
 			anon_vma = anon_vma_alloc();
 			if (unlikely(!anon_vma))
 				return -ENOMEM;
 			allocated = anon_vma;
-			locked = NULL;
 		}
+		spin_lock(&anon_vma->lock);
 
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
@@ -87,8 +122,7 @@
 		}
 		spin_unlock(&mm->page_table_lock);
 
-		if (locked)
-			spin_unlock(&locked->lock);
+		spin_unlock(&anon_vma->lock);
 		if (unlikely(allocated))
 			anon_vma_free(allocated);
 	}
@@ -157,7 +191,7 @@
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-static struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma(struct page *page)
 {
 	struct anon_vma *anon_vma;
 	unsigned long anon_mapping;
@@ -177,7 +211,7 @@
 	return NULL;
 }
 
-static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	spin_unlock(&anon_vma->lock);
 	rcu_read_unlock();
@@ -268,6 +302,32 @@
 	return NULL;
 }
 
+/**
+ * page_mapped_in_vma - check whether a page is really mapped in a VMA
+ * @page: the page to test
+ * @vma: the VMA to test
+ *
+ * Returns 1 if the page is mapped into the page tables of the VMA, 0
+ * if the page is not mapped into the page tables of this VMA.  Only
+ * valid for normal file or anonymous VMAs.
+ */
+static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	address = vma_address(page, vma);
+	if (address == -EFAULT)		/* out of vma range */
+		return 0;
+	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
+	if (!pte)			/* the page is not in this mm */
+		return 0;
+	pte_unmap_unlock(pte, ptl);
+
+	return 1;
+}
+
 /*
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
@@ -289,10 +349,17 @@
 	if (!pte)
 		goto out;
 
+	/*
+	 * Don't elevate the referenced count for an mlocked page that gets
+	 * this far, so that it progresses to try_to_unmap() and is moved to
+	 * the unevictable list.
+	 */
 	if (vma->vm_flags & VM_LOCKED) {
-		referenced++;
 		*mapcount = 1;	/* break early from loop */
-	} else if (ptep_clear_flush_young_notify(vma, address, pte))
+		goto out_unmap;
+	}
+
+	if (ptep_clear_flush_young_notify(vma, address, pte))
 		referenced++;
 
 	/* Pretend the page is referenced if the task has the
@@ -301,6 +368,7 @@
 			rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
+out_unmap:
 	(*mapcount)--;
 	pte_unmap_unlock(pte, ptl);
 out:
@@ -390,11 +458,6 @@
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
-				  == (VM_LOCKED|VM_MAYSHARE)) {
-			referenced++;
-			break;
-		}
 		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 			break;
@@ -674,8 +737,8 @@
 			page_clear_dirty(page);
 			set_page_dirty(page);
 		}
-
-		mem_cgroup_uncharge_page(page);
+		if (PageAnon(page))
+			mem_cgroup_uncharge_page(page);
 		__dec_zone_page_state(page,
 			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 		/*
@@ -717,11 +780,16 @@
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young_notify(vma, address, pte)))) {
-		ret = SWAP_FAIL;
-		goto out_unmap;
-	}
+	if (!migration) {
+		if (vma->vm_flags & VM_LOCKED) {
+			ret = SWAP_MLOCK;
+			goto out_unmap;
+		}
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			ret = SWAP_FAIL;
+			goto out_unmap;
+		}
+	}
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
@@ -802,12 +870,17 @@
  * For very sparsely populated VMAs this is a little inefficient - chances are
  * there won't be many ptes located within the scan cluster.  In this case
  * maybe we could scan further - to the end of the pte page, perhaps.
+ *
+ * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
+ * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
+ * rather than unmapping them.  If we encounter the "check_page" that vmscan is
+ * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
  */
 #define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
 #define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
 
-static void try_to_unmap_cluster(unsigned long cursor,
-	unsigned int *mapcount, struct vm_area_struct *vma)
+static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
+		struct vm_area_struct *vma, struct page *check_page)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -819,6 +892,8 @@
 	struct page *page;
 	unsigned long address;
 	unsigned long end;
+	int ret = SWAP_AGAIN;
+	int locked_vma = 0;
 
 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
 	end = address + CLUSTER_SIZE;
@@ -829,15 +904,26 @@
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
-		return;
+		return ret;
 
 	pud = pud_offset(pgd, address);
 	if (!pud_present(*pud))
-		return;
+		return ret;
 
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
-		return;
+		return ret;
+
+	/*
+	 * MLOCK_PAGES => feature is configured.
+	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+	 * keep the sem while scanning the cluster for mlocking pages.
+	 */
+	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+		locked_vma = (vma->vm_flags & VM_LOCKED);
+		if (!locked_vma)
+			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
+	}
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 
@@ -850,6 +936,13 @@
 		page = vm_normal_page(vma, address, *pte);
 		BUG_ON(!page || PageAnon(page));
 
+		if (locked_vma) {
+			mlock_vma_page(page);   /* no-op if already mlocked */
+			if (page == check_page)
+				ret = SWAP_MLOCK;
+			continue;	/* don't unmap */
+		}
+
 		if (ptep_clear_flush_young_notify(vma, address, pte))
 			continue;
 
@@ -871,39 +964,104 @@
 		(*mapcount)--;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
+	if (locked_vma)
+		up_read(&vma->vm_mm->mmap_sem);
+	return ret;
 }
 
-static int try_to_unmap_anon(struct page *page, int migration)
+/*
+ * common handling for pages mapped in VM_LOCKED vmas
+ */
+static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
+{
+	int mlocked = 0;
+
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+		if (vma->vm_flags & VM_LOCKED) {
+			mlock_vma_page(page);
+			mlocked++;	/* really mlocked the page */
+		}
+		up_read(&vma->vm_mm->mmap_sem);
+	}
+	return mlocked;
+}
+
+/**
+ * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
+ * rmap method
+ * @page: the page to unmap/unlock
+ * @unlock:  request for unlock rather than unmap [unlikely]
+ * @migration:  unmapping for migration - ignored if @unlock
+ *
+ * Find all the mappings of a page using the mapping pointer and the vma chains
+ * contained in the anon_vma struct it points to.
+ *
+ * This function is only called from try_to_unmap/try_to_munlock for
+ * anonymous pages.
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write.  So, we won't recheck
+ * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
+ * VM_LOCKED.
+ */
+static int try_to_unmap_anon(struct page *page, int unlock, int migration)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
+	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
 
+	if (MLOCK_PAGES && unlikely(unlock))
+		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
+
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		ret = try_to_unmap_one(page, vma, migration);
-		if (ret == SWAP_FAIL || !page_mapped(page))
-			break;
+		if (MLOCK_PAGES && unlikely(unlock)) {
+			if (!((vma->vm_flags & VM_LOCKED) &&
+			      page_mapped_in_vma(page, vma)))
+				continue;  /* must visit all unlocked vmas */
+			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
+		} else {
+			ret = try_to_unmap_one(page, vma, migration);
+			if (ret == SWAP_FAIL || !page_mapped(page))
+				break;
+		}
+		if (ret == SWAP_MLOCK) {
+			mlocked = try_to_mlock_page(page, vma);
+			if (mlocked)
+				break;	/* stop if actually mlocked page */
+		}
 	}
 
 	page_unlock_anon_vma(anon_vma);
+
+	if (mlocked)
+		ret = SWAP_MLOCK;	/* actually mlocked the page */
+	else if (ret == SWAP_MLOCK)
+		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
+
 	return ret;
 }
 
 /**
- * try_to_unmap_file - unmap file page using the object-based rmap method
- * @page: the page to unmap
- * @migration: migration flag
+ * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
+ * @page: the page to unmap/unlock
+ * @unlock:  request for unlock rather than unmap [unlikely]
+ * @migration:  unmapping for migration - ignored if @unlock
  *
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  *
- * This function is only called from try_to_unmap for object-based pages.
+ * This function is only called from try_to_unmap/try_to_munlock for
+ * object-based pages.
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write.  So, we won't recheck
+ * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
+ * VM_LOCKED.
  */
-static int try_to_unmap_file(struct page *page, int migration)
+static int try_to_unmap_file(struct page *page, int unlock, int migration)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -914,20 +1072,44 @@
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
+	unsigned int mlocked = 0;
+
+	if (MLOCK_PAGES && unlikely(unlock))
+		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		ret = try_to_unmap_one(page, vma, migration);
-		if (ret == SWAP_FAIL || !page_mapped(page))
-			goto out;
+		if (MLOCK_PAGES && unlikely(unlock)) {
+			if (!(vma->vm_flags & VM_LOCKED))
+				continue;	/* must visit all vmas */
+			ret = SWAP_MLOCK;
+		} else {
+			ret = try_to_unmap_one(page, vma, migration);
+			if (ret == SWAP_FAIL || !page_mapped(page))
+				goto out;
+		}
+		if (ret == SWAP_MLOCK) {
+			mlocked = try_to_mlock_page(page, vma);
+			if (mlocked)
+				break;  /* stop if actually mlocked page */
+		}
 	}
 
+	if (mlocked)
+		goto out;
+
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto out;
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if ((vma->vm_flags & VM_LOCKED) && !migration)
+		if (MLOCK_PAGES && unlikely(unlock)) {
+			if (!(vma->vm_flags & VM_LOCKED))
+				continue;	/* must visit all vmas */
+			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
+			goto out;		/* no need to look further */
+		}
+		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -937,7 +1119,7 @@
 			max_nl_size = cursor;
 	}
 
-	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
+	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
 		ret = SWAP_FAIL;
 		goto out;
 	}
@@ -961,12 +1143,16 @@
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if ((vma->vm_flags & VM_LOCKED) && !migration)
+			if (!MLOCK_PAGES && !migration &&
+			    (vma->vm_flags & VM_LOCKED))
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
 				cursor < vma->vm_end - vma->vm_start) {
-				try_to_unmap_cluster(cursor, &mapcount, vma);
+				ret = try_to_unmap_cluster(cursor, &mapcount,
+								vma, page);
+				if (ret == SWAP_MLOCK)
+					mlocked = 2;	/* to return below */
 				cursor += CLUSTER_SIZE;
 				vma->vm_private_data = (void *) cursor;
 				if ((int)mapcount <= 0)
@@ -987,6 +1173,10 @@
 		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
+	if (mlocked)
+		ret = SWAP_MLOCK;	/* actually mlocked the page */
+	else if (ret == SWAP_MLOCK)
+		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
 	return ret;
 }
 
@@ -1002,6 +1192,7 @@
  * SWAP_SUCCESS	- we succeeded in removing all mappings
  * SWAP_AGAIN	- we missed a mapping, try again later
  * SWAP_FAIL	- the page is unswappable
+ * SWAP_MLOCK	- page is mlocked.
  */
 int try_to_unmap(struct page *page, int migration)
 {
@@ -1010,12 +1201,36 @@
 	BUG_ON(!PageLocked(page));
 
 	if (PageAnon(page))
-		ret = try_to_unmap_anon(page, migration);
+		ret = try_to_unmap_anon(page, 0, migration);
 	else
-		ret = try_to_unmap_file(page, migration);
-
-	if (!page_mapped(page))
+		ret = try_to_unmap_file(page, 0, migration);
+	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
 }
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+/**
+ * try_to_munlock - try to munlock a page
+ * @page: the page to be munlocked
+ *
+ * Called from munlock code.  Checks all of the VMAs mapping the page
+ * to make sure nobody else has this page mlocked. The page will be
+ * returned with PG_mlocked cleared if no other vmas have it mlocked.
+ *
+ * Return values are:
+ *
+ * SWAP_SUCCESS	- no vma's holding page mlocked.
+ * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_MLOCK	- page is now mlocked.
+ */
+int try_to_munlock(struct page *page)
+{
+	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+
+	if (PageAnon(page))
+		return try_to_unmap_anon(page, 1, 0);
+	else
+		return try_to_unmap_file(page, 1, 0);
+}
+#endif
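
The rmap changes above teach try_to_unmap() and the new try_to_munlock() to report SWAP_MLOCK, so the reclaim path can tell "unmappable because it is mlocked" apart from an ordinary failure.  The stand-alone sketch below (plain userspace C, not kernel code) mirrors how the shrink_page_list() hunk later in this patch dispatches on those return codes; only the names SWAP_SUCCESS/SWAP_AGAIN/SWAP_FAIL/SWAP_MLOCK come from the patch, the numeric values and the helper are illustrative.

#include <stdio.h>

enum ttu_result { SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL, SWAP_MLOCK };

/* mirrors the switch on try_to_unmap() in the shrink_page_list() hunk below */
static const char *reclaim_action(enum ttu_result r)
{
	switch (r) {
	case SWAP_FAIL:
		return "activate_locked: page is unswappable";
	case SWAP_AGAIN:
		return "keep_locked: missed a mapping, retry on a later pass";
	case SWAP_MLOCK:
		return "cull_mlocked: put the page back for the unevictable list";
	case SWAP_SUCCESS:
	default:
		return "try to free the page";
	}
}

int main(void)
{
	int r;

	for (r = SWAP_SUCCESS; r <= SWAP_MLOCK; r++)
		printf("%d -> %s\n", r, reclaim_action((enum ttu_result)r));
	return 0;
}
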
diff --git a/mm/shmem.c b/mm/shmem.c
index d87958a..d38d7e6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -199,7 +199,7 @@
 
 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
 	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
+	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
 	.unplug_io_fn	= default_unplug_io_fn,
 };
 
@@ -1367,6 +1367,7 @@
 				error = -ENOMEM;
 				goto failed;
 			}
+			SetPageSwapBacked(filepage);
 
 			/* Precharge page while we can wait, compensate after */
 			error = mem_cgroup_cache_charge(filepage, current->mm,
@@ -1476,12 +1477,16 @@
 		if (!user_shm_lock(inode->i_size, user))
 			goto out_nomem;
 		info->flags |= VM_LOCKED;
+		mapping_set_unevictable(file->f_mapping);
 	}
 	if (!lock && (info->flags & VM_LOCKED) && user) {
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
+		mapping_clear_unevictable(file->f_mapping);
+		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
+
 out_nomem:
 	spin_unlock(&info->lock);
 	return retval;
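
The shmem hunk wires SysV SHM_LOCK into the new unevictable machinery: locking a segment marks its whole mapping unevictable, and unlocking rescans the mapping so its pages can migrate back to the normal LRUs.  For reference, a minimal userspace trigger for this path looks roughly like the sketch below; the key, size and permissions are arbitrary, and SHM_LOCK may require CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	if (shmctl(id, SHM_LOCK, NULL) < 0)	/* shmem_lock(..., 1, ...): mapping becomes unevictable */
		perror("shmctl(SHM_LOCK)");
	if (shmctl(id, SHM_UNLOCK, NULL) < 0)	/* shmem_lock(..., 0, ...): rescan, pages evictable again */
		perror("shmctl(SHM_UNLOCK)");
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
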
diff --git a/mm/swap.c b/mm/swap.c
index 9e0cb31..2152e48 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,11 +31,12 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 
+#include "internal.h"
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
+static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
 /*
@@ -116,8 +117,9 @@
 			zone = pagezone;
 			spin_lock(&zone->lru_lock);
 		}
-		if (PageLRU(page) && !PageActive(page)) {
-			list_move_tail(&page->lru, &zone->inactive_list);
+		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+			int lru = page_is_file_cache(page);
+			list_move_tail(&page->lru, &zone->lru[lru].list);
 			pgmoved++;
 		}
 	}
@@ -136,7 +138,7 @@
 void  rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
-	    PageLRU(page)) {
+	    !PageUnevictable(page) && PageLRU(page)) {
 		struct pagevec *pvec;
 		unsigned long flags;
 
@@ -157,12 +159,19 @@
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page)) {
-		del_page_from_inactive_list(zone, page);
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		int file = page_is_file_cache(page);
+		int lru = LRU_BASE + file;
+		del_page_from_lru_list(zone, page, lru);
+
 		SetPageActive(page);
-		add_page_to_active_list(zone, page);
+		lru += LRU_ACTIVE;
+		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page, true);
+		mem_cgroup_move_lists(page, lru);
+
+		zone->recent_rotated[!!file]++;
+		zone->recent_scanned[!!file]++;
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -176,7 +185,8 @@
  */
 void mark_page_accessed(struct page *page)
 {
-	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+	if (!PageActive(page) && !PageUnevictable(page) &&
+			PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
 		ClearPageReferenced(page);
 	} else if (!PageReferenced(page)) {
@@ -186,28 +196,73 @@
 
 EXPORT_SYMBOL(mark_page_accessed);
 
-/**
- * lru_cache_add: add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add(struct page *page)
+void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
-		__pagevec_lru_add(pvec);
+		____pagevec_lru_add(pvec, lru);
 	put_cpu_var(lru_add_pvecs);
 }
 
-void lru_cache_add_active(struct page *page)
+/**
+ * lru_cache_add_lru - add a page to a page list
+ * @page: the page to be added to the LRU.
+ * @lru: the LRU list to which the page is added.
+ */
+void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
+	if (PageActive(page)) {
+		VM_BUG_ON(PageUnevictable(page));
+		ClearPageActive(page);
+	} else if (PageUnevictable(page)) {
+		VM_BUG_ON(PageActive(page));
+		ClearPageUnevictable(page);
+	}
 
-	page_cache_get(page);
-	if (!pagevec_add(pvec, page))
-		__pagevec_lru_add_active(pvec);
-	put_cpu_var(lru_add_active_pvecs);
+	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
+	__lru_cache_add(page, lru);
+}
+
+/**
+ * add_page_to_unevictable_list - add a page to the unevictable list
+ * @page:  the page to be added to the unevictable list
+ *
+ * Add page directly to its zone's unevictable list.  To avoid races with
+ * tasks that might be making the page evictable, through e.g. munlock,
+ * munmap or exit, while it's not on the LRU, we want to add the page
+ * while it's locked or otherwise "invisible" to other tasks.  This is
+ * difficult to do when using the pagevec cache, so bypass that.
+ */
+void add_page_to_unevictable_list(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+
+	spin_lock_irq(&zone->lru_lock);
+	SetPageUnevictable(page);
+	SetPageLRU(page);
+	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+	spin_unlock_irq(&zone->lru_lock);
+}
+
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page:  the page to be added to LRU
+ * @vma:   vma in which page is mapped for determining reclaimability
+ *
+ * Place @page on the active or unevictable LRU list, depending on
+ * page_evictable().  Note that if the page is not evictable,
+ * it goes directly back onto its zone's unevictable list.  It does
+ * NOT use a per-cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+					struct vm_area_struct *vma)
+{
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
 }
 
 /*
@@ -217,15 +272,15 @@
  */
 static void drain_cpu_pagevecs(int cpu)
 {
+	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
 	struct pagevec *pvec;
+	int lru;
 
-	pvec = &per_cpu(lru_add_pvecs, cpu);
-	if (pagevec_count(pvec))
-		__pagevec_lru_add(pvec);
-
-	pvec = &per_cpu(lru_add_active_pvecs, cpu);
-	if (pagevec_count(pvec))
-		__pagevec_lru_add_active(pvec);
+	for_each_lru(lru) {
+		pvec = &pvecs[lru - LRU_BASE];
+		if (pagevec_count(pvec))
+			____pagevec_lru_add(pvec, lru);
+	}
 
 	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 	if (pagevec_count(pvec)) {
@@ -244,7 +299,7 @@
 	put_cpu();
 }
 
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
@@ -308,6 +363,7 @@
 
 		if (PageLRU(page)) {
 			struct zone *pagezone = page_zone(page);
+
 			if (pagezone != zone) {
 				if (zone)
 					spin_unlock_irqrestore(&zone->lru_lock,
@@ -380,10 +436,11 @@
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
  */
-void __pagevec_lru_add(struct pagevec *pvec)
+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
+	VM_BUG_ON(is_unevictable_lru(lru));
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -395,38 +452,13 @@
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		add_page_to_inactive_list(zone, page);
-	}
-	if (zone)
-		spin_unlock_irq(&zone->lru_lock);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
-	pagevec_reinit(pvec);
-}
-
-EXPORT_SYMBOL(__pagevec_lru_add);
-
-void __pagevec_lru_add_active(struct pagevec *pvec)
-{
-	int i;
-	struct zone *zone = NULL;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct zone *pagezone = page_zone(page);
-
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
 		VM_BUG_ON(PageActive(page));
-		SetPageActive(page);
-		add_page_to_active_list(zone, page);
+		VM_BUG_ON(PageUnevictable(page));
+		VM_BUG_ON(PageLRU(page));
+		SetPageLRU(page);
+		if (is_active_lru(lru))
+			SetPageActive(page);
+		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
 		spin_unlock_irq(&zone->lru_lock);
@@ -434,6 +466,8 @@
 	pagevec_reinit(pvec);
 }
 
+EXPORT_SYMBOL(____pagevec_lru_add);
+
 /*
  * Try to drop buffers from the pages in a pagevec
  */
@@ -453,6 +487,30 @@
 }
 
 /**
+ * pagevec_swap_free - try to free swap space from the pages in a pagevec
+ * @pvec: pagevec with swapcache pages to free the swap space of
+ *
+ * The caller needs to hold an extra reference to each page and
+ * not hold the page lock on the pages.  This function uses a
+ * trylock on the page lock so it may not always free the swap
+ * space associated with a page.
+ */
+void pagevec_swap_free(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+
+		if (PageSwapCache(page) && trylock_page(page)) {
+			if (PageSwapCache(page))
+				remove_exclusive_swap_page_ref(page);
+			unlock_page(page);
+		}
+	}
+}
+
+/**
  * pagevec_lookup - gang pagecache lookup
  * @pvec:	Where the resulting pages are placed
  * @mapping:	The address_space to search
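
The mm/swap.c changes replace the single active/inactive pagevec pair with a per-CPU array of pagevecs indexed by enum lru_list, and the target list is computed everywhere as "base + file + active", with the unevictable list kept out of the pagevec path entirely.  The enum layout in the sketch below is an assumption (in the full patch it lives in include/linux/mmzone.h, which is not part of this hunk); it only illustrates how that index arithmetic selects one of the four evictable lists.

#include <stdio.h>

/* assumed layout; only the arithmetic is the point */
enum lru_list {
	LRU_INACTIVE_ANON,	/* LRU_BASE */
	LRU_ACTIVE_ANON,	/* LRU_BASE + LRU_ACTIVE */
	LRU_INACTIVE_FILE,	/* LRU_BASE + LRU_FILE */
	LRU_ACTIVE_FILE,	/* LRU_BASE + LRU_FILE + LRU_ACTIVE */
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define LRU_BASE	0
#define LRU_ACTIVE	1
#define LRU_FILE	2

/* model of the selection done in activate_page()/putback_lru_page() */
static int pick_lru(int file, int active, int evictable)
{
	if (!evictable)
		return LRU_UNEVICTABLE;
	return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}

int main(void)
{
	printf("anon, inactive -> %d (LRU_INACTIVE_ANON)\n", pick_lru(0, 0, 1));
	printf("file, active   -> %d (LRU_ACTIVE_FILE)\n",   pick_lru(1, 1, 1));
	printf("mlocked/ramfs  -> %d (LRU_UNEVICTABLE)\n",   pick_lru(0, 1, 0));
	return 0;
}
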
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 797c383..3353c90 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -33,7 +33,7 @@
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
+	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
 	.unplug_io_fn	= swap_unplug_io_fn,
 };
 
@@ -75,6 +75,7 @@
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageSwapCache(page));
 	BUG_ON(PagePrivate(page));
+	BUG_ON(!PageSwapBacked(page));
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
 		page_cache_get(page);
@@ -302,17 +303,19 @@
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		set_page_locked(new_page);
+		__set_page_locked(new_page);
+		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
 			/*
 			 * Initiate read into locked page and return.
 			 */
-			lru_cache_add_active(new_page);
+			lru_cache_add_anon(new_page);
 			swap_readpage(NULL, new_page);
 			return new_page;
 		}
-		clear_page_locked(new_page);
+		ClearPageSwapBacked(new_page);
+		__clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1e330f2..90cb67a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -344,7 +344,7 @@
  * Work out if there are any other processes sharing this
  * swap cache page. Free it if you can. Return success.
  */
-int remove_exclusive_swap_page(struct page *page)
+static int remove_exclusive_swap_page_count(struct page *page, int count)
 {
 	int retval;
 	struct swap_info_struct * p;
@@ -357,7 +357,7 @@
 		return 0;
 	if (PageWriteback(page))
 		return 0;
-	if (page_count(page) != 2) /* 2: us + cache */
+	if (page_count(page) != count) /* us + cache + ptes */
 		return 0;
 
 	entry.val = page_private(page);
@@ -370,7 +370,7 @@
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
 		spin_lock_irq(&swapper_space.tree_lock);
-		if ((page_count(page) == 2) && !PageWriteback(page)) {
+		if ((page_count(page) == count) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
@@ -388,6 +388,25 @@
 }
 
 /*
+ * Most of the time the page should have two references: one for the
+ * process and one for the swap cache.
+ */
+int remove_exclusive_swap_page(struct page *page)
+{
+	return remove_exclusive_swap_page_count(page, 2);
+}
+
+/*
+ * The pageout code holds an extra reference to the page.  That raises
+ * the reference count to test for to 2 (swap cache plus the pageout
+ * reference) for a page that is only in the swap cache, plus 1 for each
+ * process that maps the page.
+ */
+int remove_exclusive_swap_page_ref(struct page *page)
+{
+	return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
+}
+
+/*
  * Free the swap entry like above, but also try to
  * free the page cache entry if it is the last user.
  */
@@ -403,7 +422,7 @@
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
 			page = find_get_page(&swapper_space, entry.val);
-			if (page && unlikely(!trylock_page(page))) {
+			if (page && !trylock_page(page)) {
 				page_cache_release(page);
 				page = NULL;
 			}
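
remove_exclusive_swap_page_ref() above generalises the old "page_count == 2" test: when called from the pageout path, the page is pinned by the swap cache, by the caller, and once per process that still maps it.  A quick worked example of the expected count (plain userspace C, purely illustrative):

#include <stdio.h>

/* expected page_count() for an "exclusive" swap-cache page on the pageout path */
static int expected_count(int mapcount)
{
	return 2 + mapcount;	/* swap cache + caller + one per mapping pte */
}

int main(void)
{
	/* page still mapped by three processes */
	printf("mapcount=3 -> exclusive only if page_count == %d\n",
	       expected_count(3));
	/* fully unmapped page: the original remove_exclusive_swap_page() case */
	printf("mapcount=0 -> exclusive only if page_count == %d\n",
	       expected_count(0));
	return 0;
}
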
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index 8d7a27a..3e67d57 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -95,6 +95,7 @@
 put_memory:
 	return ERR_PTR(error);
 }
+EXPORT_SYMBOL_GPL(shmem_file_setup);
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
diff --git a/mm/truncate.c b/mm/truncate.c
index e83e4b1..1229211 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -18,6 +18,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
 				   do_invalidatepage */
+#include "internal.h"
 
 
 /**
@@ -103,6 +104,7 @@
 
 	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 
+	clear_page_mlock(page);
 	remove_from_page_cache(page);
 	ClearPageMappedToDisk(page);
 	page_cache_release(page);	/* pagecache ref */
@@ -127,6 +129,7 @@
 	if (PagePrivate(page) && !try_to_release_page(page, 0))
 		return 0;
 
+	clear_page_mlock(page);
 	ret = remove_mapping(mapping, page);
 
 	return ret;
@@ -352,6 +355,7 @@
 	if (PageDirty(page))
 		goto failed;
 
+	clear_page_mlock(page);
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index bba06c4..65ae576 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -8,6 +8,7 @@
  *  Numa awareness, Christoph Lameter, SGI, June 2005
  */
 
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
@@ -16,18 +17,18 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugobjects.h>
-#include <linux/vmalloc.h>
 #include <linux/kallsyms.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
 
+#include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 
 
-DEFINE_RWLOCK(vmlist_lock);
-struct vm_struct *vmlist;
-
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller);
+/*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
@@ -40,8 +41,7 @@
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
-						unsigned long end)
+static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -55,8 +55,7 @@
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
-						unsigned long end)
+static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -70,12 +69,10 @@
 	} while (pud++, addr = next, addr != end);
 }
 
-void unmap_kernel_range(unsigned long addr, unsigned long size)
+static void vunmap_page_range(unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long start = addr;
-	unsigned long end = addr + size;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
@@ -86,35 +83,36 @@
 			continue;
 		vunmap_pud_range(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range(start, end);
-}
-
-static void unmap_vm_area(struct vm_struct *area)
-{
-	unmap_kernel_range((unsigned long)area->addr, area->size);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
-			unsigned long end, pgprot_t prot, struct page ***pages)
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 {
 	pte_t *pte;
 
+	/*
+	 * nr is a running index into the array which helps higher level
+	 * callers keep track of where we're up to.
+	 */
+
 	pte = pte_alloc_kernel(pmd, addr);
 	if (!pte)
 		return -ENOMEM;
 	do {
-		struct page *page = **pages;
-		WARN_ON(!pte_none(*pte));
-		if (!page)
+		struct page *page = pages[*nr];
+
+		if (WARN_ON(!pte_none(*pte)))
+			return -EBUSY;
+		if (WARN_ON(!page))
 			return -ENOMEM;
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
-		(*pages)++;
+		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	return 0;
 }
 
-static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
-			unsigned long end, pgprot_t prot, struct page ***pages)
+static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -124,14 +122,14 @@
 		return -ENOMEM;
 	do {
 		next = pmd_addr_end(addr, end);
-		if (vmap_pte_range(pmd, addr, next, prot, pages))
+		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
 			return -ENOMEM;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
-			unsigned long end, pgprot_t prot, struct page ***pages)
+static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -141,57 +139,78 @@
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (vmap_pmd_range(pud, addr, next, prot, pages))
+		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
 			return -ENOMEM;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+/*
+ * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
+ * will have pfns corresponding to the "pages" array.
+ *
+ * I.e. the pte at addr + N*PAGE_SIZE shall point to the pfn of pages[N].
+ */
+static int vmap_page_range(unsigned long addr, unsigned long end,
+				pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long addr = (unsigned long) area->addr;
-	unsigned long end = addr + area->size - PAGE_SIZE;
-	int err;
+	int err = 0;
+	int nr = 0;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = vmap_pud_range(pgd, addr, next, prot, pages);
+		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap((unsigned long) area->addr, end);
-	return err;
+	flush_cache_vmap(addr, end);
+
+	if (unlikely(err))
+		return err;
+	return nr;
 }
-EXPORT_SYMBOL_GPL(map_vm_area);
+
+static inline int is_vmalloc_or_module_addr(const void *x)
+{
+	/*
+	 * x86-64 and sparc64 put modules in a special place,
+	 * and fall back on vmalloc() if that fails. Others
+	 * just put them in the vmalloc space.
+	 */
+#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+	unsigned long addr = (unsigned long)x;
+	if (addr >= MODULES_VADDR && addr < MODULES_END)
+		return 1;
+#endif
+	return is_vmalloc_addr(x);
+}
 
 /*
- * Map a vmalloc()-space virtual address to the physical page.
+ * Walk a vmap address to the struct page it maps.
  */
 struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
 	struct page *page = NULL;
 	pgd_t *pgd = pgd_offset_k(addr);
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
 
 	/*
 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 	 * architectures that do not vmalloc module space
 	 */
-	VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
-			!is_module_address(addr));
+	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 
 	if (!pgd_none(*pgd)) {
-		pud = pud_offset(pgd, addr);
+		pud_t *pud = pud_offset(pgd, addr);
 		if (!pud_none(*pud)) {
-			pmd = pmd_offset(pud, addr);
+			pmd_t *pmd = pmd_offset(pud, addr);
 			if (!pmd_none(*pmd)) {
+				pte_t *ptep, pte;
+
 				ptep = pte_offset_map(pmd, addr);
 				pte = *ptep;
 				if (pte_present(pte))
@@ -213,13 +232,751 @@
 }
 EXPORT_SYMBOL(vmalloc_to_pfn);
 
-static struct vm_struct *
-__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
-		unsigned long end, int node, gfp_t gfp_mask, void *caller)
+
+/*** Global kva allocator ***/
+
+#define VM_LAZY_FREE	0x01
+#define VM_LAZY_FREEING	0x02
+#define VM_VM_AREA	0x04
+
+struct vmap_area {
+	unsigned long va_start;
+	unsigned long va_end;
+	unsigned long flags;
+	struct rb_node rb_node;		/* address sorted rbtree */
+	struct list_head list;		/* address sorted list */
+	struct list_head purge_list;	/* "lazy purge" list */
+	void *private;
+	struct rcu_head rcu_head;
+};
+
+static DEFINE_SPINLOCK(vmap_area_lock);
+static struct rb_root vmap_area_root = RB_ROOT;
+static LIST_HEAD(vmap_area_list);
+
+static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
-	struct vm_struct **p, *tmp, *area;
-	unsigned long align = 1;
+	struct rb_node *n = vmap_area_root.rb_node;
+
+	while (n) {
+		struct vmap_area *va;
+
+		va = rb_entry(n, struct vmap_area, rb_node);
+		if (addr < va->va_start)
+			n = n->rb_left;
+		else if (addr > va->va_start)
+			n = n->rb_right;
+		else
+			return va;
+	}
+
+	return NULL;
+}
+
+static void __insert_vmap_area(struct vmap_area *va)
+{
+	struct rb_node **p = &vmap_area_root.rb_node;
+	struct rb_node *parent = NULL;
+	struct rb_node *tmp;
+
+	while (*p) {
+		struct vmap_area *tmp;
+
+		parent = *p;
+		tmp = rb_entry(parent, struct vmap_area, rb_node);
+		if (va->va_start < tmp->va_end)
+			p = &(*p)->rb_left;
+		else if (va->va_end > tmp->va_start)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&va->rb_node, parent, p);
+	rb_insert_color(&va->rb_node, &vmap_area_root);
+
+	/* address-sort this list so it is usable like the vmlist */
+	tmp = rb_prev(&va->rb_node);
+	if (tmp) {
+		struct vmap_area *prev;
+		prev = rb_entry(tmp, struct vmap_area, rb_node);
+		list_add_rcu(&va->list, &prev->list);
+	} else
+		list_add_rcu(&va->list, &vmap_area_list);
+}
+
+static void purge_vmap_area_lazy(void);
+
+/*
+ * Allocate a region of KVA of the specified size and alignment, between
+ * vstart and vend.
+ */
+static struct vmap_area *alloc_vmap_area(unsigned long size,
+				unsigned long align,
+				unsigned long vstart, unsigned long vend,
+				int node, gfp_t gfp_mask)
+{
+	struct vmap_area *va;
+	struct rb_node *n;
 	unsigned long addr;
+	int purged = 0;
+
+	BUG_ON(size & ~PAGE_MASK);
+
+	addr = ALIGN(vstart, align);
+
+	va = kmalloc_node(sizeof(struct vmap_area),
+			gfp_mask & GFP_RECLAIM_MASK, node);
+	if (unlikely(!va))
+		return ERR_PTR(-ENOMEM);
+
+retry:
+	spin_lock(&vmap_area_lock);
+	/* XXX: could have a last_hole cache */
+	n = vmap_area_root.rb_node;
+	if (n) {
+		struct vmap_area *first = NULL;
+
+		do {
+			struct vmap_area *tmp;
+			tmp = rb_entry(n, struct vmap_area, rb_node);
+			if (tmp->va_end >= addr) {
+				if (!first && tmp->va_start < addr + size)
+					first = tmp;
+				n = n->rb_left;
+			} else {
+				first = tmp;
+				n = n->rb_right;
+			}
+		} while (n);
+
+		if (!first)
+			goto found;
+
+		if (first->va_end < addr) {
+			n = rb_next(&first->rb_node);
+			if (n)
+				first = rb_entry(n, struct vmap_area, rb_node);
+			else
+				goto found;
+		}
+
+		while (addr + size >= first->va_start && addr + size <= vend) {
+			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+
+			n = rb_next(&first->rb_node);
+			if (n)
+				first = rb_entry(n, struct vmap_area, rb_node);
+			else
+				goto found;
+		}
+	}
+found:
+	if (addr + size > vend) {
+		spin_unlock(&vmap_area_lock);
+		if (!purged) {
+			purge_vmap_area_lazy();
+			purged = 1;
+			goto retry;
+		}
+		if (printk_ratelimit())
+			printk(KERN_WARNING "vmap allocation failed: "
+				 "use vmalloc=<size> to increase size.\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	BUG_ON(addr & (align-1));
+
+	va->va_start = addr;
+	va->va_end = addr + size;
+	va->flags = 0;
+	__insert_vmap_area(va);
+	spin_unlock(&vmap_area_lock);
+
+	return va;
+}
+
+static void rcu_free_va(struct rcu_head *head)
+{
+	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
+
+	kfree(va);
+}
+
+static void __free_vmap_area(struct vmap_area *va)
+{
+	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
+	rb_erase(&va->rb_node, &vmap_area_root);
+	RB_CLEAR_NODE(&va->rb_node);
+	list_del_rcu(&va->list);
+
+	call_rcu(&va->rcu_head, rcu_free_va);
+}
+
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
+{
+	spin_lock(&vmap_area_lock);
+	__free_vmap_area(va);
+	spin_unlock(&vmap_area_lock);
+}
+
+/*
+ * Clear the pagetable entries of a given vmap_area
+ */
+static void unmap_vmap_area(struct vmap_area *va)
+{
+	vunmap_page_range(va->va_start, va->va_end);
+}
+
+/*
+ * lazy_max_pages is the maximum amount of virtual address space we gather up
+ * before attempting to purge with a TLB flush.
+ *
+ * There is a tradeoff here: a larger number will cover more kernel page tables
+ * and take slightly longer to purge, but it will linearly reduce the number of
+ * global TLB flushes that must be performed. It would seem natural to scale
+ * this number up linearly with the number of CPUs (because vmapping activity
+ * could also scale linearly with the number of CPUs), however it is likely
+ * that in practice, workloads might be constrained in other ways that mean
+ * vmap activity will not scale linearly with CPUs. Also, I want to be
+ * conservative and not introduce a big latency on huge systems, so go with
+ * a less aggressive log scale. It will still be an improvement over the old
+ * code, and it will be simple to change the scale factor if we find that it
+ * becomes a problem on bigger systems.
+ */
+static unsigned long lazy_max_pages(void)
+{
+	unsigned int log;
+
+	log = fls(num_online_cpus());
+
+	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
+}
+
+static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+
+/*
+ * Purges all lazily-freed vmap areas.
+ *
+ * If sync is 0 then don't purge if there is already a purge in progress.
+ * If force_flush is 1, then flush kernel TLBs between *start and *end even
+ * if we found no lazy vmap areas to unmap (callers can use this to optimise
+ * their own TLB flushing).
+ * Returns with *start = min(*start, lowest purged address)
+ *              *end = max(*end, highest purged address)
+ */
+static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
+					int sync, int force_flush)
+{
+	static DEFINE_SPINLOCK(purge_lock);
+	LIST_HEAD(valist);
+	struct vmap_area *va;
+	int nr = 0;
+
+	/*
+	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
+	 * should not expect such behaviour. This just simplifies locking for
+	 * the case that isn't actually used at the moment anyway.
+	 */
+	if (!sync && !force_flush) {
+		if (!spin_trylock(&purge_lock))
+			return;
+	} else
+		spin_lock(&purge_lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(va, &vmap_area_list, list) {
+		if (va->flags & VM_LAZY_FREE) {
+			if (va->va_start < *start)
+				*start = va->va_start;
+			if (va->va_end > *end)
+				*end = va->va_end;
+			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+			unmap_vmap_area(va);
+			list_add_tail(&va->purge_list, &valist);
+			va->flags |= VM_LAZY_FREEING;
+			va->flags &= ~VM_LAZY_FREE;
+		}
+	}
+	rcu_read_unlock();
+
+	if (nr) {
+		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
+		atomic_sub(nr, &vmap_lazy_nr);
+	}
+
+	if (nr || force_flush)
+		flush_tlb_kernel_range(*start, *end);
+
+	if (nr) {
+		spin_lock(&vmap_area_lock);
+		list_for_each_entry(va, &valist, purge_list)
+			__free_vmap_area(va);
+		spin_unlock(&vmap_area_lock);
+	}
+	spin_unlock(&purge_lock);
+}
+
+/*
+ * Kick off a purge of the outstanding lazy areas.
+ */
+static void purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+	va->flags |= VM_LAZY_FREE;
+	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
+	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
+		purge_vmap_area_lazy();
+}
+
+static struct vmap_area *find_vmap_area(unsigned long addr)
+{
+	struct vmap_area *va;
+
+	spin_lock(&vmap_area_lock);
+	va = __find_vmap_area(addr);
+	spin_unlock(&vmap_area_lock);
+
+	return va;
+}
+
+static void free_unmap_vmap_area_addr(unsigned long addr)
+{
+	struct vmap_area *va;
+
+	va = find_vmap_area(addr);
+	BUG_ON(!va);
+	free_unmap_vmap_area(va);
+}
+
+
+/*** Per cpu kva allocator ***/
+
+/*
+ * vmap space is limited especially on 32 bit architectures. Ensure there is
+ * room for at least 16 percpu vmap blocks per CPU.
+ */
+/*
+ * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
+ * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
+ * instead (we just need a rough idea)
+ */
+#if BITS_PER_LONG == 32
+#define VMALLOC_SPACE		(128UL*1024*1024)
+#else
+#define VMALLOC_SPACE		(128UL*1024*1024*1024)
+#endif
+
+#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
+#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
+#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
+#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
+#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
+#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
+#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,		\
+					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
+						VMALLOC_PAGES / NR_CPUS / 16))
+
+#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
+
+struct vmap_block_queue {
+	spinlock_t lock;
+	struct list_head free;
+	struct list_head dirty;
+	unsigned int nr_dirty;
+};
+
+struct vmap_block {
+	spinlock_t lock;
+	struct vmap_area *va;
+	struct vmap_block_queue *vbq;
+	unsigned long free, dirty;
+	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
+	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
+	union {
+		struct {
+			struct list_head free_list;
+			struct list_head dirty_list;
+		};
+		struct rcu_head rcu_head;
+	};
+};
+
+/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
+static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
+
+/*
+ * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
+ * in the free path. Could get rid of this if we change the API to return a
+ * "cookie" from alloc, to be passed to free. But no big deal yet.
+ */
+static DEFINE_SPINLOCK(vmap_block_tree_lock);
+static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
+
+/*
+ * We should probably have a fallback mechanism to allocate virtual memory
+ * out of partially filled vmap blocks. However vmap block sizing should be
+ * fairly reasonable according to the vmalloc size, so it shouldn't be a
+ * big problem.
+ */
+
+static unsigned long addr_to_vb_idx(unsigned long addr)
+{
+	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
+	addr /= VMAP_BLOCK_SIZE;
+	return addr;
+}
+
+static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+{
+	struct vmap_block_queue *vbq;
+	struct vmap_block *vb;
+	struct vmap_area *va;
+	unsigned long vb_idx;
+	int node, err;
+
+	node = numa_node_id();
+
+	vb = kmalloc_node(sizeof(struct vmap_block),
+			gfp_mask & GFP_RECLAIM_MASK, node);
+	if (unlikely(!vb))
+		return ERR_PTR(-ENOMEM);
+
+	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
+					VMALLOC_START, VMALLOC_END,
+					node, gfp_mask);
+	if (unlikely(IS_ERR(va))) {
+		kfree(vb);
+		return ERR_PTR(PTR_ERR(va));
+	}
+
+	err = radix_tree_preload(gfp_mask);
+	if (unlikely(err)) {
+		kfree(vb);
+		free_vmap_area(va);
+		return ERR_PTR(err);
+	}
+
+	spin_lock_init(&vb->lock);
+	vb->va = va;
+	vb->free = VMAP_BBMAP_BITS;
+	vb->dirty = 0;
+	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
+	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
+	INIT_LIST_HEAD(&vb->free_list);
+	INIT_LIST_HEAD(&vb->dirty_list);
+
+	vb_idx = addr_to_vb_idx(va->va_start);
+	spin_lock(&vmap_block_tree_lock);
+	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
+	spin_unlock(&vmap_block_tree_lock);
+	BUG_ON(err);
+	radix_tree_preload_end();
+
+	vbq = &get_cpu_var(vmap_block_queue);
+	vb->vbq = vbq;
+	spin_lock(&vbq->lock);
+	list_add(&vb->free_list, &vbq->free);
+	spin_unlock(&vbq->lock);
+	put_cpu_var(vmap_block_queue);
+
+	return vb;
+}
+
+static void rcu_free_vb(struct rcu_head *head)
+{
+	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
+
+	kfree(vb);
+}
+
+static void free_vmap_block(struct vmap_block *vb)
+{
+	struct vmap_block *tmp;
+	unsigned long vb_idx;
+
+	spin_lock(&vb->vbq->lock);
+	if (!list_empty(&vb->free_list))
+		list_del(&vb->free_list);
+	if (!list_empty(&vb->dirty_list))
+		list_del(&vb->dirty_list);
+	spin_unlock(&vb->vbq->lock);
+
+	vb_idx = addr_to_vb_idx(vb->va->va_start);
+	spin_lock(&vmap_block_tree_lock);
+	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
+	spin_unlock(&vmap_block_tree_lock);
+	BUG_ON(tmp != vb);
+
+	free_unmap_vmap_area(vb->va);
+	call_rcu(&vb->rcu_head, rcu_free_vb);
+}
+
+static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+{
+	struct vmap_block_queue *vbq;
+	struct vmap_block *vb;
+	unsigned long addr = 0;
+	unsigned int order;
+
+	BUG_ON(size & ~PAGE_MASK);
+	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+	order = get_order(size);
+
+again:
+	rcu_read_lock();
+	vbq = &get_cpu_var(vmap_block_queue);
+	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+		int i;
+
+		spin_lock(&vb->lock);
+		i = bitmap_find_free_region(vb->alloc_map,
+						VMAP_BBMAP_BITS, order);
+
+		if (i >= 0) {
+			addr = vb->va->va_start + (i << PAGE_SHIFT);
+			BUG_ON(addr_to_vb_idx(addr) !=
+					addr_to_vb_idx(vb->va->va_start));
+			vb->free -= 1UL << order;
+			if (vb->free == 0) {
+				spin_lock(&vbq->lock);
+				list_del_init(&vb->free_list);
+				spin_unlock(&vbq->lock);
+			}
+			spin_unlock(&vb->lock);
+			break;
+		}
+		spin_unlock(&vb->lock);
+	}
+	put_cpu_var(vmap_block_queue);
+	rcu_read_unlock();
+
+	if (!addr) {
+		vb = new_vmap_block(gfp_mask);
+		if (IS_ERR(vb))
+			return vb;
+		goto again;
+	}
+
+	return (void *)addr;
+}
+
+static void vb_free(const void *addr, unsigned long size)
+{
+	unsigned long offset;
+	unsigned long vb_idx;
+	unsigned int order;
+	struct vmap_block *vb;
+
+	BUG_ON(size & ~PAGE_MASK);
+	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+	order = get_order(size);
+
+	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
+
+	vb_idx = addr_to_vb_idx((unsigned long)addr);
+	rcu_read_lock();
+	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
+	rcu_read_unlock();
+	BUG_ON(!vb);
+
+	spin_lock(&vb->lock);
+	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
+	if (!vb->dirty) {
+		spin_lock(&vb->vbq->lock);
+		list_add(&vb->dirty_list, &vb->vbq->dirty);
+		spin_unlock(&vb->vbq->lock);
+	}
+	vb->dirty += 1UL << order;
+	if (vb->dirty == VMAP_BBMAP_BITS) {
+		BUG_ON(vb->free || !list_empty(&vb->free_list));
+		spin_unlock(&vb->lock);
+		free_vmap_block(vb);
+	} else
+		spin_unlock(&vb->lock);
+}
+
+/**
+ * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
+ *
+ * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
+ * to amortize TLB flushing overheads. What this means is that any page you
+ * have now may, in a former life, have been mapped into a kernel virtual
+ * address by the vmap layer, so there might be some CPUs with TLB entries
+ * still referencing that page (in addition to the regular 1:1 kernel mapping).
+ *
+ * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
+ * be sure that none of the pages we have control over will have any aliases
+ * from the vmap layer.
+ */
+void vm_unmap_aliases(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+	int cpu;
+	int flush = 0;
+
+	for_each_possible_cpu(cpu) {
+		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+		struct vmap_block *vb;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+			int i;
+
+			spin_lock(&vb->lock);
+			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
+			while (i < VMAP_BBMAP_BITS) {
+				unsigned long s, e;
+				int j;
+				j = find_next_zero_bit(vb->dirty_map,
+					VMAP_BBMAP_BITS, i);
+
+				s = vb->va->va_start + (i << PAGE_SHIFT);
+				e = vb->va->va_start + (j << PAGE_SHIFT);
+				vunmap_page_range(s, e);
+				flush = 1;
+
+				if (s < start)
+					start = s;
+				if (e > end)
+					end = e;
+
+				i = j;
+				i = find_next_bit(vb->dirty_map,
+							VMAP_BBMAP_BITS, i);
+			}
+			spin_unlock(&vb->lock);
+		}
+		rcu_read_unlock();
+	}
+
+	__purge_vmap_area_lazy(&start, &end, 1, flush);
+}
+EXPORT_SYMBOL_GPL(vm_unmap_aliases);
+
+/**
+ * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
+ * @mem: the pointer returned by vm_map_ram
+ * @count: the count passed to that vm_map_ram call (cannot unmap partial)
+ */
+void vm_unmap_ram(const void *mem, unsigned int count)
+{
+	unsigned long size = count << PAGE_SHIFT;
+	unsigned long addr = (unsigned long)mem;
+
+	BUG_ON(!addr);
+	BUG_ON(addr < VMALLOC_START);
+	BUG_ON(addr > VMALLOC_END);
+	BUG_ON(addr & (PAGE_SIZE-1));
+
+	debug_check_no_locks_freed(mem, size);
+
+	if (likely(count <= VMAP_MAX_ALLOC))
+		vb_free(mem, size);
+	else
+		free_unmap_vmap_area_addr(addr);
+}
+EXPORT_SYMBOL(vm_unmap_ram);
+
+/**
+ * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
+ * @pages: an array of pointers to the pages to be mapped
+ * @count: number of pages
+ * @node: prefer to allocate data structures on this node
+ * @prot: memory protection to use. PAGE_KERNEL for regular RAM
+ * @returns: a pointer to the address that has been mapped, or NULL on failure
+ */
+void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
+{
+	unsigned long size = count << PAGE_SHIFT;
+	unsigned long addr;
+	void *mem;
+
+	if (likely(count <= VMAP_MAX_ALLOC)) {
+		mem = vb_alloc(size, GFP_KERNEL);
+		if (IS_ERR(mem))
+			return NULL;
+		addr = (unsigned long)mem;
+	} else {
+		struct vmap_area *va;
+		va = alloc_vmap_area(size, PAGE_SIZE,
+				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
+		if (IS_ERR(va))
+			return NULL;
+
+		addr = va->va_start;
+		mem = (void *)addr;
+	}
+	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+		vm_unmap_ram(mem, count);
+		return NULL;
+	}
+	return mem;
+}
+EXPORT_SYMBOL(vm_map_ram);
+
+void __init vmalloc_init(void)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct vmap_block_queue *vbq;
+
+		vbq = &per_cpu(vmap_block_queue, i);
+		spin_lock_init(&vbq->lock);
+		INIT_LIST_HEAD(&vbq->free);
+		INIT_LIST_HEAD(&vbq->dirty);
+		vbq->nr_dirty = 0;
+	}
+}
+
+void unmap_kernel_range(unsigned long addr, unsigned long size)
+{
+	unsigned long end = addr + size;
+	vunmap_page_range(addr, end);
+	flush_tlb_kernel_range(addr, end);
+}
+
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+{
+	unsigned long addr = (unsigned long)area->addr;
+	unsigned long end = addr + area->size - PAGE_SIZE;
+	int err;
+
+	err = vmap_page_range(addr, end, prot, *pages);
+	if (err > 0) {
+		*pages += err;
+		err = 0;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(map_vm_area);
+
+/*** Old vmalloc interfaces ***/
+DEFINE_RWLOCK(vmlist_lock);
+struct vm_struct *vmlist;
+
+static struct vm_struct *__get_vm_area_node(unsigned long size,
+		unsigned long flags, unsigned long start, unsigned long end,
+		int node, gfp_t gfp_mask, void *caller)
+{
+	struct vmap_area *va;
+	struct vm_struct *area;
+	struct vm_struct *tmp, **p;
+	unsigned long align = 1;
 
 	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
@@ -232,13 +989,12 @@
 
 		align = 1ul << bit;
 	}
-	addr = ALIGN(start, align);
+
 	size = PAGE_ALIGN(size);
 	if (unlikely(!size))
 		return NULL;
 
 	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
-
 	if (unlikely(!area))
 		return NULL;
 
@@ -247,48 +1003,32 @@
 	 */
 	size += PAGE_SIZE;
 
-	write_lock(&vmlist_lock);
-	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
-		if ((unsigned long)tmp->addr < addr) {
-			if((unsigned long)tmp->addr + tmp->size >= addr)
-				addr = ALIGN(tmp->size + 
-					     (unsigned long)tmp->addr, align);
-			continue;
-		}
-		if ((size + addr) < addr)
-			goto out;
-		if (size + addr <= (unsigned long)tmp->addr)
-			goto found;
-		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
-		if (addr > end - size)
-			goto out;
+	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
+	if (IS_ERR(va)) {
+		kfree(area);
+		return NULL;
 	}
-	if ((size + addr) < addr)
-		goto out;
-	if (addr > end - size)
-		goto out;
-
-found:
-	area->next = *p;
-	*p = area;
 
 	area->flags = flags;
-	area->addr = (void *)addr;
+	area->addr = (void *)va->va_start;
 	area->size = size;
 	area->pages = NULL;
 	area->nr_pages = 0;
 	area->phys_addr = 0;
 	area->caller = caller;
+	va->private = area;
+	va->flags |= VM_VM_AREA;
+
+	write_lock(&vmlist_lock);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= area->addr)
+			break;
+	}
+	area->next = *p;
+	*p = area;
 	write_unlock(&vmlist_lock);
 
 	return area;
-
-out:
-	write_unlock(&vmlist_lock);
-	kfree(area);
-	if (printk_ratelimit())
-		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
-	return NULL;
 }
 
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
@@ -328,39 +1068,15 @@
 				  gfp_mask, __builtin_return_address(0));
 }
 
-/* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(const void *addr)
+static struct vm_struct *find_vm_area(const void *addr)
 {
-	struct vm_struct *tmp;
+	struct vmap_area *va;
 
-	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
-		 if (tmp->addr == addr)
-			break;
-	}
+	va = find_vmap_area((unsigned long)addr);
+	if (va && va->flags & VM_VM_AREA)
+		return va->private;
 
-	return tmp;
-}
-
-/* Caller must hold vmlist_lock */
-static struct vm_struct *__remove_vm_area(const void *addr)
-{
-	struct vm_struct **p, *tmp;
-
-	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
-		 if (tmp->addr == addr)
-			 goto found;
-	}
 	return NULL;
-
-found:
-	unmap_vm_area(tmp);
-	*p = tmp->next;
-
-	/*
-	 * Remove the guard page.
-	 */
-	tmp->size -= PAGE_SIZE;
-	return tmp;
 }
 
 /**
@@ -373,11 +1089,24 @@
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
-	struct vm_struct *v;
-	write_lock(&vmlist_lock);
-	v = __remove_vm_area(addr);
-	write_unlock(&vmlist_lock);
-	return v;
+	struct vmap_area *va;
+
+	va = find_vmap_area((unsigned long)addr);
+	if (va && va->flags & VM_VM_AREA) {
+		struct vm_struct *vm = va->private;
+		struct vm_struct *tmp, **p;
+		free_unmap_vmap_area(va);
+		vm->size -= PAGE_SIZE;
+
+		write_lock(&vmlist_lock);
+		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
+			;
+		*p = tmp->next;
+		write_unlock(&vmlist_lock);
+
+		return vm;
+	}
+	return NULL;
 }
 
 static void __vunmap(const void *addr, int deallocate_pages)
@@ -487,6 +1216,8 @@
 }
 EXPORT_SYMBOL(vmap);
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
 {
@@ -613,10 +1344,8 @@
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 	if (ret) {
-		write_lock(&vmlist_lock);
-		area = __find_vm_area(ret);
+		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
-		write_unlock(&vmlist_lock);
 	}
 	return ret;
 }
@@ -696,10 +1425,8 @@
 
 	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
 	if (ret) {
-		write_lock(&vmlist_lock);
-		area = __find_vm_area(ret);
+		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
-		write_unlock(&vmlist_lock);
 	}
 	return ret;
 }
@@ -800,26 +1527,25 @@
 	struct vm_struct *area;
 	unsigned long uaddr = vma->vm_start;
 	unsigned long usize = vma->vm_end - vma->vm_start;
-	int ret;
 
 	if ((PAGE_SIZE-1) & (unsigned long)addr)
 		return -EINVAL;
 
-	read_lock(&vmlist_lock);
-	area = __find_vm_area(addr);
+	area = find_vm_area(addr);
 	if (!area)
-		goto out_einval_locked;
+		return -EINVAL;
 
 	if (!(area->flags & VM_USERMAP))
-		goto out_einval_locked;
+		return -EINVAL;
 
 	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
-		goto out_einval_locked;
-	read_unlock(&vmlist_lock);
+		return -EINVAL;
 
 	addr += pgoff << PAGE_SHIFT;
 	do {
 		struct page *page = vmalloc_to_page(addr);
+		int ret;
+
 		ret = vm_insert_page(vma, uaddr, page);
 		if (ret)
 			return ret;
@@ -832,11 +1558,7 @@
 	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
 	vma->vm_flags |= VM_RESERVED;
 
-	return ret;
-
-out_einval_locked:
-	read_unlock(&vmlist_lock);
-	return -EINVAL;
+	return 0;
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1ff1a58..3b58602 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -39,6 +39,7 @@
 #include <linux/freezer.h>
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
+#include <linux/sysctl.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -78,7 +79,7 @@
 	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
 			unsigned long *scanned, int order, int mode,
 			struct zone *z, struct mem_cgroup *mem_cont,
-			int active);
+			int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -470,6 +471,85 @@
 	return 0;
 }
 
+/**
+ * putback_lru_page - put previously isolated page onto appropriate LRU list
+ * @page: page to be put back to appropriate lru list
+ *
+ * Add previously isolated @page to appropriate LRU list.
+ * Page may still be unevictable for other reasons.
+ *
+ * lru_lock must not be held, interrupts must be enabled.
+ */
+#ifdef CONFIG_UNEVICTABLE_LRU
+void putback_lru_page(struct page *page)
+{
+	int lru;
+	int active = !!TestClearPageActive(page);
+	int was_unevictable = PageUnevictable(page);
+
+	VM_BUG_ON(PageLRU(page));
+
+redo:
+	ClearPageUnevictable(page);
+
+	if (page_evictable(page, NULL)) {
+		/*
+		 * For evictable pages, we can use the cache.
+		 * In the event of a race, the worst case is we end up with an
+		 * unevictable page on the [in]active list.
+		 * We know how to handle that.
+		 */
+		lru = active + page_is_file_cache(page);
+		lru_cache_add_lru(page, lru);
+	} else {
+		/*
+		 * Put unevictable pages directly on zone's unevictable
+		 * list.
+		 */
+		lru = LRU_UNEVICTABLE;
+		add_page_to_unevictable_list(page);
+	}
+	mem_cgroup_move_lists(page, lru);
+
+	/*
+	 * The page's status can change while we move it among the LRU lists.
+	 * If an evictable page ends up on the unevictable list, it will never
+	 * be freed. To avoid that, check again after adding it to the list.
+	 */
+	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+		if (!isolate_lru_page(page)) {
+			put_page(page);
+			goto redo;
+		}
+		/* This means someone else dropped this page from the LRU.
+		 * So it will be freed or put back on the LRU again. There is
+		 * nothing to do here.
+		 */
+	}
+
+	if (was_unevictable && lru != LRU_UNEVICTABLE)
+		count_vm_event(UNEVICTABLE_PGRESCUED);
+	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+		count_vm_event(UNEVICTABLE_PGCULLED);
+
+	put_page(page);		/* drop ref from isolate */
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+
+void putback_lru_page(struct page *page)
+{
+	int lru;
+	VM_BUG_ON(PageLRU(page));
+
+	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
+	lru_cache_add_lru(page, lru);
+	mem_cgroup_move_lists(page, lru);
+	put_page(page);
+}
+#endif /* CONFIG_UNEVICTABLE_LRU */
+
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -503,6 +583,9 @@
 
 		sc->nr_scanned++;
 
+		if (unlikely(!page_evictable(page, NULL)))
+			goto cull_mlocked;
+
 		if (!sc->may_swap && page_mapped(page))
 			goto keep_locked;
 
@@ -539,9 +622,19 @@
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
 		 */
-		if (PageAnon(page) && !PageSwapCache(page))
+		if (PageAnon(page) && !PageSwapCache(page)) {
+			switch (try_to_munlock(page)) {
+			case SWAP_FAIL:		/* shouldn't happen */
+			case SWAP_AGAIN:
+				goto keep_locked;
+			case SWAP_MLOCK:
+				goto cull_mlocked;
+			case SWAP_SUCCESS:
+				; /* fall thru'; add to swap cache */
+			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+		}
 #endif /* CONFIG_SWAP */
 
 		mapping = page_mapping(page);
@@ -556,6 +649,8 @@
 				goto activate_locked;
 			case SWAP_AGAIN:
 				goto keep_locked;
+			case SWAP_MLOCK:
+				goto cull_mlocked;
 			case SWAP_SUCCESS:
 				; /* try to free the page below */
 			}
@@ -602,7 +697,7 @@
 		 * possible for a page to have PageDirty set, but it is actually
 		 * clean (all its buffers are clean).  This happens if the
 		 * buffers were written out directly, with submit_bh(). ext3
-		 * will do this, as well as the blockdev mapping. 
+		 * will do this, as well as the blockdev mapping.
 		 * try_to_release_page() will discover that cleanness and will
 		 * drop the buffers and mark the page clean - it can be freed.
 		 *
@@ -637,7 +732,14 @@
 		if (!mapping || !__remove_mapping(mapping, page))
 			goto keep_locked;
 
-		unlock_page(page);
+		/*
+		 * At this point, we have no other references and there is
+		 * no way to pick any more up (removed from LRU, removed
+		 * from pagecache). Can use non-atomic bitops now (and
+		 * we obviously don't have to worry about waking up a process
+		 * waiting on the page lock, because there are no references).
+		 */
+		__clear_page_locked(page);
 free_it:
 		nr_reclaimed++;
 		if (!pagevec_add(&freed_pvec, page)) {
@@ -646,14 +748,23 @@
 		}
 		continue;
 
+cull_mlocked:
+		unlock_page(page);
+		putback_lru_page(page);
+		continue;
+
 activate_locked:
+		/* Not a candidate for swapping, so reclaim swap space. */
+		if (PageSwapCache(page) && vm_swap_full())
+			remove_exclusive_swap_page_ref(page);
+		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		pgactivate++;
 keep_locked:
 		unlock_page(page);
 keep:
 		list_add(&page->lru, &ret_pages);
-		VM_BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
 	}
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
@@ -677,7 +788,7 @@
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, int mode)
+int __isolate_lru_page(struct page *page, int mode, int file)
 {
 	int ret = -EINVAL;
 
@@ -693,6 +804,17 @@
 	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
 		return ret;
 
+	if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
+		return ret;
+
+	/*
+	 * When this function is being called for lumpy reclaim, we
+	 * initially look into all LRU pages, active, inactive and
+	 * unevictable; only give shrink_page_list evictable pages.
+	 */
+	if (PageUnevictable(page))
+		return ret;
+
 	ret = -EBUSY;
 	if (likely(get_page_unless_zero(page))) {
 		/*
@@ -723,12 +845,13 @@
  * @scanned:	The number of pages that were scanned.
  * @order:	The caller's attempted allocation order
  * @mode:	One of the LRU isolation modes
+ * @file:	True [1] if isolating file [!anon] pages
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct list_head *src, struct list_head *dst,
-		unsigned long *scanned, int order, int mode)
+		unsigned long *scanned, int order, int mode, int file)
 {
 	unsigned long nr_taken = 0;
 	unsigned long scan;
@@ -745,7 +868,7 @@
 
 		VM_BUG_ON(!PageLRU(page));
 
-		switch (__isolate_lru_page(page, mode)) {
+		switch (__isolate_lru_page(page, mode, file)) {
 		case 0:
 			list_move(&page->lru, dst);
 			nr_taken++;
@@ -788,10 +911,11 @@
 				break;
 
 			cursor_page = pfn_to_page(pfn);
+
 			/* Check that we have not crossed a zone boundary. */
 			if (unlikely(page_zone_id(cursor_page) != zone_id))
 				continue;
-			switch (__isolate_lru_page(cursor_page, mode)) {
+			switch (__isolate_lru_page(cursor_page, mode, file)) {
 			case 0:
 				list_move(&cursor_page->lru, dst);
 				nr_taken++;
@@ -802,7 +926,7 @@
 				/* else it is being freed elsewhere */
 				list_move(&cursor_page->lru, src);
 			default:
-				break;
+				break;	/* ! on LRU or wrong list */
 			}
 		}
 	}
@@ -816,40 +940,93 @@
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
 					struct mem_cgroup *mem_cont,
-					int active)
+					int active, int file)
 {
+	int lru = LRU_BASE;
 	if (active)
-		return isolate_lru_pages(nr, &z->active_list, dst,
-						scanned, order, mode);
-	else
-		return isolate_lru_pages(nr, &z->inactive_list, dst,
-						scanned, order, mode);
+		lru += LRU_ACTIVE;
+	if (file)
+		lru += LRU_FILE;
+	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
+								mode, !!file);
 }
 
 /*
  * clear_active_flags() is a helper for shrink_active_list(), clearing
  * any active bits from the pages in the list.
  */
-static unsigned long clear_active_flags(struct list_head *page_list)
+static unsigned long clear_active_flags(struct list_head *page_list,
+					unsigned int *count)
 {
 	int nr_active = 0;
+	int lru;
 	struct page *page;
 
-	list_for_each_entry(page, page_list, lru)
+	list_for_each_entry(page, page_list, lru) {
+		lru = page_is_file_cache(page);
 		if (PageActive(page)) {
+			lru += LRU_ACTIVE;
 			ClearPageActive(page);
 			nr_active++;
 		}
+		count[lru]++;
+	}
 
 	return nr_active;
 }
 
+/**
+ * isolate_lru_page - tries to isolate a page from its LRU list
+ * @page: page to isolate from its LRU list
+ *
+ * Isolates a @page from an LRU list, clears PageLRU and adjusts the
+ * vmstat statistic corresponding to whatever LRU list the page was on.
+ *
+ * Returns 0 if the page was removed from an LRU list.
+ * Returns -EBUSY if the page was not on an LRU list.
+ *
+ * The returned page will have PageLRU() cleared.  If it was found on
+ * the active list, it will have PageActive set.  If it was found on
+ * the unevictable list, it will have the PageUnevictable bit set. That flag
+ * may need to be cleared by the caller before letting the page go.
+ *
+ * The vmstat statistic corresponding to the list on which the page was
+ * found will be decremented.
+ *
+ * Restrictions:
+ * (1) Must be called with an elevated refcount on the page. This is a
+ *     fundamental difference from isolate_lru_pages (which is called
+ *     without a stable reference).
+ * (2) the lru_lock must not be held.
+ * (3) interrupts must be enabled.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int ret = -EBUSY;
+
+	if (PageLRU(page)) {
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page) && get_page_unless_zero(page)) {
+			int lru = page_lru(page);
+			ret = 0;
+			ClearPageLRU(page);
+
+			del_page_from_lru_list(zone, page, lru);
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+	return ret;
+}
+
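
A short sketch of the calling convention described by the isolate_lru_page()
and putback_lru_page() kernel-doc above: the caller already holds a page
reference, lru_lock is not held and interrupts are enabled.  The wrapper
function and its "page" argument are illustrative only:

	static void example_pull_off_lru(struct page *page)
	{
		get_page(page);			/* restriction (1): elevated refcount */
		if (isolate_lru_page(page) == 0) {
			/* page is off its LRU; PageActive/PageUnevictable preserved */
			putback_lru_page(page);	/* re-adds page, drops the isolate ref */
		}
		put_page(page);			/* drop our own reference */
	}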
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
 static unsigned long shrink_inactive_list(unsigned long max_scan,
-				struct zone *zone, struct scan_control *sc)
+			struct zone *zone, struct scan_control *sc,
+			int priority, int file)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
@@ -866,20 +1043,43 @@
 		unsigned long nr_scan;
 		unsigned long nr_freed;
 		unsigned long nr_active;
+		unsigned int count[NR_LRU_LISTS] = { 0, };
+		int mode = ISOLATE_INACTIVE;
+
+		/*
+		 * If we need a large contiguous chunk of memory, or have
+		 * trouble getting a small set of contiguous pages, we
+		 * will reclaim both active and inactive pages.
+		 *
+		 * We use the same threshold as pageout congestion_wait below.
+		 */
+		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+			mode = ISOLATE_BOTH;
+		else if (sc->order && priority < DEF_PRIORITY - 2)
+			mode = ISOLATE_BOTH;
 
 		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
-			     &page_list, &nr_scan, sc->order,
-			     (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
-					     ISOLATE_BOTH : ISOLATE_INACTIVE,
-				zone, sc->mem_cgroup, 0);
-		nr_active = clear_active_flags(&page_list);
+			     &page_list, &nr_scan, sc->order, mode,
+				zone, sc->mem_cgroup, 0, file);
+		nr_active = clear_active_flags(&page_list, count);
 		__count_vm_events(PGDEACTIVATE, nr_active);
 
-		__mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
-		__mod_zone_page_state(zone, NR_INACTIVE,
-						-(nr_taken - nr_active));
-		if (scan_global_lru(sc))
+		__mod_zone_page_state(zone, NR_ACTIVE_FILE,
+						-count[LRU_ACTIVE_FILE]);
+		__mod_zone_page_state(zone, NR_INACTIVE_FILE,
+						-count[LRU_INACTIVE_FILE]);
+		__mod_zone_page_state(zone, NR_ACTIVE_ANON,
+						-count[LRU_ACTIVE_ANON]);
+		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
+						-count[LRU_INACTIVE_ANON]);
+
+		if (scan_global_lru(sc)) {
 			zone->pages_scanned += nr_scan;
+			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+		}
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -899,7 +1099,7 @@
 			 * The attempt at page out may have made some
 			 * of the pages active, mark them inactive again.
 			 */
-			nr_active = clear_active_flags(&page_list);
+			nr_active = clear_active_flags(&page_list, count);
 			count_vm_events(PGDEACTIVATE, nr_active);
 
 			nr_freed += shrink_page_list(&page_list, sc,
@@ -924,14 +1124,24 @@
 		 * Put back any unfreeable pages.
 		 */
 		while (!list_empty(&page_list)) {
+			int lru;
 			page = lru_to_page(&page_list);
 			VM_BUG_ON(PageLRU(page));
-			SetPageLRU(page);
 			list_del(&page->lru);
-			if (PageActive(page))
-				add_page_to_active_list(zone, page);
-			else
-				add_page_to_inactive_list(zone, page);
+			if (unlikely(!page_evictable(page, NULL))) {
+				spin_unlock_irq(&zone->lru_lock);
+				putback_lru_page(page);
+				spin_lock_irq(&zone->lru_lock);
+				continue;
+			}
+			SetPageLRU(page);
+			lru = page_lru(page);
+			add_page_to_lru_list(zone, page, lru);
+			mem_cgroup_move_lists(page, lru);
+			if (PageActive(page) && scan_global_lru(sc)) {
+				int file = !!page_is_file_cache(page);
+				zone->recent_rotated[file]++;
+			}
 			if (!pagevec_add(&pvec, page)) {
 				spin_unlock_irq(&zone->lru_lock);
 				__pagevec_release(&pvec);
@@ -962,115 +1172,7 @@
 
 static inline int zone_is_near_oom(struct zone *zone)
 {
-	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
-				+ zone_page_state(zone, NR_INACTIVE))*3;
-}
-
-/*
- * Determine we should try to reclaim mapped pages.
- * This is called only when sc->mem_cgroup is NULL.
- */
-static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
-				int priority)
-{
-	long mapped_ratio;
-	long distress;
-	long swap_tendency;
-	long imbalance;
-	int reclaim_mapped = 0;
-	int prev_priority;
-
-	if (scan_global_lru(sc) && zone_is_near_oom(zone))
-		return 1;
-	/*
-	 * `distress' is a measure of how much trouble we're having
-	 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
-	 */
-	if (scan_global_lru(sc))
-		prev_priority = zone->prev_priority;
-	else
-		prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);
-
-	distress = 100 >> min(prev_priority, priority);
-
-	/*
-	 * The point of this algorithm is to decide when to start
-	 * reclaiming mapped memory instead of just pagecache.  Work out
-	 * how much memory
-	 * is mapped.
-	 */
-	if (scan_global_lru(sc))
-		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
-				global_page_state(NR_ANON_PAGES)) * 100) /
-					vm_total_pages;
-	else
-		mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);
-
-	/*
-	 * Now decide how much we really want to unmap some pages.  The
-	 * mapped ratio is downgraded - just because there's a lot of
-	 * mapped memory doesn't necessarily mean that page reclaim
-	 * isn't succeeding.
-	 *
-	 * The distress ratio is important - we don't want to start
-	 * going oom.
-	 *
-	 * A 100% value of vm_swappiness overrides this algorithm
-	 * altogether.
-	 */
-	swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
-
-	/*
-	 * If there's huge imbalance between active and inactive
-	 * (think active 100 times larger than inactive) we should
-	 * become more permissive, or the system will take too much
-	 * cpu before it start swapping during memory pressure.
-	 * Distress is about avoiding early-oom, this is about
-	 * making swappiness graceful despite setting it to low
-	 * values.
-	 *
-	 * Avoid div by zero with nr_inactive+1, and max resulting
-	 * value is vm_total_pages.
-	 */
-	if (scan_global_lru(sc)) {
-		imbalance  = zone_page_state(zone, NR_ACTIVE);
-		imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
-	} else
-		imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);
-
-	/*
-	 * Reduce the effect of imbalance if swappiness is low,
-	 * this means for a swappiness very low, the imbalance
-	 * must be much higher than 100 for this logic to make
-	 * the difference.
-	 *
-	 * Max temporary value is vm_total_pages*100.
-	 */
-	imbalance *= (vm_swappiness + 1);
-	imbalance /= 100;
-
-	/*
-	 * If not much of the ram is mapped, makes the imbalance
-	 * less relevant, it's high priority we refill the inactive
-	 * list with mapped pages only in presence of high ratio of
-	 * mapped pages.
-	 *
-	 * Max temporary value is vm_total_pages*100.
-	 */
-	imbalance *= mapped_ratio;
-	imbalance /= 100;
-
-	/* apply imbalance feedback to swap_tendency */
-	swap_tendency += imbalance;
-
-	/*
-	 * Now use this metric to decide whether to start moving mapped
-	 * memory onto the inactive list.
-	 */
-	if (swap_tendency >= 100)
-		reclaim_mapped = 1;
-
-	return reclaim_mapped;
+	return zone->pages_scanned >= (zone_lru_pages(zone) * 3);
 }
 
 /*
@@ -1093,53 +1195,71 @@
 
 
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-				struct scan_control *sc, int priority)
+			struct scan_control *sc, int priority, int file)
 {
 	unsigned long pgmoved;
 	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
-	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
-	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
+	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct pagevec pvec;
-	int reclaim_mapped = 0;
-
-	if (sc->may_swap)
-		reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);
+	enum lru_list lru;
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
 	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
 					ISOLATE_ACTIVE, zone,
-					sc->mem_cgroup, 1);
+					sc->mem_cgroup, 1, file);
 	/*
 	 * zone->pages_scanned is used for detect zone's oom
 	 * mem_cgroup remembers nr_scan by itself.
 	 */
-	if (scan_global_lru(sc))
+	if (scan_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
+		zone->recent_scanned[!!file] += pgmoved;
+	}
 
-	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
+	if (file)
+		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+	else
+		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
+	pgmoved = 0;
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
-		if (page_mapped(page)) {
-			if (!reclaim_mapped ||
-			    (total_swap_pages == 0 && PageAnon(page)) ||
-			    page_referenced(page, 0, sc->mem_cgroup)) {
-				list_add(&page->lru, &l_active);
-				continue;
-			}
+
+		if (unlikely(!page_evictable(page, NULL))) {
+			putback_lru_page(page);
+			continue;
 		}
+
+		/* page_referenced clears PageReferenced */
+		if (page_mapping_inuse(page) &&
+		    page_referenced(page, 0, sc->mem_cgroup))
+			pgmoved++;
+
 		list_add(&page->lru, &l_inactive);
 	}
 
+	/*
+	 * Count referenced pages from currently used mappings as
+	 * rotated, even though they are moved to the inactive list.
+	 * This helps balance scan pressure between file and anonymous
+	 * pages in get_scan_ratio.
+	 */
+	zone->recent_rotated[!!file] += pgmoved;
+
+	/*
+	 * Move the pages to the [file or anon] inactive list.
+	 */
 	pagevec_init(&pvec, 1);
+
 	pgmoved = 0;
+	lru = LRU_BASE + file * LRU_FILE;
 	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
@@ -1149,11 +1269,11 @@
 		VM_BUG_ON(!PageActive(page));
 		ClearPageActive(page);
 
-		list_move(&page->lru, &zone->inactive_list);
-		mem_cgroup_move_lists(page, false);
+		list_move(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_move_lists(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
+			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
 			pgdeactivate += pgmoved;
 			pgmoved = 0;
@@ -1163,104 +1283,189 @@
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 	pgdeactivate += pgmoved;
 	if (buffer_heads_over_limit) {
 		spin_unlock_irq(&zone->lru_lock);
 		pagevec_strip(&pvec);
 		spin_lock_irq(&zone->lru_lock);
 	}
-
-	pgmoved = 0;
-	while (!list_empty(&l_active)) {
-		page = lru_to_page(&l_active);
-		prefetchw_prev_lru_page(page, &l_active, flags);
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		VM_BUG_ON(!PageActive(page));
-
-		list_move(&page->lru, &zone->active_list);
-		mem_cgroup_move_lists(page, true);
-		pgmoved++;
-		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
-			pgmoved = 0;
-			spin_unlock_irq(&zone->lru_lock);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
-		}
-	}
-	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
-
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
 	spin_unlock_irq(&zone->lru_lock);
+	if (vm_swap_full())
+		pagevec_swap_free(&pvec);
 
 	pagevec_release(&pvec);
 }
 
+static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
+	struct zone *zone, struct scan_control *sc, int priority)
+{
+	int file = is_file_lru(lru);
+
+	if (lru == LRU_ACTIVE_FILE) {
+		shrink_active_list(nr_to_scan, zone, sc, priority, file);
+		return 0;
+	}
+
+	if (lru == LRU_ACTIVE_ANON &&
+	    (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
+		shrink_active_list(nr_to_scan, zone, sc, priority, file);
+		return 0;
+	}
+	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
+}
+
+/*
+ * Determine how aggressively the anon and file LRU lists should be
+ * scanned.  The relative value of each set of LRU lists is determined
+ * by looking at the fraction of the pages scanned we did rotate back
+ * onto the active list instead of evict.
+ *
+ * percent[0] specifies how much pressure to put on ram/swap backed
+ * memory, while percent[1] determines pressure on the file LRUs.
+ */
+static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
+					unsigned long *percent)
+{
+	unsigned long anon, file, free;
+	unsigned long anon_prio, file_prio;
+	unsigned long ap, fp;
+
+	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
+		zone_page_state(zone, NR_INACTIVE_ANON);
+	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
+		zone_page_state(zone, NR_INACTIVE_FILE);
+	free  = zone_page_state(zone, NR_FREE_PAGES);
+
+	/* If we have no swap space, do not bother scanning anon pages. */
+	if (nr_swap_pages <= 0) {
+		percent[0] = 0;
+		percent[1] = 100;
+		return;
+	}
+
+	/* If we have very few page cache pages, force-scan anon pages. */
+	if (unlikely(file + free <= zone->pages_high)) {
+		percent[0] = 100;
+		percent[1] = 0;
+		return;
+	}
+
+	/*
+	 * OK, so we have swap space and a fair amount of page cache
+	 * pages.  We use the recently rotated / recently scanned
+	 * ratios to determine how valuable each cache is.
+	 *
+	 * Because workloads change over time (and to avoid overflow)
+	 * we keep these statistics as a floating average, which ends
+	 * up weighing recent references more than old ones.
+	 *
+	 * anon in [0], file in [1]
+	 */
+	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+		spin_lock_irq(&zone->lru_lock);
+		zone->recent_scanned[0] /= 2;
+		zone->recent_rotated[0] /= 2;
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
+	if (unlikely(zone->recent_scanned[1] > file / 4)) {
+		spin_lock_irq(&zone->lru_lock);
+		zone->recent_scanned[1] /= 2;
+		zone->recent_rotated[1] /= 2;
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
+	/*
+	 * With swappiness at 100, anonymous and file have the same priority.
+	 * This scanning priority is essentially the inverse of IO cost.
+	 */
+	anon_prio = sc->swappiness;
+	file_prio = 200 - sc->swappiness;
+
+	/*
+	 *                  anon       recent_rotated[0]
+	 * %anon = 100 * ----------- / ----------------- * IO cost
+	 *               anon + file      rotate_sum
+	 */
+	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
+	ap /= zone->recent_rotated[0] + 1;
+
+	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
+	fp /= zone->recent_rotated[1] + 1;
+
+	/* Normalize to percentages */
+	percent[0] = 100 * ap / (ap + fp + 1);
+	percent[1] = 100 - percent[0];
+}
+
+
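
A worked example of the get_scan_ratio() arithmetic above, using assumed
numbers: with swappiness = 60, anon_prio = 60 and file_prio = 140.  If
recent_scanned is {anon: 1000, file: 4000} and recent_rotated is
{anon: 500, file: 100}, then ap = 61 * 1001 / 501 = 121 and
fp = 141 * 4001 / 101 = 5585, so percent[0] = 100 * 121 / (121 + 5585 + 1) = 2
and percent[1] = 98: the file LRUs, whose pages are rarely rotated back onto
the active list, absorb nearly all of the scan pressure.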
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static unsigned long shrink_zone(int priority, struct zone *zone,
 				struct scan_control *sc)
 {
-	unsigned long nr_active;
-	unsigned long nr_inactive;
+	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
+	unsigned long percent[2];	/* anon @ 0; file @ 1 */
+	enum lru_list l;
 
-	if (scan_global_lru(sc)) {
-		/*
-		 * Add one to nr_to_scan just to make sure that the kernel
-		 * will slowly sift through the active list.
-		 */
-		zone->nr_scan_active +=
-			(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
-		nr_active = zone->nr_scan_active;
-		zone->nr_scan_inactive +=
-			(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
-		nr_inactive = zone->nr_scan_inactive;
-		if (nr_inactive >= sc->swap_cluster_max)
-			zone->nr_scan_inactive = 0;
-		else
-			nr_inactive = 0;
+	get_scan_ratio(zone, sc, percent);
 
-		if (nr_active >= sc->swap_cluster_max)
-			zone->nr_scan_active = 0;
-		else
-			nr_active = 0;
-	} else {
-		/*
-		 * This reclaim occurs not because zone memory shortage but
-		 * because memory controller hits its limit.
-		 * Then, don't modify zone reclaim related data.
-		 */
-		nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
-					zone, priority);
+	for_each_evictable_lru(l) {
+		if (scan_global_lru(sc)) {
+			int file = is_file_lru(l);
+			int scan;
 
-		nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
-					zone, priority);
-	}
-
-
-	while (nr_active || nr_inactive) {
-		if (nr_active) {
-			nr_to_scan = min(nr_active,
-					(unsigned long)sc->swap_cluster_max);
-			nr_active -= nr_to_scan;
-			shrink_active_list(nr_to_scan, zone, sc, priority);
-		}
-
-		if (nr_inactive) {
-			nr_to_scan = min(nr_inactive,
-					(unsigned long)sc->swap_cluster_max);
-			nr_inactive -= nr_to_scan;
-			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
-								sc);
+			scan = zone_page_state(zone, NR_LRU_BASE + l);
+			if (priority) {
+				scan >>= priority;
+				scan = (scan * percent[file]) / 100;
+			}
+			zone->lru[l].nr_scan += scan;
+			nr[l] = zone->lru[l].nr_scan;
+			if (nr[l] >= sc->swap_cluster_max)
+				zone->lru[l].nr_scan = 0;
+			else
+				nr[l] = 0;
+		} else {
+			/*
+			 * This reclaim occurs not because zone memory shortage
+			 * but because memory controller hits its limit.
+			 * Don't modify zone reclaim related data.
+			 */
+			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
+								priority, l);
 		}
 	}
 
+	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+					nr[LRU_INACTIVE_FILE]) {
+		for_each_evictable_lru(l) {
+			if (nr[l]) {
+				nr_to_scan = min(nr[l],
+					(unsigned long)sc->swap_cluster_max);
+				nr[l] -= nr_to_scan;
+
+				nr_reclaimed += shrink_list(l, nr_to_scan,
+							zone, sc, priority);
+			}
+		}
+	}
+
+	/*
+	 * Even if we did not try to evict anon pages at all, we want to
+	 * rebalance the anon lru active/inactive ratio.
+	 */
+	if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
+		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
+	else if (!scan_global_lru(sc))
+		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
+
 	throttle_vm_writeout(sc->gfp_mask);
 	return nr_reclaimed;
 }
@@ -1321,7 +1526,7 @@
 
 	return nr_reclaimed;
 }
- 
+
 /*
  * This is the main entry point to direct page reclaim.
  *
@@ -1364,8 +1569,7 @@
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
-			lru_pages += zone_page_state(zone, NR_ACTIVE)
-					+ zone_page_state(zone, NR_INACTIVE);
+			lru_pages += zone_lru_pages(zone);
 		}
 	}
 
@@ -1555,6 +1759,14 @@
 			    priority != DEF_PRIORITY)
 				continue;
 
+			/*
+			 * Do some background aging of the anon list, to give
+			 * pages a chance to be referenced before reclaiming.
+			 */
+			if (inactive_anon_is_low(zone))
+				shrink_active_list(SWAP_CLUSTER_MAX, zone,
+							&sc, priority, 0);
+
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
 					       0, 0)) {
 				end_zone = i;
@@ -1567,8 +1779,7 @@
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone_page_state(zone, NR_ACTIVE)
-					+ zone_page_state(zone, NR_INACTIVE);
+			lru_pages += zone_lru_pages(zone);
 		}
 
 		/*
@@ -1612,8 +1823,7 @@
 			if (zone_is_all_unreclaimable(zone))
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-				(zone_page_state(zone, NR_ACTIVE)
-				+ zone_page_state(zone, NR_INACTIVE)) * 6)
+						(zone_lru_pages(zone) * 6))
 					zone_set_flag(zone,
 						      ZONE_ALL_UNRECLAIMABLE);
 			/*
@@ -1667,7 +1877,7 @@
 
 /*
  * The background pageout daemon, started as a kernel thread
- * from the init process. 
+ * from the init process.
  *
  * This basically trickles out pages so that we have _some_
  * free memory available even if there is no other activity
@@ -1761,6 +1971,14 @@
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
+unsigned long global_lru_pages(void)
+{
+	return global_page_state(NR_ACTIVE_ANON)
+		+ global_page_state(NR_ACTIVE_FILE)
+		+ global_page_state(NR_INACTIVE_ANON)
+		+ global_page_state(NR_INACTIVE_FILE);
+}
+
 #ifdef CONFIG_PM
 /*
  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
@@ -1774,6 +1992,7 @@
 {
 	struct zone *zone;
 	unsigned long nr_to_scan, ret = 0;
+	enum lru_list l;
 
 	for_each_zone(zone) {
 
@@ -1783,38 +2002,31 @@
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
-		/* For pass = 0 we don't shrink the active list */
-		if (pass > 0) {
-			zone->nr_scan_active +=
-				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
-			if (zone->nr_scan_active >= nr_pages || pass > 3) {
-				zone->nr_scan_active = 0;
-				nr_to_scan = min(nr_pages,
-					zone_page_state(zone, NR_ACTIVE));
-				shrink_active_list(nr_to_scan, zone, sc, prio);
-			}
-		}
+		for_each_evictable_lru(l) {
+			/* For pass = 0, we don't shrink the active list */
+			if (pass == 0 &&
+				(l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+				continue;
 
-		zone->nr_scan_inactive +=
-			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
-		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
-			zone->nr_scan_inactive = 0;
-			nr_to_scan = min(nr_pages,
-				zone_page_state(zone, NR_INACTIVE));
-			ret += shrink_inactive_list(nr_to_scan, zone, sc);
-			if (ret >= nr_pages)
-				return ret;
+			zone->lru[l].nr_scan +=
+				(zone_page_state(zone, NR_LRU_BASE + l)
+								>> prio) + 1;
+			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				zone->lru[l].nr_scan = 0;
+				nr_to_scan = min(nr_pages,
+					zone_page_state(zone,
+							NR_LRU_BASE + l));
+				ret += shrink_list(l, nr_to_scan, zone,
+								sc, prio);
+				if (ret >= nr_pages)
+					return ret;
+			}
 		}
 	}
 
 	return ret;
 }
 
-static unsigned long count_lru_pages(void)
-{
-	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
-}
-
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1840,7 +2052,7 @@
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = count_lru_pages();
+	lru_pages = global_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1883,7 +2095,7 @@
 
 			reclaim_state.reclaimed_slab = 0;
 			shrink_slab(sc.nr_scanned, sc.gfp_mask,
-					count_lru_pages());
+					global_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1900,7 +2112,7 @@
 	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
+			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
 	}
@@ -2128,3 +2340,285 @@
 	return ret;
 }
 #endif
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * page_evictable - test whether a page is evictable
+ * @page: the page to test
+ * @vma: the VMA in which the page is or will be mapped, may be NULL
+ *
+ * Test whether page is evictable--i.e., should be placed on active/inactive
+ * lists vs unevictable list.  The vma argument is !NULL when called from the
+ * fault path to determine how to instantiate a new page.
+ *
+ * Reasons page might not be evictable:
+ * (1) page's mapping marked unevictable
+ * (2) page is part of an mlocked VMA
+ *
+ */
+int page_evictable(struct page *page, struct vm_area_struct *vma)
+{
+
+	if (mapping_unevictable(page_mapping(page)))
+		return 0;
+
+	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+		return 0;
+
+	return 1;
+}
+
+static void show_page_path(struct page *page)
+{
+	char buf[256];
+	if (page_is_file_cache(page)) {
+		struct address_space *mapping = page->mapping;
+		struct dentry *dentry;
+		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+		spin_lock(&mapping->i_mmap_lock);
+		dentry = d_find_alias(mapping->host);
+		printk(KERN_INFO "rescued: %s %lu\n",
+		       dentry_path(dentry, buf, 256), pgoff);
+		spin_unlock(&mapping->i_mmap_lock);
+	} else {
+#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
+		struct anon_vma *anon_vma;
+		struct vm_area_struct *vma;
+
+		anon_vma = page_lock_anon_vma(page);
+		if (!anon_vma)
+			return;
+
+		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+			printk(KERN_INFO "rescued: anon %s\n",
+			       vma->vm_mm->owner->comm);
+			break;
+		}
+		page_unlock_anon_vma(anon_vma);
+#endif
+	}
+}
+
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+	VM_BUG_ON(PageActive(page));
+
+retry:
+	ClearPageUnevictable(page);
+	if (page_evictable(page, NULL)) {
+		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+
+		show_page_path(page);
+
+		__dec_zone_state(zone, NR_UNEVICTABLE);
+		list_move(&page->lru, &zone->lru[l].list);
+		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
+		__count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		/*
+		 * rotate unevictable list
+		 */
+		SetPageUnevictable(page);
+		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		if (page_evictable(page, NULL))
+			goto retry;
+	}
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+	pgoff_t next = 0;
+	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+			 PAGE_CACHE_SHIFT;
+	struct zone *zone;
+	struct pagevec pvec;
+
+	if (mapping->nrpages == 0)
+		return;
+
+	pagevec_init(&pvec, 0);
+	while (next < end &&
+		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		int i;
+		int pg_scanned = 0;
+
+		zone = NULL;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			pgoff_t page_index = page->index;
+			struct zone *pagezone = page_zone(page);
+
+			pg_scanned++;
+			if (page_index > next)
+				next = page_index;
+			next++;
+
+			if (pagezone != zone) {
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock_irq(&zone->lru_lock);
+			}
+
+			if (PageLRU(page) && PageUnevictable(page))
+				check_move_unevictable_page(page, zone);
+		}
+		if (zone)
+			spin_unlock_irq(&zone->lru_lock);
+		pagevec_release(&pvec);
+
+		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+	}
+
+}
+
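
A hedged sketch of how scan_mapping_unevictable_pages() is meant to be used:
when a mapping stops being unevictable, clear its flag and then rescue any of
its pages still stranded on the unevictable lists.  The surrounding context
and the "mapping" variable are assumptions, not part of this patch:

	/* mapping is no longer pinned unevictable (e.g. after an unlock) */
	mapping_clear_unevictable(mapping);
	scan_mapping_unevictable_pages(mapping);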
+/**
+ * scan_zone_unevictable_pages - check unevictable list for evictable pages
+ * @zone: zone whose unevictable list is to be scanned
+ *
+ * Scan @zone's unevictable LRU lists to check for pages that have become
+ * evictable.  Move any such pages to @zone's inactive list, where they
+ * become candidates for reclaim, unless shrink_inactive_list() decides
+ * to reactivate them.  Pages that are still unevictable are rotated
+ * back onto @zone's unevictable list.
+ */
+#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
+void scan_zone_unevictable_pages(struct zone *zone)
+{
+	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
+	unsigned long scan;
+	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
+
+	while (nr_to_scan > 0) {
+		unsigned long batch_size = min(nr_to_scan,
+						SCAN_UNEVICTABLE_BATCH_SIZE);
+
+		spin_lock_irq(&zone->lru_lock);
+		for (scan = 0;  scan < batch_size; scan++) {
+			struct page *page = lru_to_page(l_unevictable);
+
+			if (!trylock_page(page))
+				continue;
+
+			prefetchw_prev_lru_page(page, l_unevictable, flags);
+
+			if (likely(PageLRU(page) && PageUnevictable(page)))
+				check_move_unevictable_page(page, zone);
+
+			unlock_page(page);
+		}
+		spin_unlock_irq(&zone->lru_lock);
+
+		nr_to_scan -= batch_size;
+	}
+}
+
+
+/**
+ * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
+ *
+ * A really big hammer:  scan all zones' unevictable LRU lists to check for
+ * pages that have become evictable.  Move those back to the zones'
+ * inactive list where they become candidates for reclaim.
+ * This occurs when, e.g., we have unswappable pages on the unevictable lists,
+ * and we add swap to the system.  As such, it runs in the context of a task
+ * that has possibly/probably made some previously unevictable pages
+ * evictable.
+ */
+void scan_all_zones_unevictable_pages(void)
+{
+	struct zone *zone;
+
+	for_each_zone(zone) {
+		scan_zone_unevictable_pages(zone);
+	}
+}
+
+/*
+ * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
+ * all nodes' unevictable lists for evictable pages
+ */
+unsigned long scan_unevictable_pages;
+
+int scan_unevictable_handler(struct ctl_table *table, int write,
+			   struct file *file, void __user *buffer,
+			   size_t *length, loff_t *ppos)
+{
+	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+
+	if (write && *(unsigned long *)table->data)
+		scan_all_zones_unevictable_pages();
+
+	scan_unevictable_pages = 0;
+	return 0;
+}
+
+/*
+ * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
+ * a specified node's per zone unevictable lists for evictable pages.
+ */
+
+static ssize_t read_scan_unevictable_node(struct sys_device *dev,
+					  struct sysdev_attribute *attr,
+					  char *buf)
+{
+	return sprintf(buf, "0\n");	/* always zero; should fit... */
+}
+
+static ssize_t write_scan_unevictable_node(struct sys_device *dev,
+					   struct sysdev_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
+	struct zone *zone;
+	unsigned long res;
+	unsigned long req = strict_strtoul(buf, 10, &res);
+
+	if (req || !res)
+		return 1;	/* zero is no-op */
+
+	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+		if (!populated_zone(zone))
+			continue;
+		scan_zone_unevictable_pages(zone);
+	}
+	return 1;
+}
+
+
+static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+			read_scan_unevictable_node,
+			write_scan_unevictable_node);
+
+int scan_unevictable_register_node(struct node *node)
+{
+	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
+void scan_unevictable_unregister_node(struct node *node)
+{
+	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
+#endif
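
The per-node attribute registered above by scan_unevictable_register_node()
can be poked from userspace to force a rescan.  A small sketch, assuming the
node sysdev is exposed at the usual /sys/devices/system/node/node0 location:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* a non-zero value asks node 0 to rescan its unevictable lists */
		int fd = open("/sys/devices/system/node/node0/scan_unevictable_pages",
			      O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}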
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d7826af..9343227 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -619,8 +619,14 @@
 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_free_pages",
-	"nr_inactive",
-	"nr_active",
+	"nr_inactive_anon",
+	"nr_active_anon",
+	"nr_inactive_file",
+	"nr_active_file",
+#ifdef CONFIG_UNEVICTABLE_LRU
+	"nr_unevictable",
+	"nr_mlock",
+#endif
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
@@ -675,6 +681,16 @@
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+	"unevictable_pgs_culled",
+	"unevictable_pgs_scanned",
+	"unevictable_pgs_rescued",
+	"unevictable_pgs_mlocked",
+	"unevictable_pgs_munlocked",
+	"unevictable_pgs_cleared",
+	"unevictable_pgs_stranded",
+	"unevictable_pgs_mlockfreed",
+#endif
 #endif
 };
 
@@ -688,7 +704,7 @@
 		   "\n        min      %lu"
 		   "\n        low      %lu"
 		   "\n        high     %lu"
-		   "\n        scanned  %lu (a: %lu i: %lu)"
+		   "\n        scanned  %lu (aa: %lu ia: %lu af: %lu if: %lu)"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
@@ -696,7 +712,10 @@
 		   zone->pages_low,
 		   zone->pages_high,
 		   zone->pages_scanned,
-		   zone->nr_scan_active, zone->nr_scan_inactive,
+		   zone->lru[LRU_ACTIVE_ANON].nr_scan,
+		   zone->lru[LRU_INACTIVE_ANON].nr_scan,
+		   zone->lru[LRU_ACTIVE_FILE].nr_scan,
+		   zone->lru[LRU_INACTIVE_FILE].nr_scan,
 		   zone->spanned_pages,
 		   zone->present_pages);
 
@@ -733,10 +752,12 @@
 	seq_printf(m,
 		   "\n  all_unreclaimable: %u"
 		   "\n  prev_priority:     %i"
-		   "\n  start_pfn:         %lu",
+		   "\n  start_pfn:         %lu"
+		   "\n  inactive_ratio:    %u",
 			   zone_is_all_unreclaimable(zone),
 		   zone->prev_priority,
-		   zone->zone_start_pfn);
+		   zone->zone_start_pfn,
+		   zone->inactive_ratio);
 	seq_putc(m, '\n');
 }
 
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index ff34c5a..c42c0c4 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -20,6 +20,12 @@
 	  This builds support for a transports between
 	  guest partitions and a host partition.
 
+config NET_9P_RDMA
+	depends on NET_9P && INFINIBAND && EXPERIMENTAL
+	tristate "9P RDMA Transport (Experimental)"
+	help
+	  This builds support for an RDMA transport.
+
 config NET_9P_DEBUG
 	bool "Debug information"
 	depends on NET_9P
diff --git a/net/9p/Makefile b/net/9p/Makefile
index 5192194..198a640 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -1,14 +1,17 @@
 obj-$(CONFIG_NET_9P) := 9pnet.o
 obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
+obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
 
 9pnet-objs := \
 	mod.o \
 	client.o \
-	conv.o \
 	error.o \
-	fcprint.o \
 	util.o \
+	protocol.o \
 	trans_fd.o \
 
 9pnet_virtio-objs := \
 	trans_virtio.o \
+
+9pnet_rdma-objs := \
+	trans_rdma.o \
diff --git a/net/9p/client.c b/net/9p/client.c
index e053e06..67717f6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -33,12 +33,9 @@
 #include <linux/uaccess.h>
 #include <net/9p/9p.h>
 #include <linux/parser.h>
-#include <net/9p/transport.h>
 #include <net/9p/client.h>
-
-static struct p9_fid *p9_fid_create(struct p9_client *clnt);
-static void p9_fid_destroy(struct p9_fid *fid);
-static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu);
+#include <net/9p/transport.h>
+#include "protocol.h"
 
 /*
   * Client Option Parsing (code inspired by NFS code)
@@ -59,6 +56,9 @@
 	{Opt_err, NULL},
 };
 
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
+
 /**
  * v9fs_parse_options - parse mount options into session structure
  * @options: options string passed from mount
@@ -124,922 +124,497 @@
 	return ret;
 }
 
+/**
+ * p9_tag_alloc - lookup/allocate a request by tag
+ * @c: client session to lookup tag within
+ * @tag: numeric id for transaction
+ *
+ * this is a simple array lookup, but will grow the
+ * request_slots as necessary to accommodate transaction
+ * ids which did not previously have a slot.
+ *
+ * this code relies on the client spinlock to manage locks, it's
+ * possible we should switch to something else, but I'd rather
+ * stick with something low-overhead for the common case.
+ *
+ */
+
+static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
+{
+	unsigned long flags;
+	int row, col;
+	struct p9_req_t *req;
+
+	/* This looks up the original request by tag so we know which
+	 * buffer to read the data into */
+	tag++;
+
+	if (tag >= c->max_tag) {
+		spin_lock_irqsave(&c->lock, flags);
+		/* check again since original check was outside of lock */
+		while (tag >= c->max_tag) {
+			row = (tag / P9_ROW_MAXTAG);
+			c->reqs[row] = kcalloc(P9_ROW_MAXTAG,
+					sizeof(struct p9_req_t), GFP_ATOMIC);
+
+			if (!c->reqs[row]) {
+				printk(KERN_ERR "Couldn't grow tag array\n");
+				spin_unlock_irqrestore(&c->lock, flags);
+				return ERR_PTR(-ENOMEM);
+			}
+			for (col = 0; col < P9_ROW_MAXTAG; col++) {
+				c->reqs[row][col].status = REQ_STATUS_IDLE;
+				c->reqs[row][col].tc = NULL;
+			}
+			c->max_tag += P9_ROW_MAXTAG;
+		}
+		spin_unlock_irqrestore(&c->lock, flags);
+	}
+	row = tag / P9_ROW_MAXTAG;
+	col = tag % P9_ROW_MAXTAG;
+
+	req = &c->reqs[row][col];
+	if (!req->tc) {
+		req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
+		if (!req->wq) {
+			printk(KERN_ERR "Couldn't grow tag array\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		init_waitqueue_head(req->wq);
+		req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
+								GFP_KERNEL);
+		req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
+								GFP_KERNEL);
+		if ((!req->tc) || (!req->rc)) {
+			printk(KERN_ERR "Couldn't grow tag array\n");
+			kfree(req->tc);
+			kfree(req->rc);
+			return ERR_PTR(-ENOMEM);
+		}
+		req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
+		req->tc->capacity = c->msize;
+		req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
+		req->rc->capacity = c->msize;
+	}
+
+	p9pdu_reset(req->tc);
+	p9pdu_reset(req->rc);
+
+	req->flush_tag = 0;
+	req->tc->tag = tag-1;
+	req->status = REQ_STATUS_ALLOC;
+
+	return &c->reqs[row][col];
+}
 
 /**
- * p9_client_rpc - sends 9P request and waits until a response is available.
- *      The function can be interrupted.
- * @c: client data
- * @tc: request to be sent
- * @rc: pointer where a pointer to the response is stored
+ * p9_tag_lookup - lookup a request by tag
+ * @c: client session to lookup tag within
+ * @tag: numeric id for transaction
+ *
  */
-int
-p9_client_rpc(struct p9_client *c, struct p9_fcall *tc,
-	struct p9_fcall **rc)
+
+struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
 {
-	return c->trans->rpc(c->trans, tc, rc);
+	int row, col;
+
+	/* This looks up the original request by tag so we know which
+	 * buffer to read the data into */
+	tag++;
+
+	BUG_ON(tag >= c->max_tag);
+
+	row = tag / P9_ROW_MAXTAG;
+	col = tag % P9_ROW_MAXTAG;
+
+	return &c->reqs[row][col];
 }
+EXPORT_SYMBOL(p9_tag_lookup);
 
-struct p9_client *p9_client_create(const char *dev_name, char *options)
+/**
+ * p9_tag_init - set up the tags structure and contents
+ * @c: client instance whose tag structures will be initialized
+ *
+ * This initializes the tags structure for each client instance.
+ *
+ */
+
+static int p9_tag_init(struct p9_client *c)
 {
-	int err, n;
-	struct p9_client *clnt;
-	struct p9_fcall *tc, *rc;
-	struct p9_str *version;
+	int err = 0;
 
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = kmalloc(sizeof(struct p9_client), GFP_KERNEL);
-	if (!clnt)
-		return ERR_PTR(-ENOMEM);
-
-	clnt->trans_mod = NULL;
-	clnt->trans = NULL;
-	spin_lock_init(&clnt->lock);
-	INIT_LIST_HEAD(&clnt->fidlist);
-	clnt->fidpool = p9_idpool_create();
-	if (IS_ERR(clnt->fidpool)) {
-		err = PTR_ERR(clnt->fidpool);
-		clnt->fidpool = NULL;
+	c->tagpool = p9_idpool_create();
+	if (IS_ERR(c->tagpool)) {
+		err = PTR_ERR(c->tagpool);
+		c->tagpool = NULL;
 		goto error;
 	}
 
-	err = parse_opts(options, clnt);
-	if (err < 0)
-		goto error;
+	p9_idpool_get(c->tagpool); /* reserve tag 0 */
 
-	if (clnt->trans_mod == NULL) {
-		err = -EPROTONOSUPPORT;
-		P9_DPRINTK(P9_DEBUG_ERROR,
-				"No transport defined or default transport\n");
-		goto error;
-	}
-
-	P9_DPRINTK(P9_DEBUG_9P, "clnt %p trans %p msize %d dotu %d\n",
-		clnt, clnt->trans_mod, clnt->msize, clnt->dotu);
-
-
-	clnt->trans = clnt->trans_mod->create(dev_name, options, clnt->msize,
-								clnt->dotu);
-	if (IS_ERR(clnt->trans)) {
-		err = PTR_ERR(clnt->trans);
-		clnt->trans = NULL;
-		goto error;
-	}
-
-	if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
-		clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
-
-	tc = p9_create_tversion(clnt->msize, clnt->dotu?"9P2000.u":"9P2000");
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto error;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto error;
-
-	version = &rc->params.rversion.version;
-	if (version->len == 8 && !memcmp(version->str, "9P2000.u", 8))
-		clnt->dotu = 1;
-	else if (version->len == 6 && !memcmp(version->str, "9P2000", 6))
-		clnt->dotu = 0;
-	else {
-		err = -EREMOTEIO;
-		goto error;
-	}
-
-	n = rc->params.rversion.msize;
-	if (n < clnt->msize)
-		clnt->msize = n;
-
-	kfree(tc);
-	kfree(rc);
-	return clnt;
-
+	c->max_tag = 0;
 error:
-	kfree(tc);
-	kfree(rc);
-	p9_client_destroy(clnt);
-	return ERR_PTR(err);
+	return err;
 }
-EXPORT_SYMBOL(p9_client_create);
 
-void p9_client_destroy(struct p9_client *clnt)
+/**
+ * p9_tag_cleanup - clean up the tags structure and reclaim resources
+ * @c: client instance whose tag structures are freed
+ *
+ * This frees resources associated with the tags structure
+ *
+ */
+static void p9_tag_cleanup(struct p9_client *c)
 {
-	struct p9_fid *fid, *fidptr;
+	int row, col;
 
-	P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
-
-	if (clnt->trans) {
-		clnt->trans->close(clnt->trans);
-		kfree(clnt->trans);
-		clnt->trans = NULL;
-	}
-
-	v9fs_put_trans(clnt->trans_mod);
-
-	list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist)
-		p9_fid_destroy(fid);
-
-	if (clnt->fidpool)
-		p9_idpool_destroy(clnt->fidpool);
-
-	kfree(clnt);
-}
-EXPORT_SYMBOL(p9_client_destroy);
-
-void p9_client_disconnect(struct p9_client *clnt)
-{
-	P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
-	clnt->trans->status = Disconnected;
-}
-EXPORT_SYMBOL(p9_client_disconnect);
-
-struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
-	char *uname, u32 n_uname, char *aname)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_fid *fid;
-
-	P9_DPRINTK(P9_DEBUG_9P, "clnt %p afid %d uname %s aname %s\n",
-		clnt, afid?afid->fid:-1, uname, aname);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-
-	fid = p9_fid_create(clnt);
-	if (IS_ERR(fid)) {
-		err = PTR_ERR(fid);
-		fid = NULL;
-		goto error;
-	}
-
-	tc = p9_create_tattach(fid->fid, afid?afid->fid:P9_NOFID, uname, aname,
-		n_uname, clnt->dotu);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto error;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto error;
-
-	memmove(&fid->qid, &rc->params.rattach.qid, sizeof(struct p9_qid));
-	kfree(tc);
-	kfree(rc);
-	return fid;
-
-error:
-	kfree(tc);
-	kfree(rc);
-	if (fid)
-		p9_fid_destroy(fid);
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL(p9_client_attach);
-
-struct p9_fid *p9_client_auth(struct p9_client *clnt, char *uname,
-	u32 n_uname, char *aname)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_fid *fid;
-
-	P9_DPRINTK(P9_DEBUG_9P, "clnt %p uname %s aname %s\n", clnt, uname,
-									aname);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-
-	fid = p9_fid_create(clnt);
-	if (IS_ERR(fid)) {
-		err = PTR_ERR(fid);
-		fid = NULL;
-		goto error;
-	}
-
-	tc = p9_create_tauth(fid->fid, uname, aname, n_uname, clnt->dotu);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto error;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto error;
-
-	memmove(&fid->qid, &rc->params.rauth.qid, sizeof(struct p9_qid));
-	kfree(tc);
-	kfree(rc);
-	return fid;
-
-error:
-	kfree(tc);
-	kfree(rc);
-	if (fid)
-		p9_fid_destroy(fid);
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL(p9_client_auth);
-
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
-	int clone)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-	struct p9_fid *fid;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d nwname %d wname[0] %s\n",
-		oldfid->fid, nwname, wnames?wnames[0]:NULL);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = oldfid->clnt;
-	if (clone) {
-		fid = p9_fid_create(clnt);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			fid = NULL;
-			goto error;
+	/* check to ensure all requests are idle */
+	for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
+		for (col = 0; col < P9_ROW_MAXTAG; col++) {
+			if (c->reqs[row][col].status != REQ_STATUS_IDLE) {
+				P9_DPRINTK(P9_DEBUG_MUX,
+				  "Attempting to cleanup non-free tag %d,%d\n",
+				  row, col);
+				/* TODO: delay execution of cleanup */
+				return;
+			}
 		}
-
-		fid->uid = oldfid->uid;
-	} else
-		fid = oldfid;
-
-	tc = p9_create_twalk(oldfid->fid, fid->fid, nwname, wnames);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto error;
 	}
 
-	err = p9_client_rpc(clnt, tc, &rc);
+	if (c->tagpool)
+		p9_idpool_destroy(c->tagpool);
+
+	/* free requests associated with tags */
+	for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
+		for (col = 0; col < P9_ROW_MAXTAG; col++) {
+			kfree(c->reqs[row][col].wq);
+			kfree(c->reqs[row][col].tc);
+			kfree(c->reqs[row][col].rc);
+		}
+		kfree(c->reqs[row]);
+	}
+	c->max_tag = 0;
+}
+
+/**
+ * p9_free_req - free a request and clean up as necessary
+ * @c: client state
+ * @r: request to release
+ *
+ */
+
+static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
+{
+	int tag = r->tc->tag;
+	P9_DPRINTK(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
+
+	r->status = REQ_STATUS_IDLE;
+	if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool))
+		p9_idpool_put(tag, c->tagpool);
+
+	/* if this was a flush request we have to free response fcall */
+	if (r->rc->id == P9_RFLUSH) {
+		kfree(r->tc);
+		kfree(r->rc);
+	}
+}
+
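
A sketch of how a 9P operation is expected to flow through the rewritten
client: marshal and send via the variadic p9_client_rpc() declared above,
then release the tag with p9_free_req().  The clunk opcode and "d" format
string are illustrative; this mirrors the pattern the client operations are
expected to adopt and is not itself part of the patch:

	static int example_clunk(struct p9_client *clnt, u32 fid)
	{
		struct p9_req_t *req;

		req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid);
		if (IS_ERR(req))
			return PTR_ERR(req);

		p9_free_req(clnt, req);		/* return tag and buffers */
		return 0;
	}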
+/**
+ * p9_client_cb - call back from transport to client
+ * @c: client state
+ * @req: request received
+ *
+ */
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
+{
+	struct p9_req_t *other_req;
+	unsigned long flags;
+
+	P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
+
+	if (req->status == REQ_STATUS_ERROR)
+		wake_up(req->wq);
+
+	if (req->flush_tag) { 			/* flush receive path */
+		P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
+		spin_lock_irqsave(&c->lock, flags);
+		other_req = p9_tag_lookup(c, req->flush_tag);
+		if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
+			spin_unlock_irqrestore(&c->lock, flags);
+		else {
+			other_req->status = REQ_STATUS_FLSHD;
+			spin_unlock_irqrestore(&c->lock, flags);
+			wake_up(other_req->wq);
+		}
+		p9_free_req(c, req);
+	} else { 				/* normal receive path */
+		P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
+		spin_lock_irqsave(&c->lock, flags);
+		if (req->status != REQ_STATUS_FLSHD)
+			req->status = REQ_STATUS_RCVD;
+		spin_unlock_irqrestore(&c->lock, flags);
+		wake_up(req->wq);
+		P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
+	}
+}
+EXPORT_SYMBOL(p9_client_cb);
+
+/**
+ * p9_parse_header - parse header arguments out of a packet
+ * @pdu: packet to parse
+ * @size: size of packet
+ * @type: type of request
+ * @tag: tag of packet
+ * @rewind: set if we need to rewind offset afterwards
+ */
+
+int
+p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag,
+								int rewind)
+{
+	int8_t r_type;
+	int16_t r_tag;
+	int32_t r_size;
+	int offset = pdu->offset;
+	int err;
+
+	pdu->offset = 0;
+	if (pdu->size == 0)
+		pdu->size = 7;
+
+	err = p9pdu_readf(pdu, 0, "dbw", &r_size, &r_type, &r_tag);
+	if (err)
+		goto rewind_and_exit;
+
+	pdu->size = r_size;
+	pdu->id = r_type;
+	pdu->tag = r_tag;
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n", pdu->size,
+							pdu->id, pdu->tag);
+
+	if (type)
+		*type = r_type;
+	if (tag)
+		*tag = r_tag;
+	if (size)
+		*size = r_size;
+
+
+rewind_and_exit:
+	if (rewind)
+		pdu->offset = offset;
+	return err;
+}
+EXPORT_SYMBOL(p9_parse_header);
+
+/**
+ * p9_check_errors - check 9p packet for error return and process it
+ * @c: current client instance
+ * @req: request to parse and check for error conditions
+ *
+ * returns error code if one is discovered, otherwise returns 0
+ *
+ * this will have to be more complicated if we have multiple
+ * error packet types
+ */
+
+static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
+{
+	int8_t type;
+	int err;
+
+	err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
 	if (err) {
-		if (rc && rc->id == P9_RWALK)
-			goto clunk_fid;
-		else
-			goto error;
+		P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+		return err;
 	}
 
-	if (rc->params.rwalk.nwqid != nwname) {
-		err = -ENOENT;
-		goto clunk_fid;
-	}
+	if (type == P9_RERROR) {
+		int ecode;
+		char *ename;
 
-	if (nwname)
-		memmove(&fid->qid,
-			&rc->params.rwalk.wqids[rc->params.rwalk.nwqid - 1],
-			sizeof(struct p9_qid));
-	else
-		fid->qid = oldfid->qid;
-
-	kfree(tc);
-	kfree(rc);
-	return fid;
-
-clunk_fid:
-	kfree(tc);
-	kfree(rc);
-	rc = NULL;
-	tc = p9_create_tclunk(fid->fid);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto error;
-	}
-
-	p9_client_rpc(clnt, tc, &rc);
-
-error:
-	kfree(tc);
-	kfree(rc);
-	if (fid && (fid != oldfid))
-		p9_fid_destroy(fid);
-
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL(p9_client_walk);
-
-int p9_client_open(struct p9_fid *fid, int mode)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d mode %d\n", fid->fid, mode);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-
-	if (fid->mode != -1)
-		return -EINVAL;
-
-	tc = p9_create_topen(fid->fid, mode);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto done;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto done;
-
-	fid->mode = mode;
-	fid->iounit = rc->params.ropen.iounit;
-
-done:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_open);
-
-int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
-		     char *extension)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d name %s perm %d mode %d\n", fid->fid,
-		name, perm, mode);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-
-	if (fid->mode != -1)
-		return -EINVAL;
-
-	tc = p9_create_tcreate(fid->fid, name, perm, mode, extension,
-							       clnt->dotu);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto done;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto done;
-
-	fid->mode = mode;
-	fid->iounit = rc->params.ropen.iounit;
-
-done:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_fcreate);
-
-int p9_client_clunk(struct p9_fid *fid)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-
-	tc = p9_create_tclunk(fid->fid);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto done;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto done;
-
-	p9_fid_destroy(fid);
-
-done:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_clunk);
-
-int p9_client_remove(struct p9_fid *fid)
-{
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-
-	tc = p9_create_tremove(fid->fid);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto done;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
-	if (err)
-		goto done;
-
-	p9_fid_destroy(fid);
-
-done:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_remove);
-
-int p9_client_read(struct p9_fid *fid, char *data, u64 offset, u32 count)
-{
-	int err, n, rsize, total;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d offset %llu %d\n", fid->fid,
-					(long long unsigned) offset, count);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-	total = 0;
-
-	rsize = fid->iounit;
-	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
-		rsize = clnt->msize - P9_IOHDRSZ;
-
-	do {
-		if (count < rsize)
-			rsize = count;
-
-		tc = p9_create_tread(fid->fid, offset, rsize);
-		if (IS_ERR(tc)) {
-			err = PTR_ERR(tc);
-			tc = NULL;
-			goto error;
-		}
-
-		err = p9_client_rpc(clnt, tc, &rc);
-		if (err)
-			goto error;
-
-		n = rc->params.rread.count;
-		if (n > count)
-			n = count;
-
-		memmove(data, rc->params.rread.data, n);
-		count -= n;
-		data += n;
-		offset += n;
-		total += n;
-		kfree(tc);
-		tc = NULL;
-		kfree(rc);
-		rc = NULL;
-	} while (count > 0 && n == rsize);
-
-	return total;
-
-error:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_read);
-
-int p9_client_write(struct p9_fid *fid, char *data, u64 offset, u32 count)
-{
-	int err, n, rsize, total;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d offset %llu count %d\n", fid->fid,
-					(long long unsigned) offset, count);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-	total = 0;
-
-	rsize = fid->iounit;
-	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
-		rsize = clnt->msize - P9_IOHDRSZ;
-
-	do {
-		if (count < rsize)
-			rsize = count;
-
-		tc = p9_create_twrite(fid->fid, offset, rsize, data);
-		if (IS_ERR(tc)) {
-			err = PTR_ERR(tc);
-			tc = NULL;
-			goto error;
-		}
-
-		err = p9_client_rpc(clnt, tc, &rc);
-		if (err)
-			goto error;
-
-		n = rc->params.rread.count;
-		count -= n;
-		data += n;
-		offset += n;
-		total += n;
-		kfree(tc);
-		tc = NULL;
-		kfree(rc);
-		rc = NULL;
-	} while (count > 0);
-
-	return total;
-
-error:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_write);
-
-int
-p9_client_uread(struct p9_fid *fid, char __user *data, u64 offset, u32 count)
-{
-	int err, n, rsize, total;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d offset %llu count %d\n", fid->fid,
-					(long long unsigned) offset, count);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-	total = 0;
-
-	rsize = fid->iounit;
-	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
-		rsize = clnt->msize - P9_IOHDRSZ;
-
-	do {
-		if (count < rsize)
-			rsize = count;
-
-		tc = p9_create_tread(fid->fid, offset, rsize);
-		if (IS_ERR(tc)) {
-			err = PTR_ERR(tc);
-			tc = NULL;
-			goto error;
-		}
-
-		err = p9_client_rpc(clnt, tc, &rc);
-		if (err)
-			goto error;
-
-		n = rc->params.rread.count;
-		if (n > count)
-			n = count;
-
-		err = copy_to_user(data, rc->params.rread.data, n);
+		err = p9pdu_readf(req->rc, c->dotu, "s?d", &ename, &ecode);
 		if (err) {
-			err = -EFAULT;
-			goto error;
+			P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error %d\n",
+									err);
+			return err;
 		}
 
-		count -= n;
-		data += n;
-		offset += n;
-		total += n;
-		kfree(tc);
-		tc = NULL;
-		kfree(rc);
-		rc = NULL;
-	} while (count > 0 && n == rsize);
+		if (c->dotu)
+			err = -ecode;
 
-	return total;
+		if (!err) {
+			err = p9_errstr2errno(ename, strlen(ename));
 
-error:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_uread);
-
-int
-p9_client_uwrite(struct p9_fid *fid, const char __user *data, u64 offset,
-								   u32 count)
-{
-	int err, n, rsize, total;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d offset %llu count %d\n", fid->fid,
-					(long long unsigned) offset, count);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
-	total = 0;
-
-	rsize = fid->iounit;
-	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
-		rsize = clnt->msize - P9_IOHDRSZ;
-
-	do {
-		if (count < rsize)
-			rsize = count;
-
-		tc = p9_create_twrite_u(fid->fid, offset, rsize, data);
-		if (IS_ERR(tc)) {
-			err = PTR_ERR(tc);
-			tc = NULL;
-			goto error;
+			/* string match failed */
+			if (!err)
+				err = -ESERVERFAULT;
 		}
 
-		err = p9_client_rpc(clnt, tc, &rc);
-		if (err)
-			goto error;
+		P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
 
-		n = rc->params.rread.count;
-		count -= n;
-		data += n;
-		offset += n;
-		total += n;
-		kfree(tc);
-		tc = NULL;
-		kfree(rc);
-		rc = NULL;
-	} while (count > 0);
+		kfree(ename);
+	} else
+		err = 0;
 
-	return total;
-
-error:
-	kfree(tc);
-	kfree(rc);
 	return err;
 }
-EXPORT_SYMBOL(p9_client_uwrite);
 
-int p9_client_readn(struct p9_fid *fid, char *data, u64 offset, u32 count)
+/**
+ * p9_client_flush - flush (cancel) a request
+ * @c: client state
+ * @oldreq: request to cancel
+ *
+ * This sends a flush for a particular request and links
+ * the flush request to the original request.  The current
+ * code only supports a single flush request although the protocol
+ * allows for multiple flush requests to be sent for a single request.
+ *
+ */
+
+static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 {
-	int n, total;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d offset %llu count %d\n", fid->fid,
-					(long long unsigned) offset, count);
-	n = 0;
-	total = 0;
-	while (count) {
-		n = p9_client_read(fid, data, offset, count);
-		if (n <= 0)
-			break;
-
-		data += n;
-		offset += n;
-		count -= n;
-		total += n;
-	}
-
-	if (n < 0)
-		total = n;
-
-	return total;
-}
-EXPORT_SYMBOL(p9_client_readn);
-
-struct p9_stat *p9_client_stat(struct p9_fid *fid)
-{
+	struct p9_req_t *req;
+	int16_t oldtag;
 	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-	struct p9_stat *ret;
 
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	ret = NULL;
-	clnt = fid->clnt;
-
-	tc = p9_create_tstat(fid->fid);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto error;
-	}
-
-	err = p9_client_rpc(clnt, tc, &rc);
+	err = p9_parse_header(oldreq->tc, NULL, NULL, &oldtag, 1);
 	if (err)
-		goto error;
+		return err;
 
-	ret = p9_clone_stat(&rc->params.rstat.stat, clnt->dotu);
-	if (IS_ERR(ret)) {
-		err = PTR_ERR(ret);
-		ret = NULL;
-		goto error;
-	}
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag);
 
-	kfree(tc);
-	kfree(rc);
-	return ret;
+	req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
-error:
-	kfree(tc);
-	kfree(rc);
-	kfree(ret);
-	return ERR_PTR(err);
+	req->flush_tag = oldtag;
+
+	/* we don't free anything here because RPC isn't complete */
+	return 0;
 }
-EXPORT_SYMBOL(p9_client_stat);
 
-int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
+/**
+ * p9_client_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 {
-	int err;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
+	va_list ap;
+	int tag, err;
+	struct p9_req_t *req;
+	unsigned long flags;
+	int sigpending;
+	int flushed = 0;
 
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	clnt = fid->clnt;
+	P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
-	tc = p9_create_twstat(fid->fid, wst, clnt->dotu);
-	if (IS_ERR(tc)) {
-		err = PTR_ERR(tc);
-		tc = NULL;
-		goto done;
+	if (c->status != Connected)
+		return ERR_PTR(-EIO);
+
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	} else
+		sigpending = 0;
+
+	tag = P9_NOTAG;
+	if (type != P9_TVERSION) {
+		tag = p9_idpool_get(c->tagpool);
+		if (tag < 0)
+			return ERR_PTR(-ENOMEM);
 	}
 
-	err = p9_client_rpc(clnt, tc, &rc);
+	req = p9_tag_alloc(c, tag);
+	if (IS_ERR(req))
+		return req;
 
-done:
-	kfree(tc);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(p9_client_wstat);
+	/* marshall the data */
+	p9pdu_prepare(req->tc, tag, type);
+	va_start(ap, fmt);
+	err = p9pdu_vwritef(req->tc, c->dotu, fmt, ap);
+	va_end(ap);
+	p9pdu_finalize(req->tc);
 
-struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset)
-{
-	int err, n, m;
-	struct p9_fcall *tc, *rc;
-	struct p9_client *clnt;
-	struct p9_stat st, *ret;
-
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d offset %llu\n", fid->fid,
-						(long long unsigned) offset);
-	err = 0;
-	tc = NULL;
-	rc = NULL;
-	ret = NULL;
-	clnt = fid->clnt;
-
-	/* if the offset is below or above the current response, free it */
-	if (offset < fid->rdir_fpos || (fid->rdir_fcall &&
-		offset >= fid->rdir_fpos+fid->rdir_fcall->params.rread.count)) {
-		fid->rdir_pos = 0;
-		if (fid->rdir_fcall)
-			fid->rdir_fpos += fid->rdir_fcall->params.rread.count;
-
-		kfree(fid->rdir_fcall);
-		fid->rdir_fcall = NULL;
-		if (offset < fid->rdir_fpos)
-			fid->rdir_fpos = 0;
+	err = c->trans_mod->request(c, req);
+	if (err < 0) {
+		c->status = Disconnected;
+		goto reterr;
 	}
 
-	if (!fid->rdir_fcall) {
-		n = fid->iounit;
-		if (!n || n > clnt->msize-P9_IOHDRSZ)
-			n = clnt->msize - P9_IOHDRSZ;
+	/* if we just transmitted a flush, don't wait for a reply here */
+	if (type == P9_TFLUSH)
+		return req;
+again:
+	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
+	err = wait_event_interruptible(*req->wq,
+						req->status >= REQ_STATUS_RCVD);
+	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d (flushed=%d)\n",
+						req->wq, tag, err, flushed);
 
-		while (1) {
-			if (fid->rdir_fcall) {
-				fid->rdir_fpos +=
-					fid->rdir_fcall->params.rread.count;
-				kfree(fid->rdir_fcall);
-				fid->rdir_fcall = NULL;
-			}
+	if (req->status == REQ_STATUS_ERROR) {
+		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+		err = req->t_err;
+	} else if (err == -ERESTARTSYS && flushed) {
+		P9_DPRINTK(P9_DEBUG_MUX, "flushed - going again\n");
+		goto again;
+	} else if (req->status == REQ_STATUS_FLSHD) {
+		P9_DPRINTK(P9_DEBUG_MUX, "flushed - erestartsys\n");
+		err = -ERESTARTSYS;
+	}
 
-			tc = p9_create_tread(fid->fid, fid->rdir_fpos, n);
-			if (IS_ERR(tc)) {
-				err = PTR_ERR(tc);
-				tc = NULL;
-				goto error;
-			}
+	if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
+		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+		spin_lock_irqsave(&c->lock, flags);
+		if (req->status == REQ_STATUS_SENT)
+			req->status = REQ_STATUS_FLSH;
+		spin_unlock_irqrestore(&c->lock, flags);
+		sigpending = 1;
+		flushed = 1;
+		clear_thread_flag(TIF_SIGPENDING);
 
-			err = p9_client_rpc(clnt, tc, &rc);
-			if (err)
-				goto error;
-
-			n = rc->params.rread.count;
-			if (n == 0)
-				goto done;
-
-			fid->rdir_fcall = rc;
-			rc = NULL;
-			if (offset >= fid->rdir_fpos &&
-						offset < fid->rdir_fpos+n)
-				break;
+		if (c->trans_mod->cancel(c, req)) {
+			err = p9_client_flush(c, req);
+			if (err == 0)
+				goto again;
 		}
-
-		fid->rdir_pos = 0;
 	}
 
-	m = offset - fid->rdir_fpos;
-	if (m < 0)
-		goto done;
-
-	n = p9_deserialize_stat(fid->rdir_fcall->params.rread.data + m,
-		fid->rdir_fcall->params.rread.count - m, &st, clnt->dotu);
-
-	if (!n) {
-		err = -EIO;
-		goto error;
+	if (sigpending) {
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
 
-	fid->rdir_pos += n;
-	st.size = n;
-	ret = p9_clone_stat(&st, clnt->dotu);
-	if (IS_ERR(ret)) {
-		err = PTR_ERR(ret);
-		ret = NULL;
-		goto error;
+	if (err < 0)
+		goto reterr;
+
+	err = p9_check_errors(c, req);
+	if (!err) {
+		P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
+		return req;
 	}
 
-done:
-	kfree(tc);
-	kfree(rc);
-	return ret;
-
-error:
-	kfree(tc);
-	kfree(rc);
-	kfree(ret);
+reterr:
+	P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
+									err);
+	p9_free_req(c, req);
 	return ERR_PTR(err);
 }
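A minimal caller-side sketch of the format-string RPC helper above; it simply mirrors what p9_client_clunk does later in this file with the "d" format, and would live inside client.c since p9_client_rpc is static. Only names introduced by this patch are used.

	/* Sketch: issue a TCLUNK through the varargs helper and check the reply. */
	static int example_clunk(struct p9_client *c, u32 fid)
	{
		struct p9_req_t *req;

		req = p9_client_rpc(c, P9_TCLUNK, "d", fid);
		if (IS_ERR(req))
			return PTR_ERR(req);	/* transport error or server Rerror */

		p9_free_req(c, req);		/* Rclunk carries no payload */
		return 0;
	}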
-EXPORT_SYMBOL(p9_client_dirread);
-
-static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu)
-{
-	int n;
-	char *p;
-	struct p9_stat *ret;
-
-	n = sizeof(struct p9_stat) + st->name.len + st->uid.len + st->gid.len +
-		st->muid.len;
-
-	if (dotu)
-		n += st->extension.len;
-
-	ret = kmalloc(n, GFP_KERNEL);
-	if (!ret)
-		return ERR_PTR(-ENOMEM);
-
-	memmove(ret, st, sizeof(struct p9_stat));
-	p = ((char *) ret) + sizeof(struct p9_stat);
-	memmove(p, st->name.str, st->name.len);
-	ret->name.str = p;
-	p += st->name.len;
-	memmove(p, st->uid.str, st->uid.len);
-	ret->uid.str = p;
-	p += st->uid.len;
-	memmove(p, st->gid.str, st->gid.len);
-	ret->gid.str = p;
-	p += st->gid.len;
-	memmove(p, st->muid.str, st->muid.len);
-	ret->muid.str = p;
-	p += st->muid.len;
-
-	if (dotu) {
-		memmove(p, st->extension.str, st->extension.len);
-		ret->extension.str = p;
-		p += st->extension.len;
-	}
-
-	return ret;
-}
 
 static struct p9_fid *p9_fid_create(struct p9_client *clnt)
 {
 	int err;
 	struct p9_fid *fid;
 
-	P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
+	P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt);
 	fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
 	if (!fid)
 		return ERR_PTR(-ENOMEM);
@@ -1053,8 +628,6 @@
 	memset(&fid->qid, 0, sizeof(struct p9_qid));
 	fid->mode = -1;
 	fid->rdir_fpos = 0;
-	fid->rdir_pos = 0;
-	fid->rdir_fcall = NULL;
 	fid->uid = current->fsuid;
 	fid->clnt = clnt;
 	fid->aux = NULL;
@@ -1074,12 +647,631 @@
 {
 	struct p9_client *clnt;
 
-	P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
+	P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid);
 	clnt = fid->clnt;
 	p9_idpool_put(fid->fid, clnt->fidpool);
 	spin_lock(&clnt->lock);
 	list_del(&fid->flist);
 	spin_unlock(&clnt->lock);
-	kfree(fid->rdir_fcall);
 	kfree(fid);
 }
+
+int p9_client_version(struct p9_client *c)
+{
+	int err = 0;
+	struct p9_req_t *req;
+	char *version;
+	int msize;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d extended %d\n",
+							c->msize, c->dotu);
+	req = p9_client_rpc(c, P9_TVERSION, "ds", c->msize,
+				c->dotu ? "9P2000.u" : "9P2000");
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	err = p9pdu_readf(req->rc, c->dotu, "ds", &msize, &version);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
+		p9pdu_dump(1, req->rc);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
+	if (!memcmp(version, "9P2000.u", 8))
+		c->dotu = 1;
+	else if (!memcmp(version, "9P2000", 6))
+		c->dotu = 0;
+	else {
+		err = -EREMOTEIO;
+		goto error;
+	}
+
+	if (msize < c->msize)
+		c->msize = msize;
+
+error:
+	kfree(version);
+	p9_free_req(c, req);
+
+	return err;
+}
+EXPORT_SYMBOL(p9_client_version);
+
+struct p9_client *p9_client_create(const char *dev_name, char *options)
+{
+	int err;
+	struct p9_client *clnt;
+
+	err = 0;
+	clnt = kmalloc(sizeof(struct p9_client), GFP_KERNEL);
+	if (!clnt)
+		return ERR_PTR(-ENOMEM);
+
+	clnt->trans_mod = NULL;
+	clnt->trans = NULL;
+	spin_lock_init(&clnt->lock);
+	INIT_LIST_HEAD(&clnt->fidlist);
+	clnt->fidpool = p9_idpool_create();
+	if (IS_ERR(clnt->fidpool)) {
+		err = PTR_ERR(clnt->fidpool);
+		clnt->fidpool = NULL;
+		goto error;
+	}
+
+	p9_tag_init(clnt);
+
+	err = parse_opts(options, clnt);
+	if (err < 0)
+		goto error;
+
+	if (clnt->trans_mod == NULL) {
+		err = -EPROTONOSUPPORT;
+		P9_DPRINTK(P9_DEBUG_ERROR,
+				"No transport defined or default transport\n");
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
+		clnt, clnt->trans_mod, clnt->msize, clnt->dotu);
+
+	err = clnt->trans_mod->create(clnt, dev_name, options);
+	if (err)
+		goto error;
+
+	if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
+		clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
+
+	err = p9_client_version(clnt);
+	if (err)
+		goto error;
+
+	return clnt;
+
+error:
+	p9_client_destroy(clnt);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(p9_client_create);
+
+void p9_client_destroy(struct p9_client *clnt)
+{
+	struct p9_fid *fid, *fidptr;
+
+	P9_DPRINTK(P9_DEBUG_MUX, "clnt %p\n", clnt);
+
+	if (clnt->trans_mod)
+		clnt->trans_mod->close(clnt);
+
+	v9fs_put_trans(clnt->trans_mod);
+
+	list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist)
+		p9_fid_destroy(fid);
+
+	if (clnt->fidpool)
+		p9_idpool_destroy(clnt->fidpool);
+
+	p9_tag_cleanup(clnt);
+
+	kfree(clnt);
+}
+EXPORT_SYMBOL(p9_client_destroy);
+
+void p9_client_disconnect(struct p9_client *clnt)
+{
+	P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
+	clnt->status = Disconnected;
+}
+EXPORT_SYMBOL(p9_client_disconnect);
+
+struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
+	char *uname, u32 n_uname, char *aname)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_fid *fid;
+	struct p9_qid qid;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+					afid ? afid->fid : -1, uname, aname);
+	err = 0;
+
+	fid = p9_fid_create(clnt);
+	if (IS_ERR(fid)) {
+		err = PTR_ERR(fid);
+		fid = NULL;
+		goto error;
+	}
+
+	req = p9_client_rpc(clnt, P9_TATTACH, "ddss?d", fid->fid,
+			afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		p9_free_req(clnt, req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
+					qid.type, qid.path, qid.version);
+
+	memmove(&fid->qid, &qid, sizeof(struct p9_qid));
+
+	p9_free_req(clnt, req);
+	return fid;
+
+error:
+	if (fid)
+		p9_fid_destroy(fid);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(p9_client_attach);
+
+struct p9_fid *
+p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_qid qid;
+	struct p9_fid *afid;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TAUTH uname %s aname %s\n", uname, aname);
+	err = 0;
+
+	afid = p9_fid_create(clnt);
+	if (IS_ERR(afid)) {
+		err = PTR_ERR(afid);
+		afid = NULL;
+		goto error;
+	}
+
+	req = p9_client_rpc(clnt, P9_TAUTH, "dss?d",
+			afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		p9_free_req(clnt, req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RAUTH qid %x.%llx.%x\n",
+					qid.type, qid.path, qid.version);
+
+	memmove(&afid->qid, &qid, sizeof(struct p9_qid));
+	p9_free_req(clnt, req);
+	return afid;
+
+error:
+	if (afid)
+		p9_fid_destroy(afid);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(p9_client_auth);
+
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
+	int clone)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_fid *fid;
+	struct p9_qid *wqids;
+	struct p9_req_t *req;
+	int16_t nwqids, count;
+
+	err = 0;
+	clnt = oldfid->clnt;
+	if (clone) {
+		fid = p9_fid_create(clnt);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			fid = NULL;
+			goto error;
+		}
+
+		fid->uid = oldfid->uid;
+	} else
+		fid = oldfid;
+
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n",
+		oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
+
+	req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
+								nwname, wnames);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "R", &nwqids, &wqids);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		p9_free_req(clnt, req);
+		goto clunk_fid;
+	}
+	p9_free_req(clnt, req);
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
+
+	if (nwqids != nwname) {
+		err = -ENOENT;
+		goto clunk_fid;
+	}
+
+	for (count = 0; count < nwqids; count++)
+		P9_DPRINTK(P9_DEBUG_9P, "<<<     [%d] %x.%llx.%x\n",
+			count, wqids[count].type, wqids[count].path,
+			wqids[count].version);
+
+	if (nwname)
+		memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid));
+	else
+		fid->qid = oldfid->qid;
+
+	return fid;
+
+clunk_fid:
+	p9_client_clunk(fid);
+	fid = NULL;
+
+error:
+	if (fid && (fid != oldfid))
+		p9_fid_destroy(fid);
+
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(p9_client_walk);
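Taken together, the exported entry points converted so far give the usual session lifecycle. A hedged sketch follows; the device name, option string, user name and attach point are illustrative assumptions, while the call sequence uses only functions defined in this file.

	/* Sketch: create a session, attach, walk to one name, then tear down. */
	static void example_session(void)
	{
		char opts[] = "trans=tcp,port=564";	/* assumed option string */
		char uname[] = "nobody", aname[] = "/tmp", name[] = "foo";
		char *wnames[] = { name };
		struct p9_client *clnt;
		struct p9_fid *root, *fid;

		clnt = p9_client_create("10.0.0.1", opts);	/* dev_name is assumed */
		if (IS_ERR(clnt))
			return;

		root = p9_client_attach(clnt, NULL, uname, 0, aname);
		if (!IS_ERR(root)) {
			fid = p9_client_walk(root, 1, wnames, 1); /* clone keeps root usable */
			if (!IS_ERR(fid))
				p9_client_clunk(fid);
			p9_client_clunk(root);
		}
		p9_client_destroy(clnt);
	}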
+
+int p9_client_open(struct p9_fid *fid, int mode)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+	struct p9_qid qid;
+	int iounit;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TOPEN fid %d mode %d\n", fid->fid, mode);
+	err = 0;
+	clnt = fid->clnt;
+
+	if (fid->mode != -1)
+		return -EINVAL;
+
+	req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n",
+				qid.type, qid.path, qid.version, iounit);
+
+	fid->mode = mode;
+	fid->iounit = iounit;
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_open);
+
+int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
+		     char *extension)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+	struct p9_qid qid;
+	int iounit;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
+						fid->fid, name, perm, mode);
+	err = 0;
+	clnt = fid->clnt;
+
+	if (fid->mode != -1)
+		return -EINVAL;
+
+	req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm,
+				mode, extension);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
+				qid.type, qid.path, qid.version, iounit);
+
+	fid->mode = mode;
+	fid->iounit = iounit;
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_fcreate);
+
+int p9_client_clunk(struct p9_fid *fid)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
+	err = 0;
+	clnt = fid->clnt;
+
+	req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
+
+	p9_free_req(clnt, req);
+	p9_fid_destroy(fid);
+
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_clunk);
+
+int p9_client_remove(struct p9_fid *fid)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
+	err = 0;
+	clnt = fid->clnt;
+
+	req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
+
+	p9_free_req(clnt, req);
+	p9_fid_destroy(fid);
+
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_remove);
+
+int
+p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
+								u32 count)
+{
+	int err, rsize, total;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+	char *dataptr;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", fid->fid,
+					(long long unsigned) offset, count);
+	err = 0;
+	clnt = fid->clnt;
+	total = 0;
+
+	rsize = fid->iounit;
+	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+		rsize = clnt->msize - P9_IOHDRSZ;
+
+	if (count < rsize)
+		rsize = count;
+
+	req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, rsize);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "D", &count, &dataptr);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+
+	if (data) {
+		memmove(data, dataptr, count);
+		data += count;
+	}
+
+	if (udata) {
+		err = copy_to_user(udata, dataptr, count);
+		if (err) {
+			err = -EFAULT;
+			goto free_and_error;
+		}
+	}
+
+	p9_free_req(clnt, req);
+	return count;
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_read);
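Because p9_client_read now returns the byte count of a single TREAD (the old p9_client_readn wrapper is removed), a caller that needs a full buffer loops itself. A minimal sketch, assuming the return convention above (negative errno on failure, 0 at end of file):

	/* Sketch: read up to `count` bytes, stopping at EOF or error. */
	static int example_read_full(struct p9_fid *fid, char *buf, u64 off, u32 count)
	{
		int n, total = 0;

		while (count) {
			n = p9_client_read(fid, buf, NULL, off, count);
			if (n <= 0)
				return total ? total : n;	/* error or EOF */
			buf += n;
			off += n;
			count -= n;
			total += n;
		}
		return total;
	}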
+
+int
+p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
+							u64 offset, u32 count)
+{
+	int err, rsize, total;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
+				fid->fid, (long long unsigned) offset, count);
+	err = 0;
+	clnt = fid->clnt;
+	total = 0;
+
+	rsize = fid->iounit;
+	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+		rsize = clnt->msize - P9_IOHDRSZ;
+
+	if (count < rsize)
+		rsize = count;
+	if (data)
+		req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid, offset,
+								rsize, data);
+	else
+		req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid, offset,
+								rsize, udata);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "d", &count);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
+
+	p9_free_req(clnt, req);
+	return count;
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_write);
+
+struct p9_wstat *p9_client_stat(struct p9_fid *fid)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_wstat *ret = kmalloc(sizeof(struct p9_wstat), GFP_KERNEL);
+	struct p9_req_t *req;
+	u16 ignored;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid);
+
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+
+	err = 0;
+	clnt = fid->clnt;
+
+	req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid);
+	if (IS_ERR(req)) {
+		kfree(ret);
+		ret = ERR_PTR(PTR_ERR(req));
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->dotu, "wS", &ignored, ret);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		kfree(ret);
+		ret = ERR_PTR(err);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P,
+		"<<< RSTAT sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
+		"<<<    mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
+		"<<<    name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
+		"<<<    uid=%d gid=%d n_muid=%d\n",
+		ret->size, ret->type, ret->dev, ret->qid.type,
+		ret->qid.path, ret->qid.version, ret->mode,
+		ret->atime, ret->mtime, ret->length, ret->name,
+		ret->uid, ret->gid, ret->muid, ret->extension,
+		ret->n_uid, ret->n_gid, ret->n_muid);
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return ret;
+}
+EXPORT_SYMBOL(p9_client_stat);
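One usage note implied by the code above: the returned p9_wstat is kmalloc'ed inside p9_client_stat, so the caller owns and must free it. A brief sketch under that assumption:

	/* Sketch: fetch attributes, use one field, release the stat buffer. */
	static int example_print_length(struct p9_fid *fid)
	{
		struct p9_wstat *st;

		st = p9_client_stat(fid);
		if (IS_ERR(st))
			return PTR_ERR(st);

		printk(KERN_INFO "length=%llu\n", (unsigned long long)st->length);
		kfree(st);	/* caller frees what p9_client_stat allocated */
		return 0;
	}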
+
+int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
+	P9_DPRINTK(P9_DEBUG_9P,
+		"     sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
+		"     mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
+		"     name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
+		"     uid=%d gid=%d n_muid=%d\n",
+		wst->size, wst->type, wst->dev, wst->qid.type,
+		wst->qid.path, wst->qid.version, wst->mode,
+		wst->atime, wst->mtime, wst->length, wst->name,
+		wst->uid, wst->gid, wst->muid, wst->extension,
+		wst->n_uid, wst->n_gid, wst->n_muid);
+	err = 0;
+	clnt = fid->clnt;
+
+	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, 0, wst);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
+
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_wstat);
diff --git a/net/9p/conv.c b/net/9p/conv.c
deleted file mode 100644
index 5ad3a3b..0000000
--- a/net/9p/conv.c
+++ /dev/null
@@ -1,1054 +0,0 @@
-/*
- * net/9p/conv.c
- *
- * 9P protocol conversion functions
- *
- *  Copyright (C) 2004, 2005 by Latchesar Ionkov <lucho@ionkov.net>
- *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
- *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2
- *  as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to:
- *  Free Software Foundation
- *  51 Franklin Street, Fifth Floor
- *  Boston, MA  02111-1301  USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/idr.h>
-#include <linux/uaccess.h>
-#include <net/9p/9p.h>
-
-/*
- * Buffer to help with string parsing
- */
-struct cbuf {
-	unsigned char *sp;
-	unsigned char *p;
-	unsigned char *ep;
-};
-
-static inline void buf_init(struct cbuf *buf, void *data, int datalen)
-{
-	buf->sp = buf->p = data;
-	buf->ep = data + datalen;
-}
-
-static inline int buf_check_overflow(struct cbuf *buf)
-{
-	return buf->p > buf->ep;
-}
-
-static int buf_check_size(struct cbuf *buf, int len)
-{
-	if (buf->p + len > buf->ep) {
-		if (buf->p < buf->ep) {
-			P9_EPRINTK(KERN_ERR,
-				"buffer overflow: want %d has %d\n", len,
-				(int)(buf->ep - buf->p));
-			dump_stack();
-			buf->p = buf->ep + 1;
-		}
-
-		return 0;
-	}
-
-	return 1;
-}
-
-static void *buf_alloc(struct cbuf *buf, int len)
-{
-	void *ret = NULL;
-
-	if (buf_check_size(buf, len)) {
-		ret = buf->p;
-		buf->p += len;
-	}
-
-	return ret;
-}
-
-static void buf_put_int8(struct cbuf *buf, u8 val)
-{
-	if (buf_check_size(buf, 1)) {
-		buf->p[0] = val;
-		buf->p++;
-	}
-}
-
-static void buf_put_int16(struct cbuf *buf, u16 val)
-{
-	if (buf_check_size(buf, 2)) {
-		*(__le16 *) buf->p = cpu_to_le16(val);
-		buf->p += 2;
-	}
-}
-
-static void buf_put_int32(struct cbuf *buf, u32 val)
-{
-	if (buf_check_size(buf, 4)) {
-		*(__le32 *)buf->p = cpu_to_le32(val);
-		buf->p += 4;
-	}
-}
-
-static void buf_put_int64(struct cbuf *buf, u64 val)
-{
-	if (buf_check_size(buf, 8)) {
-		*(__le64 *)buf->p = cpu_to_le64(val);
-		buf->p += 8;
-	}
-}
-
-static char *buf_put_stringn(struct cbuf *buf, const char *s, u16 slen)
-{
-	char *ret;
-
-	ret = NULL;
-	if (buf_check_size(buf, slen + 2)) {
-		buf_put_int16(buf, slen);
-		ret = buf->p;
-		memcpy(buf->p, s, slen);
-		buf->p += slen;
-	}
-
-	return ret;
-}
-
-static u8 buf_get_int8(struct cbuf *buf)
-{
-	u8 ret = 0;
-
-	if (buf_check_size(buf, 1)) {
-		ret = buf->p[0];
-		buf->p++;
-	}
-
-	return ret;
-}
-
-static u16 buf_get_int16(struct cbuf *buf)
-{
-	u16 ret = 0;
-
-	if (buf_check_size(buf, 2)) {
-		ret = le16_to_cpu(*(__le16 *)buf->p);
-		buf->p += 2;
-	}
-
-	return ret;
-}
-
-static u32 buf_get_int32(struct cbuf *buf)
-{
-	u32 ret = 0;
-
-	if (buf_check_size(buf, 4)) {
-		ret = le32_to_cpu(*(__le32 *)buf->p);
-		buf->p += 4;
-	}
-
-	return ret;
-}
-
-static u64 buf_get_int64(struct cbuf *buf)
-{
-	u64 ret = 0;
-
-	if (buf_check_size(buf, 8)) {
-		ret = le64_to_cpu(*(__le64 *)buf->p);
-		buf->p += 8;
-	}
-
-	return ret;
-}
-
-static void buf_get_str(struct cbuf *buf, struct p9_str *vstr)
-{
-	vstr->len = buf_get_int16(buf);
-	if (!buf_check_overflow(buf) && buf_check_size(buf, vstr->len)) {
-		vstr->str = buf->p;
-		buf->p += vstr->len;
-	} else {
-		vstr->len = 0;
-		vstr->str = NULL;
-	}
-}
-
-static void buf_get_qid(struct cbuf *bufp, struct p9_qid *qid)
-{
-	qid->type = buf_get_int8(bufp);
-	qid->version = buf_get_int32(bufp);
-	qid->path = buf_get_int64(bufp);
-}
-
-/**
- * p9_size_wstat - calculate the size of a variable length stat struct
- * @wstat: metadata (stat) structure
- * @dotu: non-zero if 9P2000.u
- *
- */
-
-static int p9_size_wstat(struct p9_wstat *wstat, int dotu)
-{
-	int size = 0;
-
-	if (wstat == NULL) {
-		P9_EPRINTK(KERN_ERR, "p9_size_stat: got a NULL stat pointer\n");
-		return 0;
-	}
-
-	size =			/* 2 + *//* size[2] */
-	    2 +			/* type[2] */
-	    4 +			/* dev[4] */
-	    1 +			/* qid.type[1] */
-	    4 +			/* qid.vers[4] */
-	    8 +			/* qid.path[8] */
-	    4 +			/* mode[4] */
-	    4 +			/* atime[4] */
-	    4 +			/* mtime[4] */
-	    8 +			/* length[8] */
-	    8;			/* minimum sum of string lengths */
-
-	if (wstat->name)
-		size += strlen(wstat->name);
-	if (wstat->uid)
-		size += strlen(wstat->uid);
-	if (wstat->gid)
-		size += strlen(wstat->gid);
-	if (wstat->muid)
-		size += strlen(wstat->muid);
-
-	if (dotu) {
-		size += 4 +	/* n_uid[4] */
-		    4 +		/* n_gid[4] */
-		    4 +		/* n_muid[4] */
-		    2;		/* string length of extension[4] */
-		if (wstat->extension)
-			size += strlen(wstat->extension);
-	}
-
-	return size;
-}
-
-/**
- * buf_get_stat - safely decode a recieved metadata (stat) structure
- * @bufp: buffer to deserialize
- * @stat: metadata (stat) structure
- * @dotu: non-zero if 9P2000.u
- *
- */
-
-static void
-buf_get_stat(struct cbuf *bufp, struct p9_stat *stat, int dotu)
-{
-	stat->size = buf_get_int16(bufp);
-	stat->type = buf_get_int16(bufp);
-	stat->dev = buf_get_int32(bufp);
-	stat->qid.type = buf_get_int8(bufp);
-	stat->qid.version = buf_get_int32(bufp);
-	stat->qid.path = buf_get_int64(bufp);
-	stat->mode = buf_get_int32(bufp);
-	stat->atime = buf_get_int32(bufp);
-	stat->mtime = buf_get_int32(bufp);
-	stat->length = buf_get_int64(bufp);
-	buf_get_str(bufp, &stat->name);
-	buf_get_str(bufp, &stat->uid);
-	buf_get_str(bufp, &stat->gid);
-	buf_get_str(bufp, &stat->muid);
-
-	if (dotu) {
-		buf_get_str(bufp, &stat->extension);
-		stat->n_uid = buf_get_int32(bufp);
-		stat->n_gid = buf_get_int32(bufp);
-		stat->n_muid = buf_get_int32(bufp);
-	}
-}
-
-/**
- * p9_deserialize_stat - decode a received metadata structure
- * @buf: buffer to deserialize
- * @buflen: length of received buffer
- * @stat: metadata structure to decode into
- * @dotu: non-zero if 9P2000.u
- *
- * Note: stat will point to the buf region.
- */
-
-int
-p9_deserialize_stat(void *buf, u32 buflen, struct p9_stat *stat,
-		int dotu)
-{
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-	unsigned char *p;
-
-	buf_init(bufp, buf, buflen);
-	p = bufp->p;
-	buf_get_stat(bufp, stat, dotu);
-
-	if (buf_check_overflow(bufp))
-		return 0;
-	else
-		return bufp->p - p;
-}
-EXPORT_SYMBOL(p9_deserialize_stat);
-
-/**
- * deserialize_fcall - unmarshal a response
- * @buf: recieved buffer
- * @buflen: length of received buffer
- * @rcall: fcall structure to populate
- * @rcalllen: length of fcall structure to populate
- * @dotu: non-zero if 9P2000.u
- *
- */
-
-int
-p9_deserialize_fcall(void *buf, u32 buflen, struct p9_fcall *rcall,
-		       int dotu)
-{
-
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-	int i = 0;
-
-	buf_init(bufp, buf, buflen);
-
-	rcall->size = buf_get_int32(bufp);
-	rcall->id = buf_get_int8(bufp);
-	rcall->tag = buf_get_int16(bufp);
-
-	P9_DPRINTK(P9_DEBUG_CONV, "size %d id %d tag %d\n", rcall->size,
-							rcall->id, rcall->tag);
-
-	switch (rcall->id) {
-	default:
-		P9_EPRINTK(KERN_ERR, "unknown message type: %d\n", rcall->id);
-		return -EPROTO;
-	case P9_RVERSION:
-		rcall->params.rversion.msize = buf_get_int32(bufp);
-		buf_get_str(bufp, &rcall->params.rversion.version);
-		break;
-	case P9_RFLUSH:
-		break;
-	case P9_RATTACH:
-		rcall->params.rattach.qid.type = buf_get_int8(bufp);
-		rcall->params.rattach.qid.version = buf_get_int32(bufp);
-		rcall->params.rattach.qid.path = buf_get_int64(bufp);
-		break;
-	case P9_RWALK:
-		rcall->params.rwalk.nwqid = buf_get_int16(bufp);
-		if (rcall->params.rwalk.nwqid > P9_MAXWELEM) {
-			P9_EPRINTK(KERN_ERR,
-					"Rwalk with more than %d qids: %d\n",
-					P9_MAXWELEM, rcall->params.rwalk.nwqid);
-			return -EPROTO;
-		}
-
-		for (i = 0; i < rcall->params.rwalk.nwqid; i++)
-			buf_get_qid(bufp, &rcall->params.rwalk.wqids[i]);
-		break;
-	case P9_ROPEN:
-		buf_get_qid(bufp, &rcall->params.ropen.qid);
-		rcall->params.ropen.iounit = buf_get_int32(bufp);
-		break;
-	case P9_RCREATE:
-		buf_get_qid(bufp, &rcall->params.rcreate.qid);
-		rcall->params.rcreate.iounit = buf_get_int32(bufp);
-		break;
-	case P9_RREAD:
-		rcall->params.rread.count = buf_get_int32(bufp);
-		rcall->params.rread.data = bufp->p;
-		buf_check_size(bufp, rcall->params.rread.count);
-		break;
-	case P9_RWRITE:
-		rcall->params.rwrite.count = buf_get_int32(bufp);
-		break;
-	case P9_RCLUNK:
-		break;
-	case P9_RREMOVE:
-		break;
-	case P9_RSTAT:
-		buf_get_int16(bufp);
-		buf_get_stat(bufp, &rcall->params.rstat.stat, dotu);
-		break;
-	case P9_RWSTAT:
-		break;
-	case P9_RERROR:
-		buf_get_str(bufp, &rcall->params.rerror.error);
-		if (dotu)
-			rcall->params.rerror.errno = buf_get_int16(bufp);
-		break;
-	}
-
-	if (buf_check_overflow(bufp)) {
-		P9_DPRINTK(P9_DEBUG_ERROR, "buffer overflow\n");
-		return -EIO;
-	}
-
-	return bufp->p - bufp->sp;
-}
-EXPORT_SYMBOL(p9_deserialize_fcall);
-
-static inline void p9_put_int8(struct cbuf *bufp, u8 val, u8 * p)
-{
-	*p = val;
-	buf_put_int8(bufp, val);
-}
-
-static inline void p9_put_int16(struct cbuf *bufp, u16 val, u16 * p)
-{
-	*p = val;
-	buf_put_int16(bufp, val);
-}
-
-static inline void p9_put_int32(struct cbuf *bufp, u32 val, u32 * p)
-{
-	*p = val;
-	buf_put_int32(bufp, val);
-}
-
-static inline void p9_put_int64(struct cbuf *bufp, u64 val, u64 * p)
-{
-	*p = val;
-	buf_put_int64(bufp, val);
-}
-
-static void
-p9_put_str(struct cbuf *bufp, char *data, struct p9_str *str)
-{
-	int len;
-	char *s;
-
-	if (data)
-		len = strlen(data);
-	else
-		len = 0;
-
-	s = buf_put_stringn(bufp, data, len);
-	if (str) {
-		str->len = len;
-		str->str = s;
-	}
-}
-
-static int
-p9_put_data(struct cbuf *bufp, const char *data, int count,
-		   unsigned char **pdata)
-{
-	*pdata = buf_alloc(bufp, count);
-	if (*pdata == NULL)
-		return -ENOMEM;
-	memmove(*pdata, data, count);
-	return 0;
-}
-
-static int
-p9_put_user_data(struct cbuf *bufp, const char __user *data, int count,
-		   unsigned char **pdata)
-{
-	*pdata = buf_alloc(bufp, count);
-	if (*pdata == NULL)
-		return -ENOMEM;
-	return copy_from_user(*pdata, data, count);
-}
-
-static void
-p9_put_wstat(struct cbuf *bufp, struct p9_wstat *wstat,
-	       struct p9_stat *stat, int statsz, int dotu)
-{
-	p9_put_int16(bufp, statsz, &stat->size);
-	p9_put_int16(bufp, wstat->type, &stat->type);
-	p9_put_int32(bufp, wstat->dev, &stat->dev);
-	p9_put_int8(bufp, wstat->qid.type, &stat->qid.type);
-	p9_put_int32(bufp, wstat->qid.version, &stat->qid.version);
-	p9_put_int64(bufp, wstat->qid.path, &stat->qid.path);
-	p9_put_int32(bufp, wstat->mode, &stat->mode);
-	p9_put_int32(bufp, wstat->atime, &stat->atime);
-	p9_put_int32(bufp, wstat->mtime, &stat->mtime);
-	p9_put_int64(bufp, wstat->length, &stat->length);
-
-	p9_put_str(bufp, wstat->name, &stat->name);
-	p9_put_str(bufp, wstat->uid, &stat->uid);
-	p9_put_str(bufp, wstat->gid, &stat->gid);
-	p9_put_str(bufp, wstat->muid, &stat->muid);
-
-	if (dotu) {
-		p9_put_str(bufp, wstat->extension, &stat->extension);
-		p9_put_int32(bufp, wstat->n_uid, &stat->n_uid);
-		p9_put_int32(bufp, wstat->n_gid, &stat->n_gid);
-		p9_put_int32(bufp, wstat->n_muid, &stat->n_muid);
-	}
-}
-
-static struct p9_fcall *
-p9_create_common(struct cbuf *bufp, u32 size, u8 id)
-{
-	struct p9_fcall *fc;
-
-	size += 4 + 1 + 2;	/* size[4] id[1] tag[2] */
-	fc = kmalloc(sizeof(struct p9_fcall) + size, GFP_KERNEL);
-	if (!fc)
-		return ERR_PTR(-ENOMEM);
-
-	fc->sdata = (char *)fc + sizeof(*fc);
-
-	buf_init(bufp, (char *)fc->sdata, size);
-	p9_put_int32(bufp, size, &fc->size);
-	p9_put_int8(bufp, id, &fc->id);
-	p9_put_int16(bufp, P9_NOTAG, &fc->tag);
-
-	return fc;
-}
-
-/**
- * p9_set_tag - set the tag field of an &p9_fcall structure
- * @fc: fcall structure to set tag within
- * @tag: tag id to set
- */
-
-void p9_set_tag(struct p9_fcall *fc, u16 tag)
-{
-	fc->tag = tag;
-	*(__le16 *) (fc->sdata + 5) = cpu_to_le16(tag);
-}
-EXPORT_SYMBOL(p9_set_tag);
-
-/**
- * p9_create_tversion - allocates and creates a T_VERSION request
- * @msize: requested maximum data size
- * @version: version string to negotiate
- *
- */
-struct p9_fcall *p9_create_tversion(u32 msize, char *version)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 4 + 2 + strlen(version);	/* msize[4] version[s] */
-	fc = p9_create_common(bufp, size, P9_TVERSION);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, msize, &fc->params.tversion.msize);
-	p9_put_str(bufp, version, &fc->params.tversion.version);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tversion);
-
-/**
- * p9_create_tauth - allocates and creates a T_AUTH request
- * @afid: handle to use for authentication protocol
- * @uname: user name attempting to authenticate
- * @aname: mount specifier for remote server
- * @n_uname: numeric id for user attempting to authneticate
- * @dotu: 9P2000.u extension flag
- *
- */
-
-struct p9_fcall *p9_create_tauth(u32 afid, char *uname, char *aname,
-	u32 n_uname, int dotu)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	/* afid[4] uname[s] aname[s] */
-	size = 4 + 2 + 2;
-	if (uname)
-		size += strlen(uname);
-
-	if (aname)
-		size += strlen(aname);
-
-	if (dotu)
-		size += 4;	/* n_uname */
-
-	fc = p9_create_common(bufp, size, P9_TAUTH);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, afid, &fc->params.tauth.afid);
-	p9_put_str(bufp, uname, &fc->params.tauth.uname);
-	p9_put_str(bufp, aname, &fc->params.tauth.aname);
-	if (dotu)
-		p9_put_int32(bufp, n_uname, &fc->params.tauth.n_uname);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tauth);
-
-/**
- * p9_create_tattach - allocates and creates a T_ATTACH request
- * @fid: handle to use for the new mount point
- * @afid: handle to use for authentication protocol
- * @uname: user name attempting to attach
- * @aname: mount specifier for remote server
- * @n_uname: numeric id for user attempting to attach
- * @n_uname: numeric id for user attempting to attach
- * @dotu: 9P2000.u extension flag
- *
- */
-
-struct p9_fcall *
-p9_create_tattach(u32 fid, u32 afid, char *uname, char *aname,
-	u32 n_uname, int dotu)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	/* fid[4] afid[4] uname[s] aname[s] */
-	size = 4 + 4 + 2 + 2;
-	if (uname)
-		size += strlen(uname);
-
-	if (aname)
-		size += strlen(aname);
-
-	if (dotu)
-		size += 4;	/* n_uname */
-
-	fc = p9_create_common(bufp, size, P9_TATTACH);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.tattach.fid);
-	p9_put_int32(bufp, afid, &fc->params.tattach.afid);
-	p9_put_str(bufp, uname, &fc->params.tattach.uname);
-	p9_put_str(bufp, aname, &fc->params.tattach.aname);
-	if (dotu)
-		p9_put_int32(bufp, n_uname, &fc->params.tattach.n_uname);
-
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tattach);
-
-/**
- * p9_create_tflush - allocates and creates a T_FLUSH request
- * @oldtag: tag id for the transaction we are attempting to cancel
- *
- */
-
-struct p9_fcall *p9_create_tflush(u16 oldtag)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 2;		/* oldtag[2] */
-	fc = p9_create_common(bufp, size, P9_TFLUSH);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int16(bufp, oldtag, &fc->params.tflush.oldtag);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tflush);
-
-/**
- * p9_create_twalk - allocates and creates a T_FLUSH request
- * @fid: handle we are traversing from
- * @newfid: a new handle for this transaction
- * @nwname: number of path elements to traverse
- * @wnames: array of path elements
- *
- */
-
-struct p9_fcall *p9_create_twalk(u32 fid, u32 newfid, u16 nwname,
-				     char **wnames)
-{
-	int i, size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	if (nwname > P9_MAXWELEM) {
-		P9_DPRINTK(P9_DEBUG_ERROR, "nwname > %d\n", P9_MAXWELEM);
-		return NULL;
-	}
-
-	size = 4 + 4 + 2;	/* fid[4] newfid[4] nwname[2] ... */
-	for (i = 0; i < nwname; i++) {
-		size += 2 + strlen(wnames[i]);	/* wname[s] */
-	}
-
-	fc = p9_create_common(bufp, size, P9_TWALK);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.twalk.fid);
-	p9_put_int32(bufp, newfid, &fc->params.twalk.newfid);
-	p9_put_int16(bufp, nwname, &fc->params.twalk.nwname);
-	for (i = 0; i < nwname; i++) {
-		p9_put_str(bufp, wnames[i], &fc->params.twalk.wnames[i]);
-	}
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_twalk);
-
-/**
- * p9_create_topen - allocates and creates a T_OPEN request
- * @fid: handle we are trying to open
- * @mode: what mode we are trying to open the file in
- *
- */
-
-struct p9_fcall *p9_create_topen(u32 fid, u8 mode)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 4 + 1;		/* fid[4] mode[1] */
-	fc = p9_create_common(bufp, size, P9_TOPEN);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.topen.fid);
-	p9_put_int8(bufp, mode, &fc->params.topen.mode);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_topen);
-
-/**
- * p9_create_tcreate - allocates and creates a T_CREATE request
- * @fid: handle of directory we are trying to create in
- * @name: name of the file we are trying to create
- * @perm: permissions for the file we are trying to create
- * @mode: what mode we are trying to open the file in
- * @extension: 9p2000.u extension string (for special files)
- * @dotu: 9p2000.u enabled flag
- *
- * Note: Plan 9 create semantics include opening the resulting file
- * which is why mode is included.
- */
-
-struct p9_fcall *p9_create_tcreate(u32 fid, char *name, u32 perm, u8 mode,
-	char *extension, int dotu)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	/* fid[4] name[s] perm[4] mode[1] */
-	size = 4 + 2 + strlen(name) + 4 + 1;
-	if (dotu) {
-		size += 2 +			/* extension[s] */
-		    (extension == NULL ? 0 : strlen(extension));
-	}
-
-	fc = p9_create_common(bufp, size, P9_TCREATE);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.tcreate.fid);
-	p9_put_str(bufp, name, &fc->params.tcreate.name);
-	p9_put_int32(bufp, perm, &fc->params.tcreate.perm);
-	p9_put_int8(bufp, mode, &fc->params.tcreate.mode);
-	if (dotu)
-		p9_put_str(bufp, extension, &fc->params.tcreate.extension);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tcreate);
-
-/**
- * p9_create_tread - allocates and creates a T_READ request
- * @fid: handle of the file we are trying to read
- * @offset: offset to start reading from
- * @count: how many bytes to read
- */
-
-struct p9_fcall *p9_create_tread(u32 fid, u64 offset, u32 count)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 4 + 8 + 4;	/* fid[4] offset[8] count[4] */
-	fc = p9_create_common(bufp, size, P9_TREAD);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.tread.fid);
-	p9_put_int64(bufp, offset, &fc->params.tread.offset);
-	p9_put_int32(bufp, count, &fc->params.tread.count);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tread);
-
-/**
- * p9_create_twrite - allocates and creates a T_WRITE request from the kernel
- * @fid: handle of the file we are trying to write
- * @offset: offset to start writing at
- * @count: how many bytes to write
- * @data: data to write
- *
- * This function will create a requst with data buffers from the kernel
- * such as the page cache.
- */
-
-struct p9_fcall *p9_create_twrite(u32 fid, u64 offset, u32 count,
-				      const char *data)
-{
-	int size, err;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	/* fid[4] offset[8] count[4] data[count] */
-	size = 4 + 8 + 4 + count;
-	fc = p9_create_common(bufp, size, P9_TWRITE);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.twrite.fid);
-	p9_put_int64(bufp, offset, &fc->params.twrite.offset);
-	p9_put_int32(bufp, count, &fc->params.twrite.count);
-	err = p9_put_data(bufp, data, count, &fc->params.twrite.data);
-	if (err) {
-		kfree(fc);
-		fc = ERR_PTR(err);
-		goto error;
-	}
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_twrite);
-
-/**
- * p9_create_twrite_u - allocates and creates a T_WRITE request from userspace
- * @fid: handle of the file we are trying to write
- * @offset: offset to start writing at
- * @count: how many bytes to write
- * @data: data to write
- *
- * This function will create a request with data buffers from userspace
- */
-
-struct p9_fcall *p9_create_twrite_u(u32 fid, u64 offset, u32 count,
-				      const char __user *data)
-{
-	int size, err;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	/* fid[4] offset[8] count[4] data[count] */
-	size = 4 + 8 + 4 + count;
-	fc = p9_create_common(bufp, size, P9_TWRITE);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.twrite.fid);
-	p9_put_int64(bufp, offset, &fc->params.twrite.offset);
-	p9_put_int32(bufp, count, &fc->params.twrite.count);
-	err = p9_put_user_data(bufp, data, count, &fc->params.twrite.data);
-	if (err) {
-		kfree(fc);
-		fc = ERR_PTR(err);
-		goto error;
-	}
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_twrite_u);
-
-/**
- * p9_create_tclunk - allocate a request to forget about a file handle
- * @fid: handle of the file we closing or forgetting about
- *
- * clunk is used both to close open files and to discard transient handles
- * which may be created during meta-data operations and hierarchy traversal.
- */
-
-struct p9_fcall *p9_create_tclunk(u32 fid)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 4;		/* fid[4] */
-	fc = p9_create_common(bufp, size, P9_TCLUNK);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.tclunk.fid);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tclunk);
-
-/**
- * p9_create_tremove - allocate and create a request to remove a file
- * @fid: handle of the file or directory we are removing
- *
- */
-
-struct p9_fcall *p9_create_tremove(u32 fid)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 4;		/* fid[4] */
-	fc = p9_create_common(bufp, size, P9_TREMOVE);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.tremove.fid);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tremove);
-
-/**
- * p9_create_tstat - allocate and populate a request for attributes
- * @fid: handle of the file or directory we are trying to get the attributes of
- *
- */
-
-struct p9_fcall *p9_create_tstat(u32 fid)
-{
-	int size;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	size = 4;		/* fid[4] */
-	fc = p9_create_common(bufp, size, P9_TSTAT);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.tstat.fid);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_tstat);
-
-/**
- * p9_create_tstat - allocate and populate a request to change attributes
- * @fid: handle of the file or directory we are trying to change
- * @wstat: &p9_stat structure with attributes we wish to set
- * @dotu: 9p2000.u enabled flag
- *
- */
-
-struct p9_fcall *p9_create_twstat(u32 fid, struct p9_wstat *wstat,
-				      int dotu)
-{
-	int size, statsz;
-	struct p9_fcall *fc;
-	struct cbuf buffer;
-	struct cbuf *bufp = &buffer;
-
-	statsz = p9_size_wstat(wstat, dotu);
-	size = 4 + 2 + 2 + statsz;	/* fid[4] stat[n] */
-	fc = p9_create_common(bufp, size, P9_TWSTAT);
-	if (IS_ERR(fc))
-		goto error;
-
-	p9_put_int32(bufp, fid, &fc->params.twstat.fid);
-	buf_put_int16(bufp, statsz + 2);
-	p9_put_wstat(bufp, wstat, &fc->params.twstat.stat, statsz, dotu);
-
-	if (buf_check_overflow(bufp)) {
-		kfree(fc);
-		fc = ERR_PTR(-ENOMEM);
-	}
-error:
-	return fc;
-}
-EXPORT_SYMBOL(p9_create_twstat);
-
diff --git a/net/9p/fcprint.c b/net/9p/fcprint.c
deleted file mode 100644
index 53dd8e2..0000000
--- a/net/9p/fcprint.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- *  net/9p/fcprint.c
- *
- *  Print 9P call.
- *
- *  Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2
- *  as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to:
- *  Free Software Foundation
- *  51 Franklin Street, Fifth Floor
- *  Boston, MA  02111-1301  USA
- *
- */
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/idr.h>
-#include <net/9p/9p.h>
-
-#ifdef CONFIG_NET_9P_DEBUG
-
-static int
-p9_printqid(char *buf, int buflen, struct p9_qid *q)
-{
-	int n;
-	char b[10];
-
-	n = 0;
-	if (q->type & P9_QTDIR)
-		b[n++] = 'd';
-	if (q->type & P9_QTAPPEND)
-		b[n++] = 'a';
-	if (q->type & P9_QTAUTH)
-		b[n++] = 'A';
-	if (q->type & P9_QTEXCL)
-		b[n++] = 'l';
-	if (q->type & P9_QTTMP)
-		b[n++] = 't';
-	if (q->type & P9_QTSYMLINK)
-		b[n++] = 'L';
-	b[n] = '\0';
-
-	return scnprintf(buf, buflen, "(%.16llx %x %s)",
-					(long long int) q->path, q->version, b);
-}
-
-static int
-p9_printperm(char *buf, int buflen, int perm)
-{
-	int n;
-	char b[15];
-
-	n = 0;
-	if (perm & P9_DMDIR)
-		b[n++] = 'd';
-	if (perm & P9_DMAPPEND)
-		b[n++] = 'a';
-	if (perm & P9_DMAUTH)
-		b[n++] = 'A';
-	if (perm & P9_DMEXCL)
-		b[n++] = 'l';
-	if (perm & P9_DMTMP)
-		b[n++] = 't';
-	if (perm & P9_DMDEVICE)
-		b[n++] = 'D';
-	if (perm & P9_DMSOCKET)
-		b[n++] = 'S';
-	if (perm & P9_DMNAMEDPIPE)
-		b[n++] = 'P';
-	if (perm & P9_DMSYMLINK)
-		b[n++] = 'L';
-	b[n] = '\0';
-
-	return scnprintf(buf, buflen, "%s%03o", b, perm&077);
-}
-
-static int
-p9_printstat(char *buf, int buflen, struct p9_stat *st, int extended)
-{
-	int n;
-
-	n = scnprintf(buf, buflen, "'%.*s' '%.*s'", st->name.len,
-		st->name.str, st->uid.len, st->uid.str);
-	if (extended)
-		n += scnprintf(buf+n, buflen-n, "(%d)", st->n_uid);
-
-	n += scnprintf(buf+n, buflen-n, " '%.*s'", st->gid.len, st->gid.str);
-	if (extended)
-		n += scnprintf(buf+n, buflen-n, "(%d)", st->n_gid);
-
-	n += scnprintf(buf+n, buflen-n, " '%.*s'", st->muid.len, st->muid.str);
-	if (extended)
-		n += scnprintf(buf+n, buflen-n, "(%d)", st->n_muid);
-
-	n += scnprintf(buf+n, buflen-n, " q ");
-	n += p9_printqid(buf+n, buflen-n, &st->qid);
-	n += scnprintf(buf+n, buflen-n, " m ");
-	n += p9_printperm(buf+n, buflen-n, st->mode);
-	n += scnprintf(buf+n, buflen-n, " at %d mt %d l %lld",
-		st->atime, st->mtime, (long long int) st->length);
-
-	if (extended)
-		n += scnprintf(buf+n, buflen-n, " ext '%.*s'",
-			st->extension.len, st->extension.str);
-
-	return n;
-}
-
-static int
-p9_dumpdata(char *buf, int buflen, u8 *data, int datalen)
-{
-	int i, n;
-
-	i = n = 0;
-	while (i < datalen) {
-		n += scnprintf(buf + n, buflen - n, "%02x", data[i]);
-		if (i%4 == 3)
-			n += scnprintf(buf + n, buflen - n, " ");
-		if (i%32 == 31)
-			n += scnprintf(buf + n, buflen - n, "\n");
-
-		i++;
-	}
-	n += scnprintf(buf + n, buflen - n, "\n");
-
-	return n;
-}
-
-static int
-p9_printdata(char *buf, int buflen, u8 *data, int datalen)
-{
-	return p9_dumpdata(buf, buflen, data, datalen < 16?datalen:16);
-}
-
-/**
- * p9_printfcall - decode and print a protocol structure into a buffer
- * @buf: buffer to deposit decoded structure into
- * @buflen: available space in buffer
- * @fc: protocol rpc structure of type &p9_fcall
- * @extended: whether or not session is operating with extended protocol
- */
-
-int
-p9_printfcall(char *buf, int buflen, struct p9_fcall *fc, int extended)
-{
-	int i, ret, type, tag;
-
-	if (!fc)
-		return scnprintf(buf, buflen, "<NULL>");
-
-	type = fc->id;
-	tag = fc->tag;
-
-	ret = 0;
-	switch (type) {
-	case P9_TVERSION:
-		ret += scnprintf(buf+ret, buflen-ret,
-				"Tversion tag %u msize %u version '%.*s'", tag,
-				fc->params.tversion.msize,
-				fc->params.tversion.version.len,
-				fc->params.tversion.version.str);
-		break;
-
-	case P9_RVERSION:
-		ret += scnprintf(buf+ret, buflen-ret,
-				"Rversion tag %u msize %u version '%.*s'", tag,
-				fc->params.rversion.msize,
-				fc->params.rversion.version.len,
-				fc->params.rversion.version.str);
-		break;
-
-	case P9_TAUTH:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Tauth tag %u afid %d uname '%.*s' aname '%.*s'", tag,
-			fc->params.tauth.afid, fc->params.tauth.uname.len,
-			fc->params.tauth.uname.str, fc->params.tauth.aname.len,
-			fc->params.tauth.aname.str);
-		break;
-
-	case P9_RAUTH:
-		ret += scnprintf(buf+ret, buflen-ret, "Rauth tag %u qid ", tag);
-		p9_printqid(buf+ret, buflen-ret, &fc->params.rauth.qid);
-		break;
-
-	case P9_TATTACH:
-		ret += scnprintf(buf+ret, buflen-ret,
-		 "Tattach tag %u fid %d afid %d uname '%.*s' aname '%.*s'", tag,
-		 fc->params.tattach.fid, fc->params.tattach.afid,
-		 fc->params.tattach.uname.len, fc->params.tattach.uname.str,
-		 fc->params.tattach.aname.len, fc->params.tattach.aname.str);
-		break;
-
-	case P9_RATTACH:
-		ret += scnprintf(buf+ret, buflen-ret, "Rattach tag %u qid ",
-									tag);
-		p9_printqid(buf+ret, buflen-ret, &fc->params.rattach.qid);
-		break;
-
-	case P9_RERROR:
-		ret += scnprintf(buf+ret, buflen-ret,
-				"Rerror tag %u ename '%.*s'", tag,
-				fc->params.rerror.error.len,
-				fc->params.rerror.error.str);
-		if (extended)
-			ret += scnprintf(buf+ret, buflen-ret, " ecode %d\n",
-				fc->params.rerror.errno);
-		break;
-
-	case P9_TFLUSH:
-		ret += scnprintf(buf+ret, buflen-ret, "Tflush tag %u oldtag %u",
-			tag, fc->params.tflush.oldtag);
-		break;
-
-	case P9_RFLUSH:
-		ret += scnprintf(buf+ret, buflen-ret, "Rflush tag %u", tag);
-		break;
-
-	case P9_TWALK:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Twalk tag %u fid %d newfid %d nwname %d", tag,
-			fc->params.twalk.fid, fc->params.twalk.newfid,
-			fc->params.twalk.nwname);
-		for (i = 0; i < fc->params.twalk.nwname; i++)
-			ret += scnprintf(buf+ret, buflen-ret, " '%.*s'",
-				fc->params.twalk.wnames[i].len,
-				fc->params.twalk.wnames[i].str);
-		break;
-
-	case P9_RWALK:
-		ret += scnprintf(buf+ret, buflen-ret, "Rwalk tag %u nwqid %d",
-			tag, fc->params.rwalk.nwqid);
-		for (i = 0; i < fc->params.rwalk.nwqid; i++)
-			ret += p9_printqid(buf+ret, buflen-ret,
-				&fc->params.rwalk.wqids[i]);
-		break;
-
-	case P9_TOPEN:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Topen tag %u fid %d mode %d", tag,
-			fc->params.topen.fid, fc->params.topen.mode);
-		break;
-
-	case P9_ROPEN:
-		ret += scnprintf(buf+ret, buflen-ret, "Ropen tag %u", tag);
-		ret += p9_printqid(buf+ret, buflen-ret, &fc->params.ropen.qid);
-		ret += scnprintf(buf+ret, buflen-ret, " iounit %d",
-			fc->params.ropen.iounit);
-		break;
-
-	case P9_TCREATE:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Tcreate tag %u fid %d name '%.*s' perm ", tag,
-			fc->params.tcreate.fid, fc->params.tcreate.name.len,
-			fc->params.tcreate.name.str);
-
-		ret += p9_printperm(buf+ret, buflen-ret,
-						fc->params.tcreate.perm);
-		ret += scnprintf(buf+ret, buflen-ret, " mode %d",
-			fc->params.tcreate.mode);
-		break;
-
-	case P9_RCREATE:
-		ret += scnprintf(buf+ret, buflen-ret, "Rcreate tag %u", tag);
-		ret += p9_printqid(buf+ret, buflen-ret,
-						&fc->params.rcreate.qid);
-		ret += scnprintf(buf+ret, buflen-ret, " iounit %d",
-			fc->params.rcreate.iounit);
-		break;
-
-	case P9_TREAD:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Tread tag %u fid %d offset %lld count %u", tag,
-			fc->params.tread.fid,
-			(long long int) fc->params.tread.offset,
-			fc->params.tread.count);
-		break;
-
-	case P9_RREAD:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Rread tag %u count %u data ", tag,
-			fc->params.rread.count);
-		ret += p9_printdata(buf+ret, buflen-ret, fc->params.rread.data,
-			fc->params.rread.count);
-		break;
-
-	case P9_TWRITE:
-		ret += scnprintf(buf+ret, buflen-ret,
-			"Twrite tag %u fid %d offset %lld count %u data ",
-			tag, fc->params.twrite.fid,
-			(long long int) fc->params.twrite.offset,
-			fc->params.twrite.count);
-		ret += p9_printdata(buf+ret, buflen-ret, fc->params.twrite.data,
-			fc->params.twrite.count);
-		break;
-
-	case P9_RWRITE:
-		ret += scnprintf(buf+ret, buflen-ret, "Rwrite tag %u count %u",
-			tag, fc->params.rwrite.count);
-		break;
-
-	case P9_TCLUNK:
-		ret += scnprintf(buf+ret, buflen-ret, "Tclunk tag %u fid %d",
-			tag, fc->params.tclunk.fid);
-		break;
-
-	case P9_RCLUNK:
-		ret += scnprintf(buf+ret, buflen-ret, "Rclunk tag %u", tag);
-		break;
-
-	case P9_TREMOVE:
-		ret += scnprintf(buf+ret, buflen-ret, "Tremove tag %u fid %d",
-			tag, fc->params.tremove.fid);
-		break;
-
-	case P9_RREMOVE:
-		ret += scnprintf(buf+ret, buflen-ret, "Rremove tag %u", tag);
-		break;
-
-	case P9_TSTAT:
-		ret += scnprintf(buf+ret, buflen-ret, "Tstat tag %u fid %d",
-			tag, fc->params.tstat.fid);
-		break;
-
-	case P9_RSTAT:
-		ret += scnprintf(buf+ret, buflen-ret, "Rstat tag %u ", tag);
-		ret += p9_printstat(buf+ret, buflen-ret, &fc->params.rstat.stat,
-			extended);
-		break;
-
-	case P9_TWSTAT:
-		ret += scnprintf(buf+ret, buflen-ret, "Twstat tag %u fid %d ",
-			tag, fc->params.twstat.fid);
-		ret += p9_printstat(buf+ret, buflen-ret,
-					&fc->params.twstat.stat, extended);
-		break;
-
-	case P9_RWSTAT:
-		ret += scnprintf(buf+ret, buflen-ret, "Rwstat tag %u", tag);
-		break;
-
-	default:
-		ret += scnprintf(buf+ret, buflen-ret, "unknown type %d", type);
-		break;
-	}
-
-	return ret;
-}
-#else
-int
-p9_printfcall(char *buf, int buflen, struct p9_fcall *fc, int extended)
-{
-	return 0;
-}
-#endif /* CONFIG_NET_9P_DEBUG */
-EXPORT_SYMBOL(p9_printfcall);
-
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 1084feb..cf8a412 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -29,6 +29,7 @@
 #include <net/9p/9p.h>
 #include <linux/fs.h>
 #include <linux/parser.h>
+#include <net/9p/client.h>
 #include <net/9p/transport.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
new file mode 100644
index 0000000..dcd7666
--- /dev/null
+++ b/net/9p/protocol.c
@@ -0,0 +1,567 @@
+/*
+ * net/9p/protocol.c
+ *
+ * 9P Protocol Support Code
+ *
+ *  Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
+ *
+ *  Based on code from Anthony Liguori <aliguori@us.ibm.com>
+ *  Copyright (C) 2008 by IBM, Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include "protocol.h"
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef offset_of
+#define offset_of(type, memb) \
+	((unsigned long)(&((type *)0)->memb))
+#endif
+#ifndef container_of
+#define container_of(obj, type, memb) \
+	((type *)(((char *)obj) - offset_of(type, memb)))
+#endif
+
+static int
+p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...);
+
+#ifdef CONFIG_NET_9P_DEBUG
+void
+p9pdu_dump(int way, struct p9_fcall *pdu)
+{
+	int i, n;
+	u8 *data = pdu->sdata;
+	int datalen = pdu->size;
+	char buf[255];
+	int buflen = 255;
+
+	i = n = 0;
+	if (datalen > (buflen-16))
+		datalen = buflen-16;
+	while (i < datalen) {
+		n += scnprintf(buf + n, buflen - n, "%02x ", data[i]);
+		if (i%4 == 3)
+			n += scnprintf(buf + n, buflen - n, " ");
+		if (i%32 == 31)
+			n += scnprintf(buf + n, buflen - n, "\n");
+
+		i++;
+	}
+	n += scnprintf(buf + n, buflen - n, "\n");
+
+	if (way)
+		P9_DPRINTK(P9_DEBUG_PKT, "[[[(%d) %s\n", datalen, buf);
+	else
+		P9_DPRINTK(P9_DEBUG_PKT, "]]](%d) %s\n", datalen, buf);
+}
+#else
+void
+p9pdu_dump(int way, struct p9_fcall *pdu)
+{
+}
+#endif
+EXPORT_SYMBOL(p9pdu_dump);
+
+void p9stat_free(struct p9_wstat *stbuf)
+{
+	kfree(stbuf->name);
+	kfree(stbuf->uid);
+	kfree(stbuf->gid);
+	kfree(stbuf->muid);
+	kfree(stbuf->extension);
+}
+EXPORT_SYMBOL(p9stat_free);
+
+static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
+{
+	size_t len = MIN(pdu->size - pdu->offset, size);
+	memcpy(data, &pdu->sdata[pdu->offset], len);
+	pdu->offset += len;
+	return size - len;
+}
+
+static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
+{
+	size_t len = MIN(pdu->capacity - pdu->size, size);
+	memcpy(&pdu->sdata[pdu->size], data, len);
+	pdu->size += len;
+	return size - len;
+}
+
+static size_t
+pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
+{
+	size_t len = MIN(pdu->capacity - pdu->size, size);
+	int err = copy_from_user(&pdu->sdata[pdu->size], udata, len);
+	if (err)
+		printk(KERN_WARNING "pdu_write_u returning: %d\n", err);
+
+	pdu->size += len;
+	return size - len;
+}
+
+/*
+	b - int8_t
+	w - int16_t
+	d - int32_t
+	q - int64_t
+	s - string
+	S - stat
+	Q - qid
+	D - data blob (int32_t size followed by void *, results are not freed)
+	T - array of strings (int16_t count, followed by strings)
+	R - array of qids (int16_t count, followed by qids)
+	? - if optional = 1, continue parsing
+*/
+
+static int
+p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
+{
+	const char *ptr;
+	int errcode = 0;
+
+	for (ptr = fmt; *ptr; ptr++) {
+		switch (*ptr) {
+		case 'b':{
+				int8_t *val = va_arg(ap, int8_t *);
+				if (pdu_read(pdu, val, sizeof(*val))) {
+					errcode = -EFAULT;
+					break;
+				}
+			}
+			break;
+		case 'w':{
+				int16_t *val = va_arg(ap, int16_t *);
+				if (pdu_read(pdu, val, sizeof(*val))) {
+					errcode = -EFAULT;
+					break;
+				}
+				*val = cpu_to_le16(*val);
+			}
+			break;
+		case 'd':{
+				int32_t *val = va_arg(ap, int32_t *);
+				if (pdu_read(pdu, val, sizeof(*val))) {
+					errcode = -EFAULT;
+					break;
+				}
+				*val = cpu_to_le32(*val);
+			}
+			break;
+		case 'q':{
+				int64_t *val = va_arg(ap, int64_t *);
+				if (pdu_read(pdu, val, sizeof(*val))) {
+					errcode = -EFAULT;
+					break;
+				}
+				*val = cpu_to_le64(*val);
+			}
+			break;
+		case 's':{
+				char **sptr = va_arg(ap, char **);
+				int16_t len;
+				int size;
+
+				errcode = p9pdu_readf(pdu, optional, "w", &len);
+				if (errcode)
+					break;
+
+				size = MAX(len, 0);
+
+				*sptr = kmalloc(size + 1, GFP_KERNEL);
+				if (*sptr == NULL) {
+					errcode = -EFAULT;
+					break;
+				}
+				if (pdu_read(pdu, *sptr, size)) {
+					errcode = -EFAULT;
+					kfree(*sptr);
+					*sptr = NULL;
+				} else
+					(*sptr)[size] = 0;
+			}
+			break;
+		case 'Q':{
+				struct p9_qid *qid =
+				    va_arg(ap, struct p9_qid *);
+
+				errcode = p9pdu_readf(pdu, optional, "bdq",
+						      &qid->type, &qid->version,
+						      &qid->path);
+			}
+			break;
+		case 'S':{
+				struct p9_wstat *stbuf =
+				    va_arg(ap, struct p9_wstat *);
+
+				memset(stbuf, 0, sizeof(struct p9_wstat));
+				stbuf->n_uid = stbuf->n_gid = stbuf->n_muid =
+									-1;
+				errcode =
+				    p9pdu_readf(pdu, optional,
+						"wwdQdddqssss?sddd",
+						&stbuf->size, &stbuf->type,
+						&stbuf->dev, &stbuf->qid,
+						&stbuf->mode, &stbuf->atime,
+						&stbuf->mtime, &stbuf->length,
+						&stbuf->name, &stbuf->uid,
+						&stbuf->gid, &stbuf->muid,
+						&stbuf->extension,
+						&stbuf->n_uid, &stbuf->n_gid,
+						&stbuf->n_muid);
+				if (errcode)
+					p9stat_free(stbuf);
+			}
+			break;
+		case 'D':{
+				int32_t *count = va_arg(ap, int32_t *);
+				void **data = va_arg(ap, void **);
+
+				errcode =
+				    p9pdu_readf(pdu, optional, "d", count);
+				if (!errcode) {
+					*count =
+					    MIN(*count,
+						pdu->size - pdu->offset);
+					*data = &pdu->sdata[pdu->offset];
+				}
+			}
+			break;
+		case 'T':{
+				int16_t *nwname = va_arg(ap, int16_t *);
+				char ***wnames = va_arg(ap, char ***);
+
+				errcode =
+				    p9pdu_readf(pdu, optional, "w", nwname);
+				if (!errcode) {
+					*wnames =
+					    kmalloc(sizeof(char *) * *nwname,
+						    GFP_KERNEL);
+					if (!*wnames)
+						errcode = -ENOMEM;
+				}
+
+				if (!errcode) {
+					int i;
+
+					for (i = 0; i < *nwname; i++) {
+						errcode =
+						    p9pdu_readf(pdu, optional,
+								"s",
+								&(*wnames)[i]);
+						if (errcode)
+							break;
+					}
+				}
+
+				if (errcode) {
+					if (*wnames) {
+						int i;
+
+						for (i = 0; i < *nwname; i++)
+							kfree((*wnames)[i]);
+					}
+					kfree(*wnames);
+					*wnames = NULL;
+				}
+			}
+			break;
+		case 'R':{
+				int16_t *nwqid = va_arg(ap, int16_t *);
+				struct p9_qid **wqids =
+				    va_arg(ap, struct p9_qid **);
+
+				*wqids = NULL;
+
+				errcode =
+				    p9pdu_readf(pdu, optional, "w", nwqid);
+				if (!errcode) {
+					*wqids =
+					    kmalloc(*nwqid *
+						    sizeof(struct p9_qid),
+						    GFP_KERNEL);
+					if (*wqids == NULL)
+						errcode = -ENOMEM;
+				}
+
+				if (!errcode) {
+					int i;
+
+					for (i = 0; i < *nwqid; i++) {
+						errcode =
+						    p9pdu_readf(pdu, optional,
+								"Q",
+								&(*wqids)[i]);
+						if (errcode)
+							break;
+					}
+				}
+
+				if (errcode) {
+					kfree(*wqids);
+					*wqids = NULL;
+				}
+			}
+			break;
+		case '?':
+			if (!optional)
+				return 0;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		if (errcode)
+			break;
+	}
+
+	return errcode;
+}
+
+int
+p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
+{
+	const char *ptr;
+	int errcode = 0;
+
+	for (ptr = fmt; *ptr; ptr++) {
+		switch (*ptr) {
+		case 'b':{
+				int8_t val = va_arg(ap, int);
+				if (pdu_write(pdu, &val, sizeof(val)))
+					errcode = -EFAULT;
+			}
+			break;
+		case 'w':{
+				int16_t val = va_arg(ap, int);
+				if (pdu_write(pdu, &val, sizeof(val)))
+					errcode = -EFAULT;
+			}
+			break;
+		case 'd':{
+				int32_t val = va_arg(ap, int32_t);
+				if (pdu_write(pdu, &val, sizeof(val)))
+					errcode = -EFAULT;
+			}
+			break;
+		case 'q':{
+				int64_t val = va_arg(ap, int64_t);
+				if (pdu_write(pdu, &val, sizeof(val)))
+					errcode = -EFAULT;
+			}
+			break;
+		case 's':{
+				const char *sptr = va_arg(ap, const char *);
+				int16_t len = 0;
+				if (sptr)
+					len = MIN(strlen(sptr), USHORT_MAX);
+
+				errcode = p9pdu_writef(pdu, optional, "w", len);
+				if (!errcode && pdu_write(pdu, sptr, len))
+					errcode = -EFAULT;
+			}
+			break;
+		case 'Q':{
+				const struct p9_qid *qid =
+				    va_arg(ap, const struct p9_qid *);
+				errcode =
+				    p9pdu_writef(pdu, optional, "bdq",
+						 qid->type, qid->version,
+						 qid->path);
+			} break;
+		case 'S':{
+				const struct p9_wstat *stbuf =
+				    va_arg(ap, const struct p9_wstat *);
+				errcode =
+				    p9pdu_writef(pdu, optional,
+						 "wwdQdddqssss?sddd",
+						 stbuf->size, stbuf->type,
+						 stbuf->dev, &stbuf->qid,
+						 stbuf->mode, stbuf->atime,
+						 stbuf->mtime, stbuf->length,
+						 stbuf->name, stbuf->uid,
+						 stbuf->gid, stbuf->muid,
+						 stbuf->extension, stbuf->n_uid,
+						 stbuf->n_gid, stbuf->n_muid);
+			} break;
+		case 'D':{
+				int32_t count = va_arg(ap, int32_t);
+				const void *data = va_arg(ap, const void *);
+
+				errcode =
+				    p9pdu_writef(pdu, optional, "d", count);
+				if (!errcode && pdu_write(pdu, data, count))
+					errcode = -EFAULT;
+			}
+			break;
+		case 'U':{
+				int32_t count = va_arg(ap, int32_t);
+				const char __user *udata =
+						va_arg(ap, const void __user *);
+				errcode =
+				    p9pdu_writef(pdu, optional, "d", count);
+				if (!errcode && pdu_write_u(pdu, udata, count))
+					errcode = -EFAULT;
+			}
+			break;
+		case 'T':{
+				int16_t nwname = va_arg(ap, int);
+				const char **wnames = va_arg(ap, const char **);
+
+				errcode =
+				    p9pdu_writef(pdu, optional, "w", nwname);
+				if (!errcode) {
+					int i;
+
+					for (i = 0; i < nwname; i++) {
+						errcode =
+						    p9pdu_writef(pdu, optional,
+								 "s",
+								 wnames[i]);
+						if (errcode)
+							break;
+					}
+				}
+			}
+			break;
+		case 'R':{
+				int16_t nwqid = va_arg(ap, int);
+				struct p9_qid *wqids =
+				    va_arg(ap, struct p9_qid *);
+
+				errcode =
+				    p9pdu_writef(pdu, optional, "w", nwqid);
+				if (!errcode) {
+					int i;
+
+					for (i = 0; i < nwqid; i++) {
+						errcode =
+						    p9pdu_writef(pdu, optional,
+								 "Q",
+								 &wqids[i]);
+						if (errcode)
+							break;
+					}
+				}
+			}
+			break;
+		case '?':
+			if (!optional)
+				return 0;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		if (errcode)
+			break;
+	}
+
+	return errcode;
+}
+
+int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = p9pdu_vreadf(pdu, optional, fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+
+static int
+p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = p9pdu_vwritef(pdu, optional, fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+
+int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu)
+{
+	struct p9_fcall fake_pdu;
+	int ret;
+
+	fake_pdu.size = len;
+	fake_pdu.capacity = len;
+	fake_pdu.sdata = buf;
+	fake_pdu.offset = 0;
+
+	ret = p9pdu_readf(&fake_pdu, dotu, "S", st);
+	if (ret) {
+		P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
+		p9pdu_dump(1, &fake_pdu);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(p9stat_read);
+
+int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
+{
+	return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
+}
+
+int p9pdu_finalize(struct p9_fcall *pdu)
+{
+	int size = pdu->size;
+	int err;
+
+	pdu->size = 0;
+	err = p9pdu_writef(pdu, 0, "d", size);
+	pdu->size = size;
+
+#ifdef CONFIG_NET_9P_DEBUG
+	if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT)
+		p9pdu_dump(0, pdu);
+#endif
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
+							pdu->id, pdu->tag);
+
+	return err;
+}
+
+void p9pdu_reset(struct p9_fcall *pdu)
+{
+	pdu->offset = 0;
+	pdu->size = 0;
+}
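
For readers of the new format-string machinery above, here is a minimal sketch (illustrative only, not part of this series) of driving the exported p9pdu_readf() against a raw buffer, using the same temporary-p9_fcall trick that p9stat_read() uses for a wire-format stat; the helper name example_read_header is made up for the example, and "dbw" mirrors the header layout written by p9pdu_prepare():

	/*
	 * Illustrative sketch only -- not part of this series.  Decode the
	 * 7-byte 9P header (little-endian size, type, tag) from a raw
	 * buffer by wrapping it in a temporary p9_fcall.
	 */
	static int example_read_header(char *buf, int len,
				       int32_t *size, int8_t *type, int16_t *tag)
	{
		struct p9_fcall fake_pdu;

		fake_pdu.size = len;
		fake_pdu.capacity = len;
		fake_pdu.sdata = buf;
		fake_pdu.offset = 0;

		/* "dbw" = int32_t size, int8_t type, int16_t tag */
		return p9pdu_readf(&fake_pdu, 0, "dbw", size, type, tag);
	}
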
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
new file mode 100644
index 0000000..ccde462
--- /dev/null
+++ b/net/9p/protocol.h
@@ -0,0 +1,34 @@
+/*
+ * net/9p/protocol.h
+ *
+ * 9P Protocol Support Code
+ *
+ *  Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
+ *
+ *  Based on code from Anthony Liguori <aliguori@us.ibm.com>
+ *  Copyright (C) 2008 by IBM, Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+int
+p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap);
+int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...);
+int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
+int p9pdu_finalize(struct p9_fcall *pdu);
+void p9pdu_dump(int, struct p9_fcall *);
+void p9pdu_reset(struct p9_fcall *pdu);
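
Because the header above only exports the va_list entry point p9pdu_vwritef(), callers are expected to supply their own varargs wrapper around the prepare / vwritef / finalize sequence. A hedged sketch follows (illustrative only; the helper name example_build_frame and the direct <stdarg.h> include are assumptions for the example, not code from this series):

	/*
	 * Illustrative sketch only -- not part of this series.  Build a
	 * complete 9P frame with the helpers exported by protocol.h:
	 * p9pdu_prepare() reserves the "dbw" header (size placeholder,
	 * type, tag), p9pdu_vwritef() appends the body described by fmt,
	 * and p9pdu_finalize() back-patches the leading size field.
	 */
	#include <stdarg.h>
	#include <net/9p/9p.h>
	#include "protocol.h"

	static int example_build_frame(struct p9_fcall *pdu, int16_t tag,
				       int8_t type, const char *fmt, ...)
	{
		va_list ap;
		int err;

		p9pdu_reset(pdu);
		err = p9pdu_prepare(pdu, tag, type);
		if (err)
			return err;

		va_start(ap, fmt);
		err = p9pdu_vwritef(pdu, 0, fmt, ap);	/* 0 => plain 9P2000, no ".u" extras */
		va_end(ap);
		if (err)
			return err;

		return p9pdu_finalize(pdu);
	}
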
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 6dabbdb..1df0356 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -39,12 +39,11 @@
 #include <linux/file.h>
 #include <linux/parser.h>
 #include <net/9p/9p.h>
+#include <net/9p/client.h>
 #include <net/9p/transport.h>
 
 #define P9_PORT 564
 #define MAX_SOCK_BUF (64*1024)
-#define ERREQFLUSH	1
-#define SCHED_TIMEOUT	10
 #define MAXPOLLWADDR	2
 
 /**
@@ -61,7 +60,6 @@
 	u16 port;
 };
 
-
 /**
  * struct p9_trans_fd - transport state
  * @rd: reference to file to read from
@@ -100,60 +98,22 @@
 	Wpending = 8,		/* can write */
 };
 
-enum {
-	None,
-	Flushing,
-	Flushed,
-};
-
-struct p9_req;
-typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a);
-
-/**
- * struct p9_req - fd mux encoding of an rpc transaction
- * @lock: protects req_list
- * @tag: numeric tag for rpc transaction
- * @tcall: request &p9_fcall structure
- * @rcall: response &p9_fcall structure
- * @err: error state
- * @cb: callback for when response is received
- * @cba: argument to pass to callback
- * @flush: flag to indicate RPC has been flushed
- * @req_list: list link for higher level objects to chain requests
- *
- */
-
-struct p9_req {
-	spinlock_t lock;
-	int tag;
-	struct p9_fcall *tcall;
-	struct p9_fcall *rcall;
-	int err;
-	p9_conn_req_callback cb;
-	void *cba;
-	int flush;
-	struct list_head req_list;
-};
-
-struct p9_mux_poll_task {
-	struct task_struct *task;
-	struct list_head mux_list;
-	int muxnum;
+struct p9_poll_wait {
+	struct p9_conn *conn;
+	wait_queue_t wait;
+	wait_queue_head_t *wait_addr;
 };
 
 /**
  * struct p9_conn - fd mux connection state information
- * @lock: protects mux_list (?)
  * @mux_list: list link for mux to manage multiple connections (?)
- * @poll_task: task polling on this connection
- * @msize: maximum size for connection (dup)
- * @extended: 9p2000.u flag (dup)
- * @trans: reference to transport instance for this connection
- * @tagpool: id accounting for transactions
+ * @client: reference to client instance for this connection
  * @err: error state
  * @req_list: accounting for requests which have been sent
  * @unsent_req_list: accounting for requests that haven't been sent
- * @rcall: current response &p9_fcall structure
+ * @req: current request being processed (if any)
+ * @tmp_buf: temporary buffer to read in header
+ * @rsize: amount to read for current frame
  * @rpos: read position in current frame
  * @rbuf: current read buffer
  * @wpos: write position for current frame
@@ -169,409 +129,300 @@
  */
 
 struct p9_conn {
-	spinlock_t lock; /* protect lock structure */
 	struct list_head mux_list;
-	struct p9_mux_poll_task *poll_task;
-	int msize;
-	unsigned char extended;
-	struct p9_trans *trans;
-	struct p9_idpool *tagpool;
+	struct p9_client *client;
 	int err;
 	struct list_head req_list;
 	struct list_head unsent_req_list;
-	struct p9_fcall *rcall;
+	struct p9_req_t *req;
+	char tmp_buf[7];
+	int rsize;
 	int rpos;
 	char *rbuf;
 	int wpos;
 	int wsize;
 	char *wbuf;
-	wait_queue_t poll_wait[MAXPOLLWADDR];
-	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
+	struct list_head poll_pending_link;
+	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
 	poll_table pt;
 	struct work_struct rq;
 	struct work_struct wq;
 	unsigned long wsched;
 };
 
-/**
- * struct p9_mux_rpc - fd mux rpc accounting structure
- * @m: connection this request was issued on
- * @err: error state
- * @tcall: request &p9_fcall
- * @rcall: response &p9_fcall
- * @wqueue: wait queue that client is blocked on for this rpc
- *
- * Bug: isn't this information duplicated elsewhere like &p9_req
- */
-
-struct p9_mux_rpc {
-	struct p9_conn *m;
-	int err;
-	struct p9_fcall *tcall;
-	struct p9_fcall *rcall;
-	wait_queue_head_t wqueue;
-};
-
-static int p9_poll_proc(void *);
-static void p9_read_work(struct work_struct *work);
-static void p9_write_work(struct work_struct *work);
-static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
-								poll_table *p);
-static int p9_fd_write(struct p9_trans *trans, void *v, int len);
-static int p9_fd_read(struct p9_trans *trans, void *v, int len);
-
-static DEFINE_MUTEX(p9_mux_task_lock);
+static DEFINE_SPINLOCK(p9_poll_lock);
+static LIST_HEAD(p9_poll_pending_list);
 static struct workqueue_struct *p9_mux_wq;
-
-static int p9_mux_num;
-static int p9_mux_poll_task_num;
-static struct p9_mux_poll_task p9_mux_poll_tasks[100];
-
-static void p9_conn_destroy(struct p9_conn *);
-static unsigned int p9_fd_poll(struct p9_trans *trans,
-						struct poll_table_struct *pt);
-
-#ifdef P9_NONBLOCK
-static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
-	p9_conn_req_callback cb, void *a);
-#endif /* P9_NONBLOCK */
-
-static void p9_conn_cancel(struct p9_conn *m, int err);
-
-static u16 p9_mux_get_tag(struct p9_conn *m)
-{
-	int tag;
-
-	tag = p9_idpool_get(m->tagpool);
-	if (tag < 0)
-		return P9_NOTAG;
-	else
-		return (u16) tag;
-}
-
-static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
-{
-	if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
-		p9_idpool_put(tag, m->tagpool);
-}
-
-/**
- * p9_mux_calc_poll_procs - calculates the number of polling procs
- * @muxnum: number of mounts
- *
- * Calculation is based on the number of mounted v9fs filesystems.
- * The current implementation returns sqrt of the number of mounts.
- */
-
-static int p9_mux_calc_poll_procs(int muxnum)
-{
-	int n;
-
-	if (p9_mux_poll_task_num)
-		n = muxnum / p9_mux_poll_task_num +
-		    (muxnum % p9_mux_poll_task_num ? 1 : 0);
-	else
-		n = 1;
-
-	if (n > ARRAY_SIZE(p9_mux_poll_tasks))
-		n = ARRAY_SIZE(p9_mux_poll_tasks);
-
-	return n;
-}
-
-static int p9_mux_poll_start(struct p9_conn *m)
-{
-	int i, n;
-	struct p9_mux_poll_task *vpt, *vptlast;
-	struct task_struct *pproc;
-
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
-		p9_mux_poll_task_num);
-	mutex_lock(&p9_mux_task_lock);
-
-	n = p9_mux_calc_poll_procs(p9_mux_num + 1);
-	if (n > p9_mux_poll_task_num) {
-		for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
-			if (p9_mux_poll_tasks[i].task == NULL) {
-				vpt = &p9_mux_poll_tasks[i];
-				P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
-									vpt);
-				pproc = kthread_create(p9_poll_proc, vpt,
-								"v9fs-poll");
-
-				if (!IS_ERR(pproc)) {
-					vpt->task = pproc;
-					INIT_LIST_HEAD(&vpt->mux_list);
-					vpt->muxnum = 0;
-					p9_mux_poll_task_num++;
-					wake_up_process(vpt->task);
-				}
-				break;
-			}
-		}
-
-		if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
-			P9_DPRINTK(P9_DEBUG_ERROR,
-					"warning: no free poll slots\n");
-	}
-
-	n = (p9_mux_num + 1) / p9_mux_poll_task_num +
-	    ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);
-
-	vptlast = NULL;
-	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
-		vpt = &p9_mux_poll_tasks[i];
-		if (vpt->task != NULL) {
-			vptlast = vpt;
-			if (vpt->muxnum < n) {
-				P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
-				list_add(&m->mux_list, &vpt->mux_list);
-				vpt->muxnum++;
-				m->poll_task = vpt;
-				memset(&m->poll_waddr, 0,
-							sizeof(m->poll_waddr));
-				init_poll_funcptr(&m->pt, p9_pollwait);
-				break;
-			}
-		}
-	}
-
-	if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
-		if (vptlast == NULL) {
-			mutex_unlock(&p9_mux_task_lock);
-			return -ENOMEM;
-		}
-
-		P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
-		list_add(&m->mux_list, &vptlast->mux_list);
-		vptlast->muxnum++;
-		m->poll_task = vptlast;
-		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
-		init_poll_funcptr(&m->pt, p9_pollwait);
-	}
-
-	p9_mux_num++;
-	mutex_unlock(&p9_mux_task_lock);
-
-	return 0;
-}
+static struct task_struct *p9_poll_task;
 
 static void p9_mux_poll_stop(struct p9_conn *m)
 {
+	unsigned long flags;
 	int i;
-	struct p9_mux_poll_task *vpt;
 
-	mutex_lock(&p9_mux_task_lock);
-	vpt = m->poll_task;
-	list_del(&m->mux_list);
-	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
-		if (m->poll_waddr[i] != NULL) {
-			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
-			m->poll_waddr[i] = NULL;
-		}
-	}
-	vpt->muxnum--;
-	if (!vpt->muxnum) {
-		P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
-		kthread_stop(vpt->task);
-		vpt->task = NULL;
-		p9_mux_poll_task_num--;
-	}
-	p9_mux_num--;
-	mutex_unlock(&p9_mux_task_lock);
-}
+	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
+		struct p9_poll_wait *pwait = &m->poll_wait[i];
 
-/**
- * p9_conn_create - allocate and initialize the per-session mux data
- * @trans: transport structure
- *
- * Note: Creates the polling task if this is the first session.
- */
-
-static struct p9_conn *p9_conn_create(struct p9_trans *trans)
-{
-	int i, n;
-	struct p9_conn *m;
-
-	P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans,
-								trans->msize);
-	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
-	if (!m)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&m->lock);
-	INIT_LIST_HEAD(&m->mux_list);
-	m->msize = trans->msize;
-	m->extended = trans->extended;
-	m->trans = trans;
-	m->tagpool = p9_idpool_create();
-	if (IS_ERR(m->tagpool)) {
-		kfree(m);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	INIT_LIST_HEAD(&m->req_list);
-	INIT_LIST_HEAD(&m->unsent_req_list);
-	INIT_WORK(&m->rq, p9_read_work);
-	INIT_WORK(&m->wq, p9_write_work);
-	n = p9_mux_poll_start(m);
-	if (n) {
-		kfree(m);
-		return ERR_PTR(n);
-	}
-
-	n = p9_fd_poll(trans, &m->pt);
-	if (n & POLLIN) {
-		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
-		set_bit(Rpending, &m->wsched);
-	}
-
-	if (n & POLLOUT) {
-		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
-		set_bit(Wpending, &m->wsched);
-	}
-
-	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
-		if (IS_ERR(m->poll_waddr[i])) {
-			p9_mux_poll_stop(m);
-			kfree(m);
-			return (void *)m->poll_waddr;	/* the error code */
+		if (pwait->wait_addr) {
+			remove_wait_queue(pwait->wait_addr, &pwait->wait);
+			pwait->wait_addr = NULL;
 		}
 	}
 
-	return m;
+	spin_lock_irqsave(&p9_poll_lock, flags);
+	list_del_init(&m->poll_pending_link);
+	spin_unlock_irqrestore(&p9_poll_lock, flags);
 }
 
 /**
- * p9_mux_destroy - cancels all pending requests and frees mux resources
- * @m: mux to destroy
+ * p9_conn_cancel - cancel all pending requests with error
+ * @m: mux data
+ * @err: error code
  *
  */
 
-static void p9_conn_destroy(struct p9_conn *m)
+static void p9_conn_cancel(struct p9_conn *m, int err)
 {
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
-		m->mux_list.prev, m->mux_list.next);
+	struct p9_req_t *req, *rtmp;
+	unsigned long flags;
+	LIST_HEAD(cancel_list);
 
-	p9_mux_poll_stop(m);
-	cancel_work_sync(&m->rq);
-	cancel_work_sync(&m->wq);
+	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 
-	p9_conn_cancel(m, -ECONNRESET);
+	spin_lock_irqsave(&m->client->lock, flags);
 
-	m->trans = NULL;
-	p9_idpool_destroy(m->tagpool);
-	kfree(m);
+	if (m->err) {
+		spin_unlock_irqrestore(&m->client->lock, flags);
+		return;
+	}
+
+	m->err = err;
+
+	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
+		req->status = REQ_STATUS_ERROR;
+		if (!req->t_err)
+			req->t_err = err;
+		list_move(&req->req_list, &cancel_list);
+	}
+	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+		req->status = REQ_STATUS_ERROR;
+		if (!req->t_err)
+			req->t_err = err;
+		list_move(&req->req_list, &cancel_list);
+	}
+	spin_unlock_irqrestore(&m->client->lock, flags);
+
+	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
+		list_del(&req->req_list);
+		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
+		p9_client_cb(m->client, req);
+	}
+}
+
+static unsigned int
+p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
+{
+	int ret, n;
+	struct p9_trans_fd *ts = NULL;
+
+	if (client && client->status == Connected)
+		ts = client->trans;
+
+	if (!ts)
+		return -EREMOTEIO;
+
+	if (!ts->rd->f_op || !ts->rd->f_op->poll)
+		return -EIO;
+
+	if (!ts->wr->f_op || !ts->wr->f_op->poll)
+		return -EIO;
+
+	ret = ts->rd->f_op->poll(ts->rd, pt);
+	if (ret < 0)
+		return ret;
+
+	if (ts->rd != ts->wr) {
+		n = ts->wr->f_op->poll(ts->wr, pt);
+		if (n < 0)
+			return n;
+		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
+	}
+
+	return ret;
 }
 
 /**
- * p9_pollwait - add poll task to the wait queue
- * @filp: file pointer being polled
- * @wait_address: wait_q to block on
- * @p: poll state
+ * p9_fd_read - read from a fd
+ * @client: client instance
+ * @v: buffer to receive data into
+ * @len: size of receive buffer
  *
- * called by files poll operation to add v9fs-poll task to files wait queue
  */
 
-static void
-p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
+static int p9_fd_read(struct p9_client *client, void *v, int len)
 {
-	int i;
+	int ret;
+	struct p9_trans_fd *ts = NULL;
+
+	if (client && client->status != Disconnected)
+		ts = client->trans;
+
+	if (!ts)
+		return -EREMOTEIO;
+
+	if (!(ts->rd->f_flags & O_NONBLOCK))
+		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
+
+	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
+	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+		client->status = Disconnected;
+	return ret;
+}
+
+/**
+ * p9_read_work - called when there is some data to be read from a transport
+ * @work: container of work to be done
+ *
+ */
+
+static void p9_read_work(struct work_struct *work)
+{
+	int n, err;
 	struct p9_conn *m;
 
-	m = container_of(p, struct p9_conn, pt);
-	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
-		if (m->poll_waddr[i] == NULL)
-			break;
-
-	if (i >= ARRAY_SIZE(m->poll_waddr)) {
-		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
-		return;
-	}
-
-	m->poll_waddr[i] = wait_address;
-
-	if (!wait_address) {
-		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
-		m->poll_waddr[i] = ERR_PTR(-EIO);
-		return;
-	}
-
-	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
-	add_wait_queue(wait_address, &m->poll_wait[i]);
-}
-
-/**
- * p9_poll_mux - polls a mux and schedules read or write works if necessary
- * @m: connection to poll
- *
- */
-
-static void p9_poll_mux(struct p9_conn *m)
-{
-	int n;
+	m = container_of(work, struct p9_conn, rq);
 
 	if (m->err < 0)
 		return;
 
-	n = p9_fd_poll(m->trans, NULL);
-	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
-		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
-		if (n >= 0)
-			n = -ECONNRESET;
-		p9_conn_cancel(m, n);
+	P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
+
+	if (!m->rbuf) {
+		m->rbuf = m->tmp_buf;
+		m->rpos = 0;
+		m->rsize = 7; /* start by reading header */
 	}
 
-	if (n & POLLIN) {
-		set_bit(Rpending, &m->wsched);
-		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
-		if (!test_and_set_bit(Rworksched, &m->wsched)) {
-			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
+	clear_bit(Rpending, &m->wsched);
+	P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
+					m->rpos, m->rsize, m->rsize-m->rpos);
+	err = p9_fd_read(m->client, m->rbuf + m->rpos,
+						m->rsize - m->rpos);
+	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
+	if (err == -EAGAIN) {
+		clear_bit(Rworksched, &m->wsched);
+		return;
+	}
+
+	if (err <= 0)
+		goto error;
+
+	m->rpos += err;
+
+	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
+		u16 tag;
+		P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");
+
+		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
+		if (n >= m->client->msize) {
+			P9_DPRINTK(P9_DEBUG_ERROR,
+				"requested packet size too big: %d\n", n);
+			err = -EIO;
+			goto error;
+		}
+
+		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
+		P9_DPRINTK(P9_DEBUG_TRANS,
+			"mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
+
+		m->req = p9_tag_lookup(m->client, tag);
+		if (!m->req) {
+			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
+								 tag);
+			err = -EIO;
+			goto error;
+		}
+
+		if (m->req->rc == NULL) {
+			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
+						m->client->msize, GFP_KERNEL);
+			if (!m->req->rc) {
+				m->req = NULL;
+				err = -ENOMEM;
+				goto error;
+			}
+		}
+		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
+		memcpy(m->rbuf, m->tmp_buf, m->rsize);
+		m->rsize = n;
+	}
+
+	/* not an else because some packets (like clunk) have no payload */
+	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
+		P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
+		spin_lock(&m->client->lock);
+		list_del(&m->req->req_list);
+		spin_unlock(&m->client->lock);
+		p9_client_cb(m->client, m->req);
+
+		m->rbuf = NULL;
+		m->rpos = 0;
+		m->rsize = 0;
+		m->req = NULL;
+	}
+
+	if (!list_empty(&m->req_list)) {
+		if (test_and_clear_bit(Rpending, &m->wsched))
+			n = POLLIN;
+		else
+			n = p9_fd_poll(m->client, NULL);
+
+		if (n & POLLIN) {
+			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
 			queue_work(p9_mux_wq, &m->rq);
-		}
-	}
+		} else
+			clear_bit(Rworksched, &m->wsched);
+	} else
+		clear_bit(Rworksched, &m->wsched);
 
-	if (n & POLLOUT) {
-		set_bit(Wpending, &m->wsched);
-		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
-		if ((m->wsize || !list_empty(&m->unsent_req_list))
-		    && !test_and_set_bit(Wworksched, &m->wsched)) {
-			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
-			queue_work(p9_mux_wq, &m->wq);
-		}
-	}
+	return;
+error:
+	p9_conn_cancel(m, err);
+	clear_bit(Rworksched, &m->wsched);
 }
 
 /**
- * p9_poll_proc - poll worker thread
- * @a: thread state and arguments
- *
- * polls all v9fs transports for new events and queues the appropriate
- * work to the work queue
+ * p9_fd_write - write to a fd
+ * @client: client instance
+ * @v: buffer to send data from
+ * @len: size of send buffer
  *
  */
 
-static int p9_poll_proc(void *a)
+static int p9_fd_write(struct p9_client *client, void *v, int len)
 {
-	struct p9_conn *m, *mtmp;
-	struct p9_mux_poll_task *vpt;
+	int ret;
+	mm_segment_t oldfs;
+	struct p9_trans_fd *ts = NULL;
 
-	vpt = a;
-	P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
+	if (client && client->status != Disconnected)
+		ts = client->trans;
 
-		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
-			p9_poll_mux(m);
-		}
+	if (!ts)
+		return -EREMOTEIO;
 
-		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
-		schedule_timeout(SCHED_TIMEOUT * HZ);
-	}
+	if (!(ts->wr->f_flags & O_NONBLOCK))
+		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
 
-	__set_current_state(TASK_RUNNING);
-	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
-	return 0;
+	oldfs = get_fs();
+	set_fs(get_ds());
+	/* The cast to a user pointer is valid due to the set_fs() */
+	ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
+	set_fs(oldfs);
+
+	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+		client->status = Disconnected;
+	return ret;
 }
 
 /**
@@ -584,7 +435,7 @@
 {
 	int n, err;
 	struct p9_conn *m;
-	struct p9_req *req;
+	struct p9_req_t *req;
 
 	m = container_of(work, struct p9_conn, wq);
 
@@ -599,25 +450,23 @@
 			return;
 		}
 
-		spin_lock(&m->lock);
-again:
-		req = list_entry(m->unsent_req_list.next, struct p9_req,
+		spin_lock(&m->client->lock);
+		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 			       req_list);
+		req->status = REQ_STATUS_SENT;
 		list_move_tail(&req->req_list, &m->req_list);
-		if (req->err == ERREQFLUSH)
-			goto again;
 
-		m->wbuf = req->tcall->sdata;
-		m->wsize = req->tcall->size;
+		m->wbuf = req->tc->sdata;
+		m->wsize = req->tc->size;
 		m->wpos = 0;
-		spin_unlock(&m->lock);
+		spin_unlock(&m->client->lock);
 	}
 
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
+	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
 								m->wsize);
 	clear_bit(Wpending, &m->wsched);
-	err = p9_fd_write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
+	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
+	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
 	if (err == -EAGAIN) {
 		clear_bit(Wworksched, &m->wsched);
 		return;
@@ -638,10 +487,10 @@
 		if (test_and_clear_bit(Wpending, &m->wsched))
 			n = POLLOUT;
 		else
-			n = p9_fd_poll(m->trans, NULL);
+			n = p9_fd_poll(m->client, NULL);
 
 		if (n & POLLOUT) {
-			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
+			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
 			queue_work(p9_mux_wq, &m->wq);
 		} else
 			clear_bit(Wworksched, &m->wsched);
@@ -655,504 +504,195 @@
 	clear_bit(Wworksched, &m->wsched);
 }
 
-static void process_request(struct p9_conn *m, struct p9_req *req)
+static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
-	int ecode;
-	struct p9_str *ename;
+	struct p9_poll_wait *pwait =
+		container_of(wait, struct p9_poll_wait, wait);
+	struct p9_conn *m = pwait->conn;
+	unsigned long flags;
+	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
 
-	if (!req->err && req->rcall->id == P9_RERROR) {
-		ecode = req->rcall->params.rerror.errno;
-		ename = &req->rcall->params.rerror.error;
+	spin_lock_irqsave(&p9_poll_lock, flags);
+	if (list_empty(&m->poll_pending_link))
+		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
+	spin_unlock_irqrestore(&p9_poll_lock, flags);
 
-		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
-								ename->str);
-
-		if (m->extended)
-			req->err = -ecode;
-
-		if (!req->err) {
-			req->err = p9_errstr2errno(ename->str, ename->len);
-
-			/* string match failed */
-			if (!req->err) {
-				PRINT_FCALL_ERROR("unknown error", req->rcall);
-				req->err = -ESERVERFAULT;
-			}
-		}
-	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
-		P9_DPRINTK(P9_DEBUG_ERROR,
-				"fcall mismatch: expected %d, got %d\n",
-				req->tcall->id + 1, req->rcall->id);
-		if (!req->err)
-			req->err = -EIO;
-	}
+	/* perform the default wake up operation */
+	return default_wake_function(&dummy_wait, mode, sync, key);
 }
 
 /**
- * p9_read_work - called when there is some data to be read from a transport
- * @work: container of work to be done
+ * p9_pollwait - add poll task to the wait queue
+ * @filp: file pointer being polled
+ * @wait_address: wait_q to block on
+ * @p: poll state
  *
+ * called by files poll operation to add v9fs-poll task to files wait queue
  */
 
-static void p9_read_work(struct work_struct *work)
+static void
+p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
 {
-	int n, err;
-	struct p9_conn *m;
-	struct p9_req *req, *rptr, *rreq;
-	struct p9_fcall *rcall;
-	char *rbuf;
+	struct p9_conn *m = container_of(p, struct p9_conn, pt);
+	struct p9_poll_wait *pwait = NULL;
+	int i;
 
-	m = container_of(work, struct p9_conn, rq);
-
-	if (m->err < 0)
-		return;
-
-	rcall = NULL;
-	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
-
-	if (!m->rcall) {
-		m->rcall =
-		    kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL);
-		if (!m->rcall) {
-			err = -ENOMEM;
-			goto error;
-		}
-
-		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
-		m->rpos = 0;
-	}
-
-	clear_bit(Rpending, &m->wsched);
-	err = p9_fd_read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
-	if (err == -EAGAIN) {
-		clear_bit(Rworksched, &m->wsched);
-		return;
-	}
-
-	if (err <= 0)
-		goto error;
-
-	m->rpos += err;
-	while (m->rpos > 4) {
-		n = le32_to_cpu(*(__le32 *) m->rbuf);
-		if (n >= m->msize) {
-			P9_DPRINTK(P9_DEBUG_ERROR,
-				"requested packet size too big: %d\n", n);
-			err = -EIO;
-			goto error;
-		}
-
-		if (m->rpos < n)
+	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
+		if (m->poll_wait[i].wait_addr == NULL) {
+			pwait = &m->poll_wait[i];
 			break;
-
-		err =
-		    p9_deserialize_fcall(m->rbuf, n, m->rcall, m->extended);
-		if (err < 0)
-			goto error;
-
-#ifdef CONFIG_NET_9P_DEBUG
-		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
-			char buf[150];
-
-			p9_printfcall(buf, sizeof(buf), m->rcall,
-				m->extended);
-			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
-		}
-#endif
-
-		rcall = m->rcall;
-		rbuf = m->rbuf;
-		if (m->rpos > n) {
-			m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
-					   GFP_KERNEL);
-			if (!m->rcall) {
-				err = -ENOMEM;
-				goto error;
-			}
-
-			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
-			memmove(m->rbuf, rbuf + n, m->rpos - n);
-			m->rpos -= n;
-		} else {
-			m->rcall = NULL;
-			m->rbuf = NULL;
-			m->rpos = 0;
-		}
-
-		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
-							rcall->id, rcall->tag);
-
-		req = NULL;
-		spin_lock(&m->lock);
-		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
-			if (rreq->tag == rcall->tag) {
-				req = rreq;
-				if (req->flush != Flushing)
-					list_del(&req->req_list);
-				break;
-			}
-		}
-		spin_unlock(&m->lock);
-
-		if (req) {
-			req->rcall = rcall;
-			process_request(m, req);
-
-			if (req->flush != Flushing) {
-				if (req->cb)
-					(*req->cb) (req, req->cba);
-				else
-					kfree(req->rcall);
-			}
-		} else {
-			if (err >= 0 && rcall->id != P9_RFLUSH)
-				P9_DPRINTK(P9_DEBUG_ERROR,
-				  "unexpected response mux %p id %d tag %d\n",
-				  m, rcall->id, rcall->tag);
-			kfree(rcall);
 		}
 	}
 
-	if (!list_empty(&m->req_list)) {
-		if (test_and_clear_bit(Rpending, &m->wsched))
-			n = POLLIN;
-		else
-			n = p9_fd_poll(m->trans, NULL);
+	if (!pwait) {
+		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
+		return;
+	}
 
-		if (n & POLLIN) {
-			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
-			queue_work(p9_mux_wq, &m->rq);
-		} else
-			clear_bit(Rworksched, &m->wsched);
-	} else
-		clear_bit(Rworksched, &m->wsched);
-
-	return;
-
-error:
-	p9_conn_cancel(m, err);
-	clear_bit(Rworksched, &m->wsched);
+	pwait->conn = m;
+	pwait->wait_addr = wait_address;
+	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
+	add_wait_queue(wait_address, &pwait->wait);
 }
 
 /**
- * p9_send_request - send 9P request
- * The function can sleep until the request is scheduled for sending.
- * The function can be interrupted. Return from the function is not
- * a guarantee that the request is sent successfully. Can return errors
- * that can be retrieved by PTR_ERR macros.
+ * p9_conn_create - allocate and initialize the per-session mux data
+ * @client: client instance
  *
- * @m: mux data
- * @tc: request to be sent
- * @cb: callback function to call when response is received
- * @cba: parameter to pass to the callback function
- *
+ * Note: Creates the polling task if this is the first session.
  */
 
-static struct p9_req *p9_send_request(struct p9_conn *m,
-					  struct p9_fcall *tc,
-					  p9_conn_req_callback cb, void *cba)
+static struct p9_conn *p9_conn_create(struct p9_client *client)
 {
 	int n;
-	struct p9_req *req;
+	struct p9_conn *m;
 
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
-		tc, tc->id);
+	P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
+								client->msize);
+	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
+	if (!m)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&m->mux_list);
+	m->client = client;
+
+	INIT_LIST_HEAD(&m->req_list);
+	INIT_LIST_HEAD(&m->unsent_req_list);
+	INIT_WORK(&m->rq, p9_read_work);
+	INIT_WORK(&m->wq, p9_write_work);
+	INIT_LIST_HEAD(&m->poll_pending_link);
+	init_poll_funcptr(&m->pt, p9_pollwait);
+
+	n = p9_fd_poll(client, &m->pt);
+	if (n & POLLIN) {
+		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
+		set_bit(Rpending, &m->wsched);
+	}
+
+	if (n & POLLOUT) {
+		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
+		set_bit(Wpending, &m->wsched);
+	}
+
+	return m;
+}
+
+/**
+ * p9_poll_mux - polls a mux and schedules read or write works if necessary
+ * @m: connection to poll
+ *
+ */
+
+static void p9_poll_mux(struct p9_conn *m)
+{
+	int n;
+
 	if (m->err < 0)
-		return ERR_PTR(m->err);
+		return;
 
-	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
-	if (!req)
-		return ERR_PTR(-ENOMEM);
-
-	if (tc->id == P9_TVERSION)
-		n = P9_NOTAG;
-	else
-		n = p9_mux_get_tag(m);
-
-	if (n < 0) {
-		kfree(req);
-		return ERR_PTR(-ENOMEM);
+	n = p9_fd_poll(m->client, NULL);
+	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
+		P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
+		if (n >= 0)
+			n = -ECONNRESET;
+		p9_conn_cancel(m, n);
 	}
 
-	p9_set_tag(tc, n);
-
-#ifdef CONFIG_NET_9P_DEBUG
-	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
-		char buf[150];
-
-		p9_printfcall(buf, sizeof(buf), tc, m->extended);
-		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
+	if (n & POLLIN) {
+		set_bit(Rpending, &m->wsched);
+		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
+		if (!test_and_set_bit(Rworksched, &m->wsched)) {
+			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
+			queue_work(p9_mux_wq, &m->rq);
+		}
 	}
-#endif
 
-	spin_lock_init(&req->lock);
-	req->tag = n;
-	req->tcall = tc;
-	req->rcall = NULL;
-	req->err = 0;
-	req->cb = cb;
-	req->cba = cba;
-	req->flush = None;
+	if (n & POLLOUT) {
+		set_bit(Wpending, &m->wsched);
+		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
+		if ((m->wsize || !list_empty(&m->unsent_req_list))
+		    && !test_and_set_bit(Wworksched, &m->wsched)) {
+			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
+			queue_work(p9_mux_wq, &m->wq);
+		}
+	}
+}
 
-	spin_lock(&m->lock);
+/**
+ * p9_fd_request - send 9P request
+ * The function can sleep until the request is scheduled for sending.
+ * The function can be interrupted. Return from the function is not
+ * a guarantee that the request is sent successfully.
+ *
+ * @client: client instance
+ * @req: request to be sent
+ *
+ */
+
+static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
+{
+	int n;
+	struct p9_trans_fd *ts = client->trans;
+	struct p9_conn *m = ts->conn;
+
+	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
+						current, req->tc, req->tc->id);
+	if (m->err < 0)
+		return m->err;
+
+	spin_lock(&client->lock);
+	req->status = REQ_STATUS_UNSENT;
 	list_add_tail(&req->req_list, &m->unsent_req_list);
-	spin_unlock(&m->lock);
+	spin_unlock(&client->lock);
 
 	if (test_and_clear_bit(Wpending, &m->wsched))
 		n = POLLOUT;
 	else
-		n = p9_fd_poll(m->trans, NULL);
+		n = p9_fd_poll(m->client, NULL);
 
 	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
 		queue_work(p9_mux_wq, &m->wq);
 
-	return req;
-}
-
-static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
-{
-	p9_mux_put_tag(m, req->tag);
-	kfree(req);
-}
-
-static void p9_mux_flush_cb(struct p9_req *freq, void *a)
-{
-	int tag;
-	struct p9_conn *m;
-	struct p9_req *req, *rreq, *rptr;
-
-	m = a;
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
-		freq->tcall, freq->rcall, freq->err,
-		freq->tcall->params.tflush.oldtag);
-
-	spin_lock(&m->lock);
-	tag = freq->tcall->params.tflush.oldtag;
-	req = NULL;
-	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
-		if (rreq->tag == tag) {
-			req = rreq;
-			list_del(&req->req_list);
-			break;
-		}
-	}
-	spin_unlock(&m->lock);
-
-	if (req) {
-		spin_lock(&req->lock);
-		req->flush = Flushed;
-		spin_unlock(&req->lock);
-
-		if (req->cb)
-			(*req->cb) (req, req->cba);
-		else
-			kfree(req->rcall);
-	}
-
-	kfree(freq->tcall);
-	kfree(freq->rcall);
-	p9_mux_free_request(m, freq);
-}
-
-static int
-p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
-{
-	struct p9_fcall *fc;
-	struct p9_req *rreq, *rptr;
-
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
-
-	/* if a response was received for a request, do nothing */
-	spin_lock(&req->lock);
-	if (req->rcall || req->err) {
-		spin_unlock(&req->lock);
-		P9_DPRINTK(P9_DEBUG_MUX,
-			"mux %p req %p response already received\n", m, req);
-		return 0;
-	}
-
-	req->flush = Flushing;
-	spin_unlock(&req->lock);
-
-	spin_lock(&m->lock);
-	/* if the request is not sent yet, just remove it from the list */
-	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
-		if (rreq->tag == req->tag) {
-			P9_DPRINTK(P9_DEBUG_MUX,
-			   "mux %p req %p request is not sent yet\n", m, req);
-			list_del(&rreq->req_list);
-			req->flush = Flushed;
-			spin_unlock(&m->lock);
-			if (req->cb)
-				(*req->cb) (req, req->cba);
-			return 0;
-		}
-	}
-	spin_unlock(&m->lock);
-
-	clear_thread_flag(TIF_SIGPENDING);
-	fc = p9_create_tflush(req->tag);
-	p9_send_request(m, fc, p9_mux_flush_cb, m);
-	return 1;
-}
-
-static void
-p9_conn_rpc_cb(struct p9_req *req, void *a)
-{
-	struct p9_mux_rpc *r;
-
-	P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
-	r = a;
-	r->rcall = req->rcall;
-	r->err = req->err;
-
-	if (req->flush != None && !req->err)
-		r->err = -ERESTARTSYS;
-
-	wake_up(&r->wqueue);
-}
-
-/**
- * p9_fd_rpc- sends 9P request and waits until a response is available.
- *	The function can be interrupted.
- * @t: transport data
- * @tc: request to be sent
- * @rc: pointer where a pointer to the response is stored
- *
- */
-
-int
-p9_fd_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc)
-{
-	struct p9_trans_fd *p = t->priv;
-	struct p9_conn *m = p->conn;
-	int err, sigpending;
-	unsigned long flags;
-	struct p9_req *req;
-	struct p9_mux_rpc r;
-
-	r.err = 0;
-	r.tcall = tc;
-	r.rcall = NULL;
-	r.m = m;
-	init_waitqueue_head(&r.wqueue);
-
-	if (rc)
-		*rc = NULL;
-
-	sigpending = 0;
-	if (signal_pending(current)) {
-		sigpending = 1;
-		clear_thread_flag(TIF_SIGPENDING);
-	}
-
-	req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
-	if (IS_ERR(req)) {
-		err = PTR_ERR(req);
-		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
-		return err;
-	}
-
-	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
-	if (r.err < 0)
-		err = r.err;
-
-	if (err == -ERESTARTSYS && m->trans->status == Connected
-							&& m->err == 0) {
-		if (p9_mux_flush_request(m, req)) {
-			/* wait until we get response of the flush message */
-			do {
-				clear_thread_flag(TIF_SIGPENDING);
-				err = wait_event_interruptible(r.wqueue,
-					r.rcall || r.err);
-			} while (!r.rcall && !r.err && err == -ERESTARTSYS &&
-				m->trans->status == Connected && !m->err);
-
-			err = -ERESTARTSYS;
-		}
-		sigpending = 1;
-	}
-
-	if (sigpending) {
-		spin_lock_irqsave(&current->sighand->siglock, flags);
-		recalc_sigpending();
-		spin_unlock_irqrestore(&current->sighand->siglock, flags);
-	}
-
-	if (rc)
-		*rc = r.rcall;
-	else
-		kfree(r.rcall);
-
-	p9_mux_free_request(m, req);
-	if (err > 0)
-		err = -EIO;
-
-	return err;
-}
-
-#ifdef P9_NONBLOCK
-/**
- * p9_conn_rpcnb - sends 9P request without waiting for response.
- * @m: mux data
- * @tc: request to be sent
- * @cb: callback function to be called when response arrives
- * @a: value to pass to the callback function
- *
- */
-
-int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
-		   p9_conn_req_callback cb, void *a)
-{
-	int err;
-	struct p9_req *req;
-
-	req = p9_send_request(m, tc, cb, a);
-	if (IS_ERR(req)) {
-		err = PTR_ERR(req);
-		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
-		return PTR_ERR(req);
-	}
-
-	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
 	return 0;
 }
-#endif /* P9_NONBLOCK */
 
-/**
- * p9_conn_cancel - cancel all pending requests with error
- * @m: mux data
- * @err: error code
- *
- */
-
-void p9_conn_cancel(struct p9_conn *m, int err)
+static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 {
-	struct p9_req *req, *rtmp;
-	LIST_HEAD(cancel_list);
+	int ret = 1;
 
-	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
-	m->err = err;
-	spin_lock(&m->lock);
-	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
-		list_move(&req->req_list, &cancel_list);
-	}
-	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
-		list_move(&req->req_list, &cancel_list);
-	}
-	spin_unlock(&m->lock);
+	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 
-	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
-		list_del(&req->req_list);
-		if (!req->err)
-			req->err = err;
+	spin_lock(&client->lock);
+	list_del(&req->req_list);
 
-		if (req->cb)
-			(*req->cb) (req, req->cba);
-		else
-			kfree(req->rcall);
+	if (req->status == REQ_STATUS_UNSENT) {
+		req->status = REQ_STATUS_FLSHD;
+		ret = 0;
 	}
+
+	spin_unlock(&client->lock);
+
+	return ret;
 }
 
 /**
@@ -1216,7 +756,7 @@
 	return 0;
 }
 
-static int p9_fd_open(struct p9_trans *trans, int rfd, int wfd)
+static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 {
 	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
 					   GFP_KERNEL);
@@ -1234,13 +774,13 @@
 		return -EIO;
 	}
 
-	trans->priv = ts;
-	trans->status = Connected;
+	client->trans = ts;
+	client->status = Connected;
 
 	return 0;
 }
 
-static int p9_socket_open(struct p9_trans *trans, struct socket *csocket)
+static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 {
 	int fd, ret;
 
@@ -1251,137 +791,65 @@
 		return fd;
 	}
 
-	ret = p9_fd_open(trans, fd, fd);
+	ret = p9_fd_open(client, fd, fd);
 	if (ret < 0) {
 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
 		sockfd_put(csocket);
 		return ret;
 	}
 
-	((struct p9_trans_fd *)trans->priv)->rd->f_flags |= O_NONBLOCK;
+	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
 
 	return 0;
 }
 
 /**
- * p9_fd_read- read from a fd
- * @trans: transport instance state
- * @v: buffer to receive data into
- * @len: size of receive buffer
+ * p9_conn_destroy - cancels all pending requests and frees mux resources
+ * @m: mux to destroy
  *
  */
 
-static int p9_fd_read(struct p9_trans *trans, void *v, int len)
+static void p9_conn_destroy(struct p9_conn *m)
 {
-	int ret;
-	struct p9_trans_fd *ts = NULL;
+	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
+		m->mux_list.prev, m->mux_list.next);
 
-	if (trans && trans->status != Disconnected)
-		ts = trans->priv;
+	p9_mux_poll_stop(m);
+	cancel_work_sync(&m->rq);
+	cancel_work_sync(&m->wq);
 
-	if (!ts)
-		return -EREMOTEIO;
+	p9_conn_cancel(m, -ECONNRESET);
 
-	if (!(ts->rd->f_flags & O_NONBLOCK))
-		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
-
-	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
-	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
-		trans->status = Disconnected;
-	return ret;
+	m->client = NULL;
+	kfree(m);
 }
 
 /**
- * p9_fd_write - write to a socket
- * @trans: transport instance state
- * @v: buffer to send data from
- * @len: size of send buffer
+ * p9_fd_close - shutdown file descriptor transport
+ * @client: client instance
  *
  */
 
-static int p9_fd_write(struct p9_trans *trans, void *v, int len)
-{
-	int ret;
-	mm_segment_t oldfs;
-	struct p9_trans_fd *ts = NULL;
-
-	if (trans && trans->status != Disconnected)
-		ts = trans->priv;
-
-	if (!ts)
-		return -EREMOTEIO;
-
-	if (!(ts->wr->f_flags & O_NONBLOCK))
-		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
-
-	oldfs = get_fs();
-	set_fs(get_ds());
-	/* The cast to a user pointer is valid due to the set_fs() */
-	ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
-	set_fs(oldfs);
-
-	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
-		trans->status = Disconnected;
-	return ret;
-}
-
-static unsigned int
-p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt)
-{
-	int ret, n;
-	struct p9_trans_fd *ts = NULL;
-
-	if (trans && trans->status == Connected)
-		ts = trans->priv;
-
-	if (!ts)
-		return -EREMOTEIO;
-
-	if (!ts->rd->f_op || !ts->rd->f_op->poll)
-		return -EIO;
-
-	if (!ts->wr->f_op || !ts->wr->f_op->poll)
-		return -EIO;
-
-	ret = ts->rd->f_op->poll(ts->rd, pt);
-	if (ret < 0)
-		return ret;
-
-	if (ts->rd != ts->wr) {
-		n = ts->wr->f_op->poll(ts->wr, pt);
-		if (n < 0)
-			return n;
-		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
-	}
-
-	return ret;
-}
-
-/**
- * p9_fd_close - shutdown socket
- * @trans: private socket structure
- *
- */
-
-static void p9_fd_close(struct p9_trans *trans)
+static void p9_fd_close(struct p9_client *client)
 {
 	struct p9_trans_fd *ts;
 
-	if (!trans)
+	if (!client)
 		return;
 
-	ts = xchg(&trans->priv, NULL);
-
+	ts = client->trans;
 	if (!ts)
 		return;
 
+	client->status = Disconnected;
+
 	p9_conn_destroy(ts->conn);
 
-	trans->status = Disconnected;
 	if (ts->rd)
 		fput(ts->rd);
 	if (ts->wr)
 		fput(ts->wr);
+
 	kfree(ts);
 }
 
@@ -1402,31 +870,23 @@
 	return 0;
 }
 
-static struct p9_trans *
-p9_trans_create_tcp(const char *addr, char *args, int msize, unsigned char dotu)
+static int
+p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 {
 	int err;
-	struct p9_trans *trans;
 	struct socket *csocket;
 	struct sockaddr_in sin_server;
 	struct p9_fd_opts opts;
-	struct p9_trans_fd *p;
+	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	err = parse_opts(args, &opts);
 	if (err < 0)
-		return ERR_PTR(err);
+		return err;
 
 	if (valid_ipaddr4(addr) < 0)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	csocket = NULL;
-	trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
-	if (!trans)
-		return ERR_PTR(-ENOMEM);
-	trans->msize = msize;
-	trans->extended = dotu;
-	trans->rpc = p9_fd_rpc;
-	trans->close = p9_fd_close;
 
 	sin_server.sin_family = AF_INET;
 	sin_server.sin_addr.s_addr = in_aton(addr);
@@ -1449,45 +909,38 @@
 		goto error;
 	}
 
-	err = p9_socket_open(trans, csocket);
+	err = p9_socket_open(client, csocket);
 	if (err < 0)
 		goto error;
 
-	p = (struct p9_trans_fd *) trans->priv;
-	p->conn = p9_conn_create(trans);
+	p = (struct p9_trans_fd *) client->trans;
+	p->conn = p9_conn_create(client);
 	if (IS_ERR(p->conn)) {
 		err = PTR_ERR(p->conn);
 		p->conn = NULL;
 		goto error;
 	}
 
-	return trans;
+	return 0;
 
 error:
 	if (csocket)
 		sock_release(csocket);
 
-	kfree(trans);
-	return ERR_PTR(err);
+	kfree(p);
+
+	return err;
 }
 
-static struct p9_trans *
-p9_trans_create_unix(const char *addr, char *args, int msize,
-							unsigned char dotu)
+static int
+p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 {
 	int err;
 	struct socket *csocket;
 	struct sockaddr_un sun_server;
-	struct p9_trans *trans;
-	struct p9_trans_fd *p;
+	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	csocket = NULL;
-	trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
-	if (!trans)
-		return ERR_PTR(-ENOMEM);
-
-	trans->rpc = p9_fd_rpc;
-	trans->close = p9_fd_close;
 
 	if (strlen(addr) > UNIX_PATH_MAX) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
@@ -1508,79 +961,69 @@
 		goto error;
 	}
 
-	err = p9_socket_open(trans, csocket);
+	err = p9_socket_open(client, csocket);
 	if (err < 0)
 		goto error;
 
-	trans->msize = msize;
-	trans->extended = dotu;
-	p = (struct p9_trans_fd *) trans->priv;
-	p->conn = p9_conn_create(trans);
+	p = (struct p9_trans_fd *) client->trans;
+	p->conn = p9_conn_create(client);
 	if (IS_ERR(p->conn)) {
 		err = PTR_ERR(p->conn);
 		p->conn = NULL;
 		goto error;
 	}
 
-	return trans;
+	return 0;
 
 error:
 	if (csocket)
 		sock_release(csocket);
 
-	kfree(trans);
-	return ERR_PTR(err);
+	kfree(p);
+	return err;
 }
 
-static struct p9_trans *
-p9_trans_create_fd(const char *name, char *args, int msize,
-							unsigned char extended)
+static int
+p9_fd_create(struct p9_client *client, const char *addr, char *args)
 {
 	int err;
-	struct p9_trans *trans;
 	struct p9_fd_opts opts;
-	struct p9_trans_fd *p;
+	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	parse_opts(args, &opts);
 
 	if (opts.rfd == ~0 || opts.wfd == ~0) {
 		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
-		return ERR_PTR(-ENOPROTOOPT);
+		return -ENOPROTOOPT;
 	}
 
-	trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
-	if (!trans)
-		return ERR_PTR(-ENOMEM);
-
-	trans->rpc = p9_fd_rpc;
-	trans->close = p9_fd_close;
-
-	err = p9_fd_open(trans, opts.rfd, opts.wfd);
+	err = p9_fd_open(client, opts.rfd, opts.wfd);
 	if (err < 0)
 		goto error;
 
-	trans->msize = msize;
-	trans->extended = extended;
-	p = (struct p9_trans_fd *) trans->priv;
-	p->conn = p9_conn_create(trans);
+	p = (struct p9_trans_fd *) client->trans;
+	p->conn = p9_conn_create(client);
 	if (IS_ERR(p->conn)) {
 		err = PTR_ERR(p->conn);
 		p->conn = NULL;
 		goto error;
 	}
 
-	return trans;
+	return 0;
 
 error:
-	kfree(trans);
-	return ERR_PTR(err);
+	kfree(p);
+	return err;
 }
 
 static struct p9_trans_module p9_tcp_trans = {
 	.name = "tcp",
 	.maxsize = MAX_SOCK_BUF,
 	.def = 1,
-	.create = p9_trans_create_tcp,
+	.create = p9_fd_create_tcp,
+	.close = p9_fd_close,
+	.request = p9_fd_request,
+	.cancel = p9_fd_cancel,
 	.owner = THIS_MODULE,
 };
 
@@ -1588,7 +1031,10 @@
 	.name = "unix",
 	.maxsize = MAX_SOCK_BUF,
 	.def = 0,
-	.create = p9_trans_create_unix,
+	.create = p9_fd_create_unix,
+	.close = p9_fd_close,
+	.request = p9_fd_request,
+	.cancel = p9_fd_cancel,
 	.owner = THIS_MODULE,
 };
 
@@ -1596,23 +1042,71 @@
 	.name = "fd",
 	.maxsize = MAX_SOCK_BUF,
 	.def = 0,
-	.create = p9_trans_create_fd,
+	.create = p9_fd_create,
+	.close = p9_fd_close,
+	.request = p9_fd_request,
+	.cancel = p9_fd_cancel,
 	.owner = THIS_MODULE,
 };
 
+/**
+ * p9_poll_proc - poll worker thread
+ * @a: thread state and arguments
+ *
+ * polls all v9fs transports for new events and queues the appropriate
+ * work to the work queue
+ *
+ */
+
+static int p9_poll_proc(void *a)
+{
+	unsigned long flags;
+
+	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
+ repeat:
+	spin_lock_irqsave(&p9_poll_lock, flags);
+	while (!list_empty(&p9_poll_pending_list)) {
+		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
+							struct p9_conn,
+							poll_pending_link);
+		list_del_init(&conn->poll_pending_link);
+		spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+		p9_poll_mux(conn);
+
+		spin_lock_irqsave(&p9_poll_lock, flags);
+	}
+	spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (list_empty(&p9_poll_pending_list)) {
+		P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
+	if (!kthread_should_stop())
+		goto repeat;
+
+	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
+	return 0;
+}
+
 int p9_trans_fd_init(void)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
-		p9_mux_poll_tasks[i].task = NULL;
-
 	p9_mux_wq = create_workqueue("v9fs");
 	if (!p9_mux_wq) {
 		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
 		return -ENOMEM;
 	}
 
+	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
+	if (IS_ERR(p9_poll_task)) {
+		destroy_workqueue(p9_mux_wq);
+		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
+		return PTR_ERR(p9_poll_task);
+	}
+
 	v9fs_register_trans(&p9_tcp_trans);
 	v9fs_register_trans(&p9_unix_trans);
 	v9fs_register_trans(&p9_fd_trans);
@@ -1622,6 +1116,7 @@
 
 void p9_trans_fd_exit(void)
 {
+	kthread_stop(p9_poll_task);
 	v9fs_unregister_trans(&p9_tcp_trans);
 	v9fs_unregister_trans(&p9_unix_trans);
 	v9fs_unregister_trans(&p9_fd_trans);
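
The p9_trans_module initializers above capture the transport interface this series
converges on: a create/close pair for connection setup plus request and cancel hooks,
replacing the old blocking rpc callback.  A minimal sketch of a transport wired up the
same way is shown below; the "null" transport name and its stub callbacks are
hypothetical and only illustrate the shape of the interface, they are not part of this
patch.

/*
 * Illustrative sketch only: a do-nothing "null" transport registered the
 * same way the tcp/unix/fd modules above are.  The name and the stub
 * callbacks are hypothetical and not part of this patch.
 */
#include <linux/module.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

static int p9_null_create(struct p9_client *client, const char *addr, char *args)
{
	return -ENOTCONN;		/* never actually connects */
}

static void p9_null_close(struct p9_client *client)
{
	client->status = Disconnected;
}

static int p9_null_request(struct p9_client *client, struct p9_req_t *req)
{
	return -EIO;			/* nowhere to send the request */
}

static int p9_null_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;			/* nothing in flight to cancel */
}

static struct p9_trans_module p9_null_trans = {
	.name = "null",
	.maxsize = 4096,
	.def = 0,
	.create = p9_null_create,
	.close = p9_null_close,
	.request = p9_null_request,
	.cancel = p9_null_cancel,
	.owner = THIS_MODULE,
};

static int __init p9_null_init(void)
{
	v9fs_register_trans(&p9_null_trans);
	return 0;
}

static void __exit p9_null_exit(void)
{
	v9fs_unregister_trans(&p9_null_trans);
}

module_init(p9_null_init);
module_exit(p9_null_exit);
MODULE_LICENSE("GPL");

With the tag bookkeeping moved into the common client code (p9_tag_lookup() and
p9_client_cb() in the transports below), a transport only has to move bytes and
signal completions.
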
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
new file mode 100644
index 0000000..8d6cc47
--- /dev/null
+++ b/net/9p/trans_rdma.c
@@ -0,0 +1,712 @@
+/*
+ * net/9p/trans_rdma.c
+ *
+ * RDMA transport layer based on the trans_fd.c implementation.
+ *
+ *  Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
+ *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
+ *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
+ *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#include <linux/in.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/un.h>
+#include <linux/uaccess.h>
+#include <linux/inet.h>
+#include <linux/idr.h>
+#include <linux/file.h>
+#include <linux/parser.h>
+#include <linux/semaphore.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+
+#define P9_PORT			5640
+#define P9_RDMA_SQ_DEPTH	32
+#define P9_RDMA_RQ_DEPTH	32
+#define P9_RDMA_SEND_SGE	4
+#define P9_RDMA_RECV_SGE	4
+#define P9_RDMA_IRD		0
+#define P9_RDMA_ORD		0
+#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
+#define P9_RDMA_MAXSIZE		(4*4096)	/* Min SGE is 4, so we can
+						 * safely advertise a maxsize
+						 * of 16k (4 pages) */
+
+#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
+/**
+ * struct p9_trans_rdma - RDMA transport instance
+ *
+ * @state: tracks the transport state machine for connection setup and tear down
+ * @cm_id: The RDMA CM ID
+ * @pd: Protection Domain pointer
+ * @qp: Queue Pair pointer
+ * @cq: Completion Queue pointer
+ * @lkey: The local access only memory region key
+ * @timeout: Number of uSecs to wait for connection management events
+ * @sq_depth: The depth of the Send Queue
+ * @sq_sem: Semaphore for the SQ
+ * @rq_depth: The depth of the Receive Queue.
+ * @addr: The remote peer's address
+ * @req_lock: Protects the active request list
+ * @send_wait: Wait list when the SQ fills up
+ * @cm_done: Completion event for connection management tracking
+ */
+struct p9_trans_rdma {
+	enum {
+		P9_RDMA_INIT,
+		P9_RDMA_ADDR_RESOLVED,
+		P9_RDMA_ROUTE_RESOLVED,
+		P9_RDMA_CONNECTED,
+		P9_RDMA_FLUSHING,
+		P9_RDMA_CLOSING,
+		P9_RDMA_CLOSED,
+	} state;
+	struct rdma_cm_id *cm_id;
+	struct ib_pd *pd;
+	struct ib_qp *qp;
+	struct ib_cq *cq;
+	struct ib_mr *dma_mr;
+	u32 lkey;
+	long timeout;
+	int sq_depth;
+	struct semaphore sq_sem;
+	int rq_depth;
+	atomic_t rq_count;
+	struct sockaddr_in addr;
+	spinlock_t req_lock;
+
+	struct completion cm_done;
+};
+
+/**
+ * p9_rdma_context - Keeps track of in-process WR
+ *
+ * @wc_op: The original WR op for when the CQE completes in error.
+ * @busa: Bus address to unmap when the WR completes
+ * @req: Keeps track of requests (send)
+ * @rc: Keeps track of replies (receive)
+ */
+struct p9_rdma_req;
+struct p9_rdma_context {
+	enum ib_wc_opcode wc_op;
+	dma_addr_t busa;
+	union {
+		struct p9_req_t *req;
+		struct p9_fcall *rc;
+	};
+};
+
+/**
+ * p9_rdma_opts - Collection of mount options
+ * @port: port of connection
+ * @sq_depth: The requested depth of the SQ. This really doesn't need
+ * to be any deeper than the number of threads used in the client
+ * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
+ * @timeout: Time to wait in msecs for CM events
+ */
+struct p9_rdma_opts {
+	short port;
+	int sq_depth;
+	int rq_depth;
+	long timeout;
+};
+
+/*
+ * Option Parsing (code inspired by NFS code)
+ */
+enum {
+	/* Options that take integer arguments */
+	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_port, "port=%u"},
+	{Opt_sq_depth, "sq=%u"},
+	{Opt_rq_depth, "rq=%u"},
+	{Opt_timeout, "timeout=%u"},
+	{Opt_err, NULL},
+};
+
+/**
+ * parse_opts - parse mount options into rdma options structure
+ * @params: options string passed from mount
+ * @opts: transport-specific structure to parse options into
+ *
+ * Returns 0 upon success, -ERRNO upon failure
+ */
+static int parse_opts(char *params, struct p9_rdma_opts *opts)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+	char *options;
+	int ret;
+
+	opts->port = P9_PORT;
+	opts->sq_depth = P9_RDMA_SQ_DEPTH;
+	opts->rq_depth = P9_RDMA_RQ_DEPTH;
+	opts->timeout = P9_RDMA_TIMEOUT;
+
+	if (!params)
+		return 0;
+
+	options = kstrdup(params, GFP_KERNEL);
+	if (!options) {
+		P9_DPRINTK(P9_DEBUG_ERROR,
+			   "failed to allocate copy of option string\n");
+		return -ENOMEM;
+	}
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		int r;
+		if (!*p)
+			continue;
+		token = match_token(p, tokens, args);
+		r = match_int(&args[0], &option);
+		if (r < 0) {
+			P9_DPRINTK(P9_DEBUG_ERROR,
+				   "integer field, but no integer?\n");
+			ret = r;
+			continue;
+		}
+		switch (token) {
+		case Opt_port:
+			opts->port = option;
+			break;
+		case Opt_sq_depth:
+			opts->sq_depth = option;
+			break;
+		case Opt_rq_depth:
+			opts->rq_depth = option;
+			break;
+		case Opt_timeout:
+			opts->timeout = option;
+			break;
+		default:
+			continue;
+		}
+	}
+	/* RQ must be at least as large as the SQ */
+	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
+	kfree(options);
+	return 0;
+}
+
+static int
+p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+{
+	struct p9_client *c = id->context;
+	struct p9_trans_rdma *rdma = c->trans;
+	switch (event->event) {
+	case RDMA_CM_EVENT_ADDR_RESOLVED:
+		BUG_ON(rdma->state != P9_RDMA_INIT);
+		rdma->state = P9_RDMA_ADDR_RESOLVED;
+		break;
+
+	case RDMA_CM_EVENT_ROUTE_RESOLVED:
+		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
+		rdma->state = P9_RDMA_ROUTE_RESOLVED;
+		break;
+
+	case RDMA_CM_EVENT_ESTABLISHED:
+		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
+		rdma->state = P9_RDMA_CONNECTED;
+		break;
+
+	case RDMA_CM_EVENT_DISCONNECTED:
+		if (rdma)
+			rdma->state = P9_RDMA_CLOSED;
+		if (c)
+			c->status = Disconnected;
+		break;
+
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+		break;
+
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+	case RDMA_CM_EVENT_ROUTE_ERROR:
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+	case RDMA_CM_EVENT_MULTICAST_JOIN:
+	case RDMA_CM_EVENT_MULTICAST_ERROR:
+	case RDMA_CM_EVENT_REJECTED:
+	case RDMA_CM_EVENT_CONNECT_REQUEST:
+	case RDMA_CM_EVENT_CONNECT_RESPONSE:
+	case RDMA_CM_EVENT_CONNECT_ERROR:
+	case RDMA_CM_EVENT_ADDR_ERROR:
+	case RDMA_CM_EVENT_UNREACHABLE:
+		c->status = Disconnected;
+		rdma_disconnect(rdma->cm_id);
+		break;
+	default:
+		BUG();
+	}
+	complete(&rdma->cm_done);
+	return 0;
+}
+
+static void
+handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
+	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+{
+	struct p9_req_t *req;
+	int err = 0;
+	int16_t tag;
+
+	req = NULL;
+	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
+							 DMA_FROM_DEVICE);
+
+	if (status != IB_WC_SUCCESS)
+		goto err_out;
+
+	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
+	if (err)
+		goto err_out;
+
+	req = p9_tag_lookup(client, tag);
+	if (!req)
+		goto err_out;
+
+	req->rc = c->rc;
+	p9_client_cb(client, req);
+
+	return;
+
+ err_out:
+	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
+		   req, err, status);
+	rdma->state = P9_RDMA_FLUSHING;
+	client->status = Disconnected;
+	return;
+}
+
+static void
+handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
+	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+{
+	ib_dma_unmap_single(rdma->cm_id->device,
+			    c->busa, c->req->tc->size,
+			    DMA_TO_DEVICE);
+}
+
+static void qp_event_handler(struct ib_event *event, void *context)
+{
+	P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
+								context);
+}
+
+static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
+{
+	struct p9_client *client = cq_context;
+	struct p9_trans_rdma *rdma = client->trans;
+	int ret;
+	struct ib_wc wc;
+
+	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
+	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
+		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
+
+		switch (c->wc_op) {
+		case IB_WC_RECV:
+			atomic_dec(&rdma->rq_count);
+			handle_recv(client, rdma, c, wc.status, wc.byte_len);
+			break;
+
+		case IB_WC_SEND:
+			handle_send(client, rdma, c, wc.status, wc.byte_len);
+			up(&rdma->sq_sem);
+			break;
+
+		default:
+			printk(KERN_ERR "9prdma: unexpected completion type, "
+			       "c->wc_op=%d, wc.opcode=%d, status=%d\n",
+			       c->wc_op, wc.opcode, wc.status);
+			break;
+		}
+		kfree(c);
+	}
+}
+
+static void cq_event_handler(struct ib_event *e, void *v)
+{
+	P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
+}
+
+static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
+{
+	if (!rdma)
+		return;
+
+	if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
+		ib_dereg_mr(rdma->dma_mr);
+
+	if (rdma->qp && !IS_ERR(rdma->qp))
+		ib_destroy_qp(rdma->qp);
+
+	if (rdma->pd && !IS_ERR(rdma->pd))
+		ib_dealloc_pd(rdma->pd);
+
+	if (rdma->cq && !IS_ERR(rdma->cq))
+		ib_destroy_cq(rdma->cq);
+
+	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
+		rdma_destroy_id(rdma->cm_id);
+
+	kfree(rdma);
+}
+
+static int
+post_recv(struct p9_client *client, struct p9_rdma_context *c)
+{
+	struct p9_trans_rdma *rdma = client->trans;
+	struct ib_recv_wr wr, *bad_wr;
+	struct ib_sge sge;
+
+	c->busa = ib_dma_map_single(rdma->cm_id->device,
+				    c->rc->sdata, client->msize,
+				    DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
+		goto error;
+
+	sge.addr = c->busa;
+	sge.length = client->msize;
+	sge.lkey = rdma->lkey;
+
+	wr.next = NULL;
+	c->wc_op = IB_WC_RECV;
+	wr.wr_id = (unsigned long) c;
+	wr.sg_list = &sge;
+	wr.num_sge = 1;
+	return ib_post_recv(rdma->qp, &wr, &bad_wr);
+
+ error:
+	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
+	return -EIO;
+}
+
+static int rdma_request(struct p9_client *client, struct p9_req_t *req)
+{
+	struct p9_trans_rdma *rdma = client->trans;
+	struct ib_send_wr wr, *bad_wr;
+	struct ib_sge sge;
+	int err = 0;
+	unsigned long flags;
+	struct p9_rdma_context *c = NULL;
+	struct p9_rdma_context *rpl_context = NULL;
+
+	/* Allocate an fcall for the reply */
+	rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
+	if (!rpl_context)
+		goto err_close;
+
+	/*
+	 * If the request has a buffer, steal it, otherwise
+	 * allocate a new one.  Typically, requests should already
+	 * have receive buffers allocated and just swap them around
+	 */
+	if (!req->rc) {
+		req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
+								GFP_KERNEL);
+		if (req->rc) {
+			req->rc->sdata = (char *) req->rc +
+						sizeof(struct p9_fcall);
+			req->rc->capacity = client->msize;
+		}
+	}
+	rpl_context->rc = req->rc;
+	if (!rpl_context->rc) {
+		kfree(rpl_context);
+		goto err_close;
+	}
+
+	/*
+	 * Post a receive buffer for this request. We need to ensure
+	 * there is a reply buffer available for every outstanding
+	 * request. A flushed request can result in no reply for an
+	 * outstanding request, so we must keep a count to avoid
+	 * overflowing the RQ.
+	 */
+	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
+		err = post_recv(client, rpl_context);
+		if (err) {
+			kfree(rpl_context->rc);
+			kfree(rpl_context);
+			goto err_close;
+		}
+	} else
+		atomic_dec(&rdma->rq_count);
+
+	/* remove posted receive buffer from request structure */
+	req->rc = NULL;
+
+	/* Post the request */
+	c = kmalloc(sizeof *c, GFP_KERNEL);
+	if (!c)
+		goto err_close;
+	c->req = req;
+
+	c->busa = ib_dma_map_single(rdma->cm_id->device,
+				    c->req->tc->sdata, c->req->tc->size,
+				    DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
+		goto error;
+
+	sge.addr = c->busa;
+	sge.length = c->req->tc->size;
+	sge.lkey = rdma->lkey;
+
+	wr.next = NULL;
+	c->wc_op = IB_WC_SEND;
+	wr.wr_id = (unsigned long) c;
+	wr.opcode = IB_WR_SEND;
+	wr.send_flags = IB_SEND_SIGNALED;
+	wr.sg_list = &sge;
+	wr.num_sge = 1;
+
+	if (down_interruptible(&rdma->sq_sem))
+		goto error;
+
+	return ib_post_send(rdma->qp, &wr, &bad_wr);
+
+ error:
+	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
+	return -EIO;
+
+ err_close:
+	spin_lock_irqsave(&rdma->req_lock, flags);
+	if (rdma->state < P9_RDMA_CLOSING) {
+		rdma->state = P9_RDMA_CLOSING;
+		spin_unlock_irqrestore(&rdma->req_lock, flags);
+		rdma_disconnect(rdma->cm_id);
+	} else
+		spin_unlock_irqrestore(&rdma->req_lock, flags);
+	return err;
+}
+
+static void rdma_close(struct p9_client *client)
+{
+	struct p9_trans_rdma *rdma;
+
+	if (!client)
+		return;
+
+	rdma = client->trans;
+	if (!rdma)
+		return;
+
+	client->status = Disconnected;
+	rdma_disconnect(rdma->cm_id);
+	rdma_destroy_trans(rdma);
+}
+
+/**
+ * alloc_rdma - Allocate and initialize the rdma transport structure
+ * @opts: Mount options structure
+ */
+static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
+{
+	struct p9_trans_rdma *rdma;
+
+	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
+	if (!rdma)
+		return NULL;
+
+	rdma->sq_depth = opts->sq_depth;
+	rdma->rq_depth = opts->rq_depth;
+	rdma->timeout = opts->timeout;
+	spin_lock_init(&rdma->req_lock);
+	init_completion(&rdma->cm_done);
+	sema_init(&rdma->sq_sem, rdma->sq_depth);
+	atomic_set(&rdma->rq_count, 0);
+
+	return rdma;
+}
+
+/* It's not clear we can do anything after the send has been posted */
+static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+	return 1;
+}
+
+/**
+ * rdma_create_trans - Transport method for creating a transport instance
+ * @client: client instance
+ * @addr: IP address string
+ * @args: Mount options string
+ */
+static int
+rdma_create_trans(struct p9_client *client, const char *addr, char *args)
+{
+	int err;
+	struct p9_rdma_opts opts;
+	struct p9_trans_rdma *rdma;
+	struct rdma_conn_param conn_param;
+	struct ib_qp_init_attr qp_attr;
+	struct ib_device_attr devattr;
+
+	/* Parse the transport specific mount options */
+	err = parse_opts(args, &opts);
+	if (err < 0)
+		return err;
+
+	/* Create and initialize the RDMA transport structure */
+	rdma = alloc_rdma(&opts);
+	if (!rdma)
+		return -ENOMEM;
+
+	/* Create the RDMA CM ID */
+	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
+	if (IS_ERR(rdma->cm_id))
+		goto error;
+
+	/* Resolve the server's address */
+	rdma->addr.sin_family = AF_INET;
+	rdma->addr.sin_addr.s_addr = in_aton(addr);
+	rdma->addr.sin_port = htons(opts.port);
+	err = rdma_resolve_addr(rdma->cm_id, NULL,
+				(struct sockaddr *)&rdma->addr,
+				rdma->timeout);
+	if (err)
+		goto error;
+	err = wait_for_completion_interruptible(&rdma->cm_done);
+	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
+		goto error;
+
+	/* Resolve the route to the server */
+	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
+	if (err)
+		goto error;
+	err = wait_for_completion_interruptible(&rdma->cm_done);
+	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
+		goto error;
+
+	/* Query the device attributes */
+	err = ib_query_device(rdma->cm_id->device, &devattr);
+	if (err)
+		goto error;
+
+	/* Create the Completion Queue */
+	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
+				cq_event_handler, client,
+				opts.sq_depth + opts.rq_depth + 1, 0);
+	if (IS_ERR(rdma->cq))
+		goto error;
+	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
+
+	/* Create the Protection Domain */
+	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
+	if (IS_ERR(rdma->pd))
+		goto error;
+
+	/* Cache the DMA lkey in the transport */
+	rdma->dma_mr = NULL;
+	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
+	else {
+		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
+		if (IS_ERR(rdma->dma_mr))
+			goto error;
+		rdma->lkey = rdma->dma_mr->lkey;
+	}
+
+	/* Create the Queue Pair */
+	memset(&qp_attr, 0, sizeof qp_attr);
+	qp_attr.event_handler = qp_event_handler;
+	qp_attr.qp_context = client;
+	qp_attr.cap.max_send_wr = opts.sq_depth;
+	qp_attr.cap.max_recv_wr = opts.rq_depth;
+	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
+	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
+	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+	qp_attr.qp_type = IB_QPT_RC;
+	qp_attr.send_cq = rdma->cq;
+	qp_attr.recv_cq = rdma->cq;
+	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
+	if (err)
+		goto error;
+	rdma->qp = rdma->cm_id->qp;
+
+	/* Request a connection */
+	memset(&conn_param, 0, sizeof(conn_param));
+	conn_param.private_data = NULL;
+	conn_param.private_data_len = 0;
+	conn_param.responder_resources = P9_RDMA_IRD;
+	conn_param.initiator_depth = P9_RDMA_ORD;
+	err = rdma_connect(rdma->cm_id, &conn_param);
+	if (err)
+		goto error;
+	err = wait_for_completion_interruptible(&rdma->cm_done);
+	if (err || (rdma->state != P9_RDMA_CONNECTED))
+		goto error;
+
+	client->trans = rdma;
+	client->status = Connected;
+
+	return 0;
+
+error:
+	rdma_destroy_trans(rdma);
+	return -ENOTCONN;
+}
+
+static struct p9_trans_module p9_rdma_trans = {
+	.name = "rdma",
+	.maxsize = P9_RDMA_MAXSIZE,
+	.def = 0,
+	.owner = THIS_MODULE,
+	.create = rdma_create_trans,
+	.close = rdma_close,
+	.request = rdma_request,
+	.cancel = rdma_cancel,
+};
+
+/**
+ * p9_trans_rdma_init - Register the 9P RDMA transport driver
+ */
+static int __init p9_trans_rdma_init(void)
+{
+	v9fs_register_trans(&p9_rdma_trans);
+	return 0;
+}
+
+static void __exit p9_trans_rdma_exit(void)
+{
+	v9fs_unregister_trans(&p9_rdma_trans);
+}
+
+module_init(p9_trans_rdma_init);
+module_exit(p9_trans_rdma_exit);
+
+MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
+MODULE_DESCRIPTION("RDMA Transport for 9P");
+MODULE_LICENSE("Dual BSD/GPL");
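
The receive-side bookkeeping that rdma_request() explains in its comment is essentially
a credit check: never post more than rq_depth receive buffers, since a flushed request
may get no reply and its already-posted buffer can simply be reused.  A minimal sketch
of that rule, factored into a helper, is below; the helper name is made up for
illustration, while post_recv(), rq_count and rq_depth are the ones defined in
trans_rdma.c above.

static int post_recv_credit(struct p9_client *client,
			    struct p9_trans_rdma *rdma,
			    struct p9_rdma_context *rpl_context)
{
	/* Take a credit; if the RQ is already fully provisioned, give the
	 * credit back and reuse one of the buffers that is still posted. */
	if (atomic_inc_return(&rdma->rq_count) > rdma->rq_depth) {
		atomic_dec(&rdma->rq_count);
		return 0;
	}
	return post_recv(client, rpl_context);
}
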
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 94912e0..2d7781e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -1,12 +1,10 @@
 /*
- * The Guest 9p transport driver
+ * The Virtio 9p transport driver
  *
  * This is a block based transport driver based on the lguest block driver
  * code.
  *
- */
-/*
- *  Copyright (C) 2007 Eric Van Hensbergen, IBM Corporation
+ *  Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
  *
  *  Based on virtio console driver
  *  Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
@@ -41,6 +39,7 @@
 #include <linux/file.h>
 #include <net/9p/9p.h>
 #include <linux/parser.h>
+#include <net/9p/client.h>
 #include <net/9p/transport.h>
 #include <linux/scatterlist.h>
 #include <linux/virtio.h>
@@ -53,50 +52,6 @@
 /* global which tracks highest initialized channel */
 static int chan_index;
 
-#define P9_INIT_MAXTAG	16
-
-
-/**
- * enum p9_req_status_t - virtio request status
- * @REQ_STATUS_IDLE: request slot unused
- * @REQ_STATUS_SENT: request sent to server
- * @REQ_STATUS_RCVD: response received from server
- * @REQ_STATUS_FLSH: request has been flushed
- *
- * The @REQ_STATUS_IDLE state is used to mark a request slot as unused
- * but use is actually tracked by the idpool structure which handles tag
- * id allocation.
- *
- */
-
-enum p9_req_status_t {
-	REQ_STATUS_IDLE,
-	REQ_STATUS_SENT,
-	REQ_STATUS_RCVD,
-	REQ_STATUS_FLSH,
-};
-
-/**
- * struct p9_req_t - virtio request slots
- * @status: status of this request slot
- * @wq: wait_queue for the client to block on for this request
- *
- * The virtio transport uses an array to track outstanding requests
- * instead of a list.  While this may incurr overhead during initial
- * allocation or expansion, it makes request lookup much easier as the
- * tag id is a index into an array.  (We use tag+1 so that we can accomodate
- * the -1 tag for the T_VERSION request).
- * This also has the nice effect of only having to allocate wait_queues
- * once, instead of constantly allocating and freeing them.  Its possible
- * other resources could benefit from this scheme as well.
- *
- */
-
-struct p9_req_t {
-	int status;
-	wait_queue_head_t *wq;
-};
-
 /**
  * struct virtio_chan - per-instance transport information
  * @initialized: whether the channel is initialized
@@ -121,67 +76,14 @@
 
 	spinlock_t lock;
 
+	struct p9_client *client;
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
 
-	struct p9_idpool *tagpool;
-	struct p9_req_t *reqs;
-	int max_tag;
-
 	/* Scatterlist: can be too big for stack. */
 	struct scatterlist sg[VIRTQUEUE_NUM];
 } channels[MAX_9P_CHAN];
 
-/**
- * p9_lookup_tag - Lookup requests by tag
- * @c: virtio channel to lookup tag within
- * @tag: numeric id for transaction
- *
- * this is a simple array lookup, but will grow the
- * request_slots as necessary to accomodate transaction
- * ids which did not previously have a slot.
- *
- * Bugs: there is currently no upper limit on request slots set
- * here, but that should be constrained by the id accounting.
- */
-
-static struct p9_req_t *p9_lookup_tag(struct virtio_chan *c, u16 tag)
-{
-	/* This looks up the original request by tag so we know which
-	 * buffer to read the data into */
-	tag++;
-
-	while (tag >= c->max_tag) {
-		int old_max = c->max_tag;
-		int count;
-
-		if (c->max_tag)
-			c->max_tag *= 2;
-		else
-			c->max_tag = P9_INIT_MAXTAG;
-
-		c->reqs = krealloc(c->reqs, sizeof(struct p9_req_t)*c->max_tag,
-								GFP_ATOMIC);
-		if (!c->reqs) {
-			printk(KERN_ERR "Couldn't grow tag array\n");
-			BUG();
-		}
-		for (count = old_max; count < c->max_tag; count++) {
-			c->reqs[count].status = REQ_STATUS_IDLE;
-			c->reqs[count].wq = kmalloc(sizeof(wait_queue_head_t),
-								GFP_ATOMIC);
-			if (!c->reqs[count].wq) {
-				printk(KERN_ERR "Couldn't grow tag array\n");
-				BUG();
-			}
-			init_waitqueue_head(c->reqs[count].wq);
-		}
-	}
-
-	return &c->reqs[tag];
-}
-
-
 /* How many bytes left in this page. */
 static unsigned int rest_of_page(void *data)
 {
@@ -197,25 +99,13 @@
  *
  */
 
-static void p9_virtio_close(struct p9_trans *trans)
+static void p9_virtio_close(struct p9_client *client)
 {
-	struct virtio_chan *chan = trans->priv;
-	int count;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->lock, flags);
-	p9_idpool_destroy(chan->tagpool);
-	for (count = 0; count < chan->max_tag; count++)
-		kfree(chan->reqs[count].wq);
-	kfree(chan->reqs);
-	chan->max_tag = 0;
-	spin_unlock_irqrestore(&chan->lock, flags);
+	struct virtio_chan *chan = client->trans;
 
 	mutex_lock(&virtio_9p_lock);
 	chan->inuse = false;
 	mutex_unlock(&virtio_9p_lock);
-
-	kfree(trans);
 }
 
 /**
@@ -236,17 +126,16 @@
 	struct virtio_chan *chan = vq->vdev->priv;
 	struct p9_fcall *rc;
 	unsigned int len;
-	unsigned long flags;
 	struct p9_req_t *req;
 
-	spin_lock_irqsave(&chan->lock, flags);
+	P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
+
 	while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
-		req = p9_lookup_tag(chan, rc->tag);
-		req->status = REQ_STATUS_RCVD;
-		wake_up(req->wq);
+		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
+		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
+		req = p9_tag_lookup(chan->client, rc->tag);
+		p9_client_cb(chan->client, req);
 	}
-	/* In case queue is stopped waiting for more buffers. */
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -283,8 +172,14 @@
 	return index-start;
 }
 
+/* We don't currently allow canceling of virtio requests */
+static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+	return 1;
+}
+
 /**
- * p9_virtio_rpc - issue a request and wait for a response
+ * p9_virtio_request - issue a request
  * @t: transport state
  * @tc: &p9_fcall request to transmit
  * @rc: &p9_fcall to put response into
@@ -292,44 +187,22 @@
  */
 
 static int
-p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc)
+p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
 	int in, out;
-	int n, err, size;
-	struct virtio_chan *chan = t->priv;
-	char *rdata;
-	struct p9_req_t *req;
-	unsigned long flags;
+	struct virtio_chan *chan = client->trans;
+	char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
 
-	if (*rc == NULL) {
-		*rc = kmalloc(sizeof(struct p9_fcall) + t->msize, GFP_KERNEL);
-		if (!*rc)
-			return -ENOMEM;
-	}
+	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
-	rdata = (char *)*rc+sizeof(struct p9_fcall);
-
-	n = P9_NOTAG;
-	if (tc->id != P9_TVERSION) {
-		n = p9_idpool_get(chan->tagpool);
-		if (n < 0)
-			return -ENOMEM;
-	}
-
-	spin_lock_irqsave(&chan->lock, flags);
-	req = p9_lookup_tag(chan, n);
-	spin_unlock_irqrestore(&chan->lock, flags);
-
-	p9_set_tag(tc, n);
-
-	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc tag %d\n", n);
-
-	out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, tc->sdata, tc->size);
-	in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata, t->msize);
+	out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
+								req->tc->size);
+	in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
+								client->msize);
 
 	req->status = REQ_STATUS_SENT;
 
-	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, tc)) {
+	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc)) {
 		P9_DPRINTK(P9_DEBUG_TRANS,
 			"9p debug: virtio rpc add_buf returned failure");
 		return -EIO;
@@ -337,31 +210,7 @@
 
 	chan->vq->vq_ops->kick(chan->vq);
 
-	wait_event(*req->wq, req->status == REQ_STATUS_RCVD);
-
-	size = le32_to_cpu(*(__le32 *) rdata);
-
-	err = p9_deserialize_fcall(rdata, size, *rc, t->extended);
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_TRANS,
-			"9p debug: virtio rpc deserialize returned %d\n", err);
-		return err;
-	}
-
-#ifdef CONFIG_NET_9P_DEBUG
-	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
-		char buf[150];
-
-		p9_printfcall(buf, sizeof(buf), *rc, t->extended);
-		printk(KERN_NOTICE ">>> %p %s\n", t, buf);
-	}
-#endif
-
-	if (n != P9_NOTAG && p9_idpool_check(n, chan->tagpool))
-		p9_idpool_put(n, chan->tagpool);
-
-	req->status = REQ_STATUS_IDLE;
-
+	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
 	return 0;
 }
 
@@ -422,10 +271,9 @@
 
 /**
  * p9_virtio_create - allocate a new virtio channel
+ * @client: client instance invoking this transport
  * @devname: string identifying the channel to connect to (unused)
  * @args: args passed from sys_mount() for per-transport options (unused)
- * @msize: requested maximum packet size
- * @extended: 9p2000.u enabled flag
  *
  * This sets up a transport channel for 9p communication.  Right now
  * we only match the first available channel, but eventually we could look up
@@ -441,11 +289,9 @@
  *
  */
 
-static struct p9_trans *
-p9_virtio_create(const char *devname, char *args, int msize,
-							unsigned char extended)
+static int
+p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 {
-	struct p9_trans *trans;
 	struct virtio_chan *chan = channels;
 	int index = 0;
 
@@ -463,30 +309,13 @@
 
 	if (index >= MAX_9P_CHAN) {
 		printk(KERN_ERR "9p: no channels available\n");
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 	}
 
-	chan->tagpool = p9_idpool_create();
-	if (IS_ERR(chan->tagpool)) {
-		printk(KERN_ERR "9p: couldn't allocate tagpool\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	p9_idpool_get(chan->tagpool); /* reserve tag 0 */
-	chan->max_tag = 0;
-	chan->reqs = NULL;
+	client->trans = (void *)chan;
+	chan->client = client;
 
-	trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
-	if (!trans) {
-		printk(KERN_ERR "9p: couldn't allocate transport\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	trans->extended = extended;
-	trans->msize = msize;
-	trans->close = p9_virtio_close;
-	trans->rpc = p9_virtio_rpc;
-	trans->priv = chan;
-
-	return trans;
+	return 0;
 }
 
 /**
@@ -526,6 +355,9 @@
 static struct p9_trans_module p9_virtio_trans = {
 	.name = "virtio",
 	.create = p9_virtio_create,
+	.close = p9_virtio_close,
+	.request = p9_virtio_request,
+	.cancel = p9_virtio_cancel,
 	.maxsize = PAGE_SIZE*16,
 	.def = 0,
 	.owner = THIS_MODULE,
diff --git a/net/9p/util.c b/net/9p/util.c
index 958fc58..dc4ec05 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -105,6 +105,7 @@
 	else if (error)
 		return -1;
 
+	P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
 	return i;
 }
 EXPORT_SYMBOL(p9_idpool_get);
@@ -121,6 +122,9 @@
 void p9_idpool_put(int id, struct p9_idpool *p)
 {
 	unsigned long flags;
+
+	P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
+
 	spin_lock_irqsave(&p->lock, flags);
 	idr_remove(&p->pool, id);
 	spin_unlock_irqrestore(&p->lock, flags);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a4abed5..fa5cda4 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -719,7 +719,7 @@
 		return NF_ACCEPT;
 	}
 	*d = (struct net_device *)in;
-	NF_HOOK(NF_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
+	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
 		(struct net_device *)out, br_nf_forward_finish);
 
 	return NF_STOLEN;
diff --git a/net/core/dev.c b/net/core/dev.c
index 868ec0b..b8a4fd0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -924,10 +924,10 @@
 		strlcpy(dev->name, newname, IFNAMSIZ);
 
 rollback:
-	err = device_rename(&dev->dev, dev->name);
-	if (err) {
+	ret = device_rename(&dev->dev, dev->name);
+	if (ret) {
 		memcpy(dev->name, oldname, IFNAMSIZ);
-		return err;
+		return ret;
 	}
 
 	write_lock_bh(&dev_base_lock);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 1106278..d4ce122 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -259,7 +259,7 @@
 	fl.fl6_flowlabel = 0;
 	fl.oif = ireq6->iif;
 	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-	fl.fl_ip_sport = inet_sk(sk)->sport;
+	fl.fl_ip_sport = inet_rsk(req)->loc_port;
 	security_req_classify_flow(req, &fl);
 
 	opt = np->opt;
@@ -558,7 +558,7 @@
 		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
 		fl.oif = sk->sk_bound_dev_if;
 		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-		fl.fl_ip_sport = inet_sk(sk)->sport;
+		fl.fl_ip_sport = inet_rsk(req)->loc_port;
 		security_sk_classify_flow(sk, &fl);
 
 		if (ip6_dst_lookup(sk, &dst, &fl))
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index b2804e2d..e6bf99e 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -309,6 +309,7 @@
 	struct dccp_request_sock *dreq = dccp_rsk(req);
 
 	inet_rsk(req)->rmt_port	  = dccp_hdr(skb)->dccph_sport;
+	inet_rsk(req)->loc_port	  = dccp_hdr(skb)->dccph_dport;
 	inet_rsk(req)->acked	  = 0;
 	req->rcv_wnd		  = sysctl_dccp_feat_sequence_window;
 	dreq->dreq_timestamp_echo = 0;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index d06945c..809d803 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -347,7 +347,7 @@
 	/* Build and checksum header */
 	dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
-	dh->dccph_sport	= inet_sk(sk)->sport;
+	dh->dccph_sport	= inet_rsk(req)->loc_port;
 	dh->dccph_dport	= inet_rsk(req)->rmt_port;
 	dh->dccph_doff	= (dccp_header_size +
 			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index b043eda..1a9dd66 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -663,7 +663,7 @@
 void arp_xmit(struct sk_buff *skb)
 {
 	/* Send it off, maybe filter it using firewalling first.  */
-	NF_HOOK(NF_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit);
+	NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit);
 }
 
 /*
@@ -928,7 +928,7 @@
 
 	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 
-	return NF_HOOK(NF_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
+	return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
 
 freeskb:
 	kfree_skb(skb);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ffeaffc..8303e4b 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -742,6 +742,7 @@
 			*obj = kmalloc(sizeof(struct snmp_object) + len,
 				       GFP_ATOMIC);
 			if (*obj == NULL) {
+				kfree(p);
 				kfree(id);
 				if (net_ratelimit())
 					printk("OOM in bsalg (%d)\n", __LINE__);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index ec394cf..676c80b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -204,6 +204,7 @@
 
 	req->mss = mss;
 	ireq->rmt_port = th->source;
+	ireq->loc_port = th->dest;
 	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
 	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
 	if (ipv6_opt_accepted(sk, skb) ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e5310c9b..b6b356b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,7 @@
 	fl.fl6_flowlabel = 0;
 	fl.oif = treq->iif;
 	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-	fl.fl_ip_sport = inet_sk(sk)->sport;
+	fl.fl_ip_sport = inet_rsk(req)->loc_port;
 	security_req_classify_flow(req, &fl);
 
 	opt = np->opt;
@@ -1309,7 +1309,7 @@
 		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
 		fl.oif = sk->sk_bound_dev_if;
 		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-		fl.fl_ip_sport = inet_sk(sk)->sport;
+		fl.fl_ip_sport = inet_rsk(req)->loc_port;
 		security_req_classify_flow(req, &fl);
 
 		if (ip6_dst_lookup(sk, &dst, &fl))
@@ -1865,7 +1865,7 @@
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3],
-		   ntohs(inet_sk(sk)->sport),
+		   ntohs(inet_rsk(req)->loc_port),
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3],
 		   ntohs(inet_rsk(req)->rmt_port),
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 78892cf..25dcef9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -271,7 +271,6 @@
 config NF_CT_NETLINK
 	tristate 'Connection tracking netlink interface'
 	select NETFILTER_NETLINK
-	depends on NF_NAT=n || NF_NAT
 	default m if NETFILTER_ADVANCED=n
 	help
 	  This option enables support for a netlink-based userspace interface
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 05048e4..79a6980 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -25,11 +25,13 @@
 if IP_VS
 
 config	IP_VS_IPV6
-	bool "IPv6 support for IPVS (DANGEROUS)"
+	bool "IPv6 support for IPVS"
 	depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6)
 	---help---
 	  Add IPv6 support to IPVS. This is incomplete and might be dangerous.
 
+	  See http://www.mindbasket.com/ipvs for more information.
+
 	  Say N if unsure.
 
 config	IP_VS_DEBUG
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 2e4ad96..a040d46 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -813,6 +813,7 @@
 	return err;
 }
 
+#ifdef CONFIG_NF_NAT_NEEDED
 static int
 ctnetlink_parse_nat_setup(struct nf_conn *ct,
 			  enum nf_nat_manip_type manip,
@@ -840,6 +841,7 @@
 
 	return parse_nat_setup(ct, manip, attr);
 }
+#endif
 
 static int
 ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 2cc1fff..f9977b3 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -48,7 +48,7 @@
 	},
 	{
 		.name		= "NFQUEUE",
-		.family		= NF_ARP,
+		.family		= NFPROTO_ARP,
 		.target		= nfqueue_tg,
 		.targetsize	= sizeof(struct xt_NFQ_info),
 		.me		= THIS_MODULE,
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 6f62c36..7ac54ea 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -61,7 +61,7 @@
 	if (info->flags & IPRANGE_SRC) {
 		m  = ntohl(iph->saddr) < ntohl(info->src_min.ip);
 		m |= ntohl(iph->saddr) > ntohl(info->src_max.ip);
-		m ^= info->flags & IPRANGE_SRC_INV;
+		m ^= !!(info->flags & IPRANGE_SRC_INV);
 		if (m) {
 			pr_debug("src IP " NIPQUAD_FMT " NOT in range %s"
 			         NIPQUAD_FMT "-" NIPQUAD_FMT "\n",
@@ -75,7 +75,7 @@
 	if (info->flags & IPRANGE_DST) {
 		m  = ntohl(iph->daddr) < ntohl(info->dst_min.ip);
 		m |= ntohl(iph->daddr) > ntohl(info->dst_max.ip);
-		m ^= info->flags & IPRANGE_DST_INV;
+		m ^= !!(info->flags & IPRANGE_DST_INV);
 		if (m) {
 			pr_debug("dst IP " NIPQUAD_FMT " NOT in range %s"
 			         NIPQUAD_FMT "-" NIPQUAD_FMT "\n",
@@ -114,14 +114,14 @@
 	if (info->flags & IPRANGE_SRC) {
 		m  = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
 		m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
-		m ^= info->flags & IPRANGE_SRC_INV;
+		m ^= !!(info->flags & IPRANGE_SRC_INV);
 		if (m)
 			return false;
 	}
 	if (info->flags & IPRANGE_DST) {
 		m  = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
 		m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
-		m ^= info->flags & IPRANGE_DST_INV;
+		m ^= !!(info->flags & IPRANGE_DST_INV);
 		if (m)
 			return false;
 	}
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 4ebd4ca..280c471 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -318,15 +318,15 @@
 	for (i = 0; i < ip_list_hash_size; i++)
 		INIT_LIST_HEAD(&t->iphash[i]);
 #ifdef CONFIG_PROC_FS
-	t->proc = proc_create(t->name, ip_list_perms, recent_proc_dir,
-		  &recent_mt_fops);
+	t->proc = proc_create_data(t->name, ip_list_perms, recent_proc_dir,
+		  &recent_mt_fops, t);
 	if (t->proc == NULL) {
 		kfree(t);
 		goto out;
 	}
 #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
-	t->proc_old = proc_create(t->name, ip_list_perms, proc_old_dir,
-		      &recent_old_fops);
+	t->proc_old = proc_create_data(t->name, ip_list_perms, proc_old_dir,
+		      &recent_old_fops, t);
 	if (t->proc_old == NULL) {
 		remove_proc_entry(t->name, proc_old_dir);
 		kfree(t);
@@ -334,11 +334,9 @@
 	}
 	t->proc_old->uid   = ip_list_uid;
 	t->proc_old->gid   = ip_list_gid;
-	t->proc_old->data  = t;
 #endif
 	t->proc->uid       = ip_list_uid;
 	t->proc->gid       = ip_list_gid;
-	t->proc->data      = t;
 #endif
 	spin_lock_bh(&recent_lock);
 	list_add_tail(&t->list, &tables);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7b5572d..93cd30c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -326,6 +326,7 @@
 
 static struct netdev_queue noop_netdev_queue = {
 	.qdisc		=	&noop_qdisc,
+	.qdisc_sleeping	=	&noop_qdisc,
 };
 
 struct Qdisc noop_qdisc = {
@@ -352,6 +353,7 @@
 static struct Qdisc noqueue_qdisc;
 static struct netdev_queue noqueue_netdev_queue = {
 	.qdisc		=	&noqueue_qdisc,
+	.qdisc_sleeping	=	&noqueue_qdisc,
 };
 
 static struct Qdisc noqueue_qdisc = {
diff --git a/samples/Kconfig b/samples/Kconfig
index e1fb471..4b02f5a 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -13,6 +13,12 @@
 	help
 	  This build markers example modules.
 
+config SAMPLE_TRACEPOINTS
+	tristate "Build tracepoints examples -- loadable modules only"
+	depends on TRACEPOINTS && m
+	help
+	  This builds the tracepoints example modules.
+
 config SAMPLE_KOBJECT
 	tristate "Build kobject examples"
 	help
diff --git a/samples/Makefile b/samples/Makefile
index 2e02575..10eaca8 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -1,3 +1,3 @@
 # Makefile for Linux samples code
 
-obj-$(CONFIG_SAMPLES)	+= markers/ kobject/ kprobes/
+obj-$(CONFIG_SAMPLES)	+= markers/ kobject/ kprobes/ tracepoints/
diff --git a/samples/markers/probe-example.c b/samples/markers/probe-example.c
index c8e099d..2dfb3b3 100644
--- a/samples/markers/probe-example.c
+++ b/samples/markers/probe-example.c
@@ -81,6 +81,7 @@
 			probe_array[i].probe_func, &probe_array[i]);
 	printk(KERN_INFO "Number of event b : %u\n",
 			atomic_read(&eventb_count));
+	marker_synchronize_unregister();
 }
 
 module_init(probe_init);
diff --git a/samples/tracepoints/Makefile b/samples/tracepoints/Makefile
new file mode 100644
index 0000000..36479ad
--- /dev/null
+++ b/samples/tracepoints/Makefile
@@ -0,0 +1,6 @@
+# builds the tracepoint example kernel modules;
+# then to use one (as root):  insmod <module_name.ko>
+
+obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o
+obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o
+obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h
new file mode 100644
index 0000000..0216b55
--- /dev/null
+++ b/samples/tracepoints/tp-samples-trace.h
@@ -0,0 +1,13 @@
+#ifndef _TP_SAMPLES_TRACE_H
+#define _TP_SAMPLES_TRACE_H
+
+#include <linux/proc_fs.h>	/* for struct inode and struct file */
+#include <linux/tracepoint.h>
+
+DEFINE_TRACE(subsys_event,
+	TPPROTO(struct inode *inode, struct file *file),
+	TPARGS(inode, file));
+DEFINE_TRACE(subsys_eventb,
+	TPPROTO(void),
+	TPARGS());
+#endif
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c
new file mode 100644
index 0000000..55abfdd
--- /dev/null
+++ b/samples/tracepoints/tracepoint-probe-sample.c
@@ -0,0 +1,55 @@
+/*
+ * tracepoint-probe-sample.c
+ *
+ * sample tracepoint probes.
+ */
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/dcache.h>
+#include "tp-samples-trace.h"
+
+/*
+ * Here the caller only guarantees locking for struct file and struct inode.
+ * Locking must therefore be done in the probe to use the dentry.
+ */
+static void probe_subsys_event(struct inode *inode, struct file *file)
+{
+	path_get(&file->f_path);
+	dget(file->f_path.dentry);
+	printk(KERN_INFO "Event is encountered with filename %s\n",
+		file->f_path.dentry->d_name.name);
+	dput(file->f_path.dentry);
+	path_put(&file->f_path);
+}
+
+static void probe_subsys_eventb(void)
+{
+	printk(KERN_INFO "Event B is encountered\n");
+}
+
+int __init tp_sample_trace_init(void)
+{
+	int ret;
+
+	ret = register_trace_subsys_event(probe_subsys_event);
+	WARN_ON(ret);
+	ret = register_trace_subsys_eventb(probe_subsys_eventb);
+	WARN_ON(ret);
+
+	return 0;
+}
+
+module_init(tp_sample_trace_init);
+
+void __exit tp_sample_trace_exit(void)
+{
+	unregister_trace_subsys_eventb(probe_subsys_eventb);
+	unregister_trace_subsys_event(probe_subsys_event);
+}
+
+module_exit(tp_sample_trace_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c
new file mode 100644
index 0000000..5e9fcf4
--- /dev/null
+++ b/samples/tracepoints/tracepoint-probe-sample2.c
@@ -0,0 +1,42 @@
+/*
+ * tracepoint-probe-sample2.c
+ *
+ * 2nd sample tracepoint probes.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include "tp-samples-trace.h"
+
+/*
+ * Here the caller only guarantees locking for struct file and struct inode.
+ * Locking must therefore be done in the probe to use the dentry.
+ */
+static void probe_subsys_event(struct inode *inode, struct file *file)
+{
+	printk(KERN_INFO "Event is encountered with inode number %lu\n",
+		inode->i_ino);
+}
+
+int __init tp_sample_trace_init(void)
+{
+	int ret;
+
+	ret = register_trace_subsys_event(probe_subsys_event);
+	WARN_ON(ret);
+
+	return 0;
+}
+
+module_init(tp_sample_trace_init);
+
+void __exit tp_sample_trace_exit(void)
+{
+	unregister_trace_subsys_event(probe_subsys_event);
+}
+
+module_exit(tp_sample_trace_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
new file mode 100644
index 0000000..4ae4b7f
--- /dev/null
+++ b/samples/tracepoints/tracepoint-sample.c
@@ -0,0 +1,53 @@
+/* tracepoint-sample.c
+ *
+ * Executes a tracepoint when /proc/tracepoint-example is opened.
+ *
+ * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include "tp-samples-trace.h"
+
+struct proc_dir_entry *pentry_example;
+
+static int my_open(struct inode *inode, struct file *file)
+{
+	int i;
+
+	trace_subsys_event(inode, file);
+	for (i = 0; i < 10; i++)
+		trace_subsys_eventb();
+	return -EPERM;
+}
+
+static struct file_operations mark_ops = {
+	.open = my_open,
+};
+
+static int example_init(void)
+{
+	printk(KERN_ALERT "example init\n");
+	pentry_example = proc_create("tracepoint-example", 0444, NULL,
+		&mark_ops);
+	if (!pentry_example)
+		return -EPERM;
+	return 0;
+}
+
+static void example_exit(void)
+{
+	printk(KERN_ALERT "example exit\n");
+	remove_proc_entry("tracepoint-example", NULL);
+}
+
+module_init(example_init)
+module_exit(example_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("Tracepoint example");
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 277cfe0..5ed4cbf 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -198,10 +198,17 @@
 	fi;
 endif
 
+ifdef CONFIG_FTRACE_MCOUNT_RECORD
+cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \
+	"$(ARCH)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" \
+	"$(MV)" "$(@)";
+endif
+
 define rule_cc_o_c
 	$(call echo-cmd,checksrc) $(cmd_checksrc)			  \
 	$(call echo-cmd,cc_o_c) $(cmd_cc_o_c);				  \
 	$(cmd_modversions)						  \
+	$(cmd_record_mcount)						  \
 	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' >    \
 	                                              $(dot-target).tmp;  \
 	rm -f $(depfile);						  \
diff --git a/scripts/basic/.gitignore b/scripts/basic/.gitignore
index 7304e19..bf8b199 100644
--- a/scripts/basic/.gitignore
+++ b/scripts/basic/.gitignore
@@ -1,3 +1,3 @@
+hash
 fixdep
-split-include
 docproc
diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
index 2243353..5e7316e 100644
--- a/scripts/bootgraph.pl
+++ b/scripts/bootgraph.pl
@@ -37,13 +37,13 @@
 # 	dmesg | perl scripts/bootgraph.pl > output.svg
 #
 
-my @rows;
-my %start, %end, %row;
+my %start, %end;
 my $done = 0;
-my $rowcount = 0;
 my $maxtime = 0;
 my $firsttime = 100;
 my $count = 0;
+my %pids;
+
 while (<>) {
 	my $line = $_;
 	if ($line =~ /([0-9\.]+)\] calling  ([a-zA-Z0-9\_]+)\+/) {
@@ -54,14 +54,8 @@
 				$firsttime = $1;
 			}
 		}
-		$row{$func} = 1;
 		if ($line =~ /\@ ([0-9]+)/) {
-			my $pid = $1;
-			if (!defined($rows[$pid])) {
-				$rowcount = $rowcount + 1;
-				$rows[$pid] = $rowcount;
-			}
-			$row{$func} = $rows[$pid];
+			$pids{$func} = $1;
 		}
 		$count = $count + 1;
 	}
@@ -109,17 +103,25 @@
 my $mult = 950.0 / ($maxtime - $firsttime);
 my $threshold = ($maxtime - $firsttime) / 60.0;
 my $stylecounter = 0;
+my %rows;
+my $rowscount = 1;
 while (($key,$value) = each %start) {
 	my $duration = $end{$key} - $start{$key};
 
 	if ($duration >= $threshold) {
 		my $s, $s2, $e, $y;
+		$pid = $pids{$key};
+
+		if (!defined($rows{$pid})) {
+			$rows{$pid} = $rowscount;
+			$rowscount = $rowscount + 1;
+		}
 		$s = ($value - $firsttime) * $mult;
 		$s2 = $s + 6;
 		$e = ($end{$key} - $firsttime) * $mult;
 		$w = $e - $s;
 
-		$y = $row{$key} * 150;
+		$y = $rows{$pid} * 150;
 		$y2 = $y + 4;
 
 		$style = $styles[$stylecounter];
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e30bac1..f88bb3e 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1,5 +1,5 @@
 #!/usr/bin/perl -w
-# (c) 2001, Dave Jones. <davej@codemonkey.org.uk> (the file handling bit)
+# (c) 2001, Dave Jones. <davej@redhat.com> (the file handling bit)
 # (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
 # (c) 2007, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite, etc)
 # Licensed under the terms of the GNU GPL License version 2
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
new file mode 100755
index 0000000..f56d760
--- /dev/null
+++ b/scripts/recordmcount.pl
@@ -0,0 +1,395 @@
+#!/usr/bin/perl -w
+# (c) 2008, Steven Rostedt <srostedt@redhat.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# recordmcount.pl - makes a section called __mcount_loc that holds
+#                   all the offsets to the calls to mcount.
+#
+#
+# What we want to end up with is a section in vmlinux called
+# __mcount_loc that contains a list of pointers to all the
+# call sites in the kernel that call mcount. Later on boot up, the kernel
+# will read this list, save the locations and turn them into nops.
+# When tracing or profiling is later enabled, these locations will then
+# be converted back to pointers to some function.
+#
+# This is no easy feat. This script is called just after the original
+# object is compiled and before it is linked.
+#
+# The references to the call sites are offsets from the section of text
+# that the call site is in. Hence, every call site to mcount is recorded
+# as an offset from the beginning of its section, not from the beginning
+# of the function.
+#
+# The trick is to find a way to record the beginning of the section.
+# The way we do this is to look at the first function in the section
+# which will also be the location of that section after final link.
+# e.g.
+#
+#  .section ".text.sched"
+#  .globl my_func
+#  my_func:
+#        [...]
+#        call mcount  (offset: 0x5)
+#        [...]
+#        ret
+#  other_func:
+#        [...]
+#        call mcount (offset: 0x1b)
+#        [...]
+#
+# Both relocation offsets for the mcounts in the above example will be
+# offset from .text.sched. If we make another file called tmp.s with:
+#
+#  .section __mcount_loc
+#  .quad  my_func + 0x5
+#  .quad  my_func + 0x1b
+#
+# We can then compile this tmp.s into tmp.o, and link it to the original
+# object.
+#
+# But this gets hard if my_func is not globl (a static function).
+# In such a case we have:
+#
+#  .section ".text.sched"
+#  my_func:
+#        [...]
+#        call mcount  (offset: 0x5)
+#        [...]
+#        ret
+#  .globl other_func
+#  other_func:
+#        [...]
+#        call mcount (offset: 0x1b)
+#        [...]
+#
+# If we make the tmp.s the same as above, when we link together with
+# the original object, we will end up with two symbols for my_func:
+# one local, one global.  After final compile, we will end up with
+# an undefined reference to my_func.
+#
+# Since tmp.o cannot reference another object's local symbols, we need
+# to find a way to make it reference the local functions of the original
+# object file once the two are linked together. To do this, we convert
+# my_func into a global symbol before linking tmp.o. After linking tmp.o
+# we will then have only a single, global symbol for my_func.
+# We can convert my_func back into a local symbol and we are done.
+#
+# Here are the steps we take:
+#
+# 1) Record all the local symbols by using 'nm'
+# 2) Use objdump to find all the call site offsets and sections for
+#    mcount.
+# 3) Compile the list into its own object.
+# 4) Do we have to deal with local functions? If not, go to step 8.
+# 5) Make an object that converts these local functions to global symbols
+#    with objcopy.
+# 6) Link together this new object with the list object.
+# 7) Convert the local functions back to local symbols and rename
+#    the result as the original object.
+#    End.
+# 8) Link the object with the list object.
+# 9) Move the result back to the original object.
+#    End.
+#
+
+use strict;
+
+my $P = $0;
+$P =~ s@.*/@@g;
+
+my $V = '0.1';
+
+if ($#ARGV < 8) {
+	print "usage: $P arch objdump objcopy cc ld nm rm mv inputfile\n";
+	print "version: $V\n";
+	exit(1);
+}
+
+my ($arch, $objdump, $objcopy, $cc, $ld, $nm, $rm, $mv, $inputfile) = @ARGV;
+
+$objdump = "objdump" if ((length $objdump) == 0);
+$objcopy = "objcopy" if ((length $objcopy) == 0);
+$cc = "gcc" if ((length $cc) == 0);
+$ld = "ld" if ((length $ld) == 0);
+$nm = "nm" if ((length $nm) == 0);
+$rm = "rm" if ((length $rm) == 0);
+$mv = "mv" if ((length $mv) == 0);
+
+#print STDERR "running: $P '$arch' '$objdump' '$objcopy' '$cc' '$ld' " .
+#    "'$nm' '$rm' '$mv' '$inputfile'\n";
+
+my %locals;		# List of local (static) functions
+my %weak;		# List of weak functions
+my %convert;		# List of local functions used that need conversion
+
+my $type;
+my $section_regex;	# Find the start of a section
+my $function_regex;	# Find the name of a function
+			#    (return offset and func name)
+my $mcount_regex;	# Find the call site to mcount (return offset)
+
+if ($arch eq "x86_64") {
+    $section_regex = "Disassembly of section";
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
+    $type = ".quad";
+
+    # force flags for this arch
+    $ld .= " -m elf_x86_64";
+    $objdump .= " -M x86-64";
+    $objcopy .= " -O elf64-x86-64";
+    $cc .= " -m64";
+
+} elsif ($arch eq "i386") {
+    $section_regex = "Disassembly of section";
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
+    $type = ".long";
+
+    # force flags for this arch
+    $ld .= " -m elf_i386";
+    $objdump .= " -M i386";
+    $objcopy .= " -O elf32-i386";
+    $cc .= " -m32";
+
+} else {
+    die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
+}
+
+my $text_found = 0;
+my $read_function = 0;
+my $opened = 0;
+my $mcount_section = "__mcount_loc";
+
+my $dirname;
+my $filename;
+my $prefix;
+my $ext;
+
+if ($inputfile =~ m,^(.*)/([^/]*)$,) {
+    $dirname = $1;
+    $filename = $2;
+} else {
+    $dirname = ".";
+    $filename = $inputfile;
+}
+
+if ($filename =~ m,^(.*)(\.\S),) {
+    $prefix = $1;
+    $ext = $2;
+} else {
+    $prefix = $filename;
+    $ext = "";
+}
+
+my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
+my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
+
+#
+# --globalize-symbol came out in binutils 2.17, so we must test the
+# objcopy version; if it is older than 2.17, we cannot record
+# local functions.
+my $use_locals = 1;
+my $local_warn_once = 0;
+my $found_version = 0;
+
+open (IN, "$objcopy --version |") || die "error running $objcopy";
+while (<IN>) {
+    if (/objcopy.*\s(\d+)\.(\d+)/) {
+	my $major = $1;
+	my $minor = $2;
+
+	$found_version = 1;
+	if ($major < 2 ||
+	    ($major == 2 && $minor < 17)) {
+	    $use_locals = 0;
+	}
+	last;
+    }
+}
+close (IN);
+
+if (!$found_version) {
+    print STDERR "WARNING: could not find objcopy version.\n" .
+	"\tDisabling local function references.\n";
+}
+
+
+#
+# Step 1: find all the local (static) functions and weak symbols.
+#        't' is local, 'w/W' is weak (we never use a weak function)
+#
+open (IN, "$nm $inputfile|") || die "error running $nm";
+while (<IN>) {
+    if (/^[0-9a-fA-F]+\s+t\s+(\S+)/) {
+	$locals{$1} = 1;
+    } elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) {
+	$weak{$2} = $1;
+    }
+}
+close(IN);
+
+my @offsets;		# Array of offsets of mcount callers
+my $ref_func;		# reference function to use for offsets
+my $offset = 0;		# offset of ref_func to section beginning
+
+##
+# update_funcs - print out the current mcount callers
+#
+#  Go through the list of offsets to callers and write them to
+#  the output file in a format that can be read by an assembler.
+#
+sub update_funcs
+{
+    return if ($#offsets < 0);
+
+    defined($ref_func) || die "No function to reference";
+
+    # The section only had a weak function to represent it.
+    # Unfortunately, a weak function may be overwritten by another
+    # function of the same name, making all these offsets incorrect.
+    # To be safe, we simply print a warning and bail.
+    if (defined $weak{$ref_func}) {
+	print STDERR
+	    "$inputfile: WARNING: referencing weak function" .
+	    " $ref_func for mcount\n";
+	return;
+    }
+
+    # is this function static? If so, note this fact.
+    if (defined $locals{$ref_func}) {
+
+	# only use locals if objcopy supports globalize-symbols
+	if (!$use_locals) {
+	    return;
+	}
+	$convert{$ref_func} = 1;
+    }
+
+    # Loop through all the mcount caller offsets and print a reference
+    # to the caller based from the ref_func.
+    for (my $i=0; $i <= $#offsets; $i++) {
+	if (!$opened) {
+	    open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
+	    $opened = 1;
+	    print FILE "\t.section $mcount_section,\"a\",\@progbits\n";
+	}
+	printf FILE "\t%s %s + %d\n", $type, $ref_func, $offsets[$i] - $offset;
+    }
+}
+
+#
+# Step 2: find the sections and mcount call sites
+#
+open(IN, "$objdump -dr $inputfile|") || die "error running $objdump";
+
+my $text;
+
+while (<IN>) {
+    # is it a section?
+    if (/$section_regex/) {
+	$read_function = 1;
+	# print out any recorded offsets
+	update_funcs() if ($text_found);
+
+	# reset all markers and arrays
+	$text_found = 0;
+	undef($ref_func);
+	undef(@offsets);
+
+    # section found, now is this a start of a function?
+    } elsif ($read_function && /$function_regex/) {
+	$text_found = 1;
+	$offset = hex $1;
+	$text = $2;
+
+	# if this is either a local function or a weak function
+	# keep looking for functions that are global that
+	# we can use safely.
+	if (!defined($locals{$text}) && !defined($weak{$text})) {
+	    $ref_func = $text;
+	    $read_function = 0;
+	} else {
+	    # if we already have a function, and this is weak, skip it
+	    if (!defined($ref_func) || !defined($weak{$text})) {
+		$ref_func = $text;
+	    }
+	}
+    }
+
+    # is this a call site to mcount? If so, record it to print later
+    if ($text_found && /$mcount_regex/) {
+	$offsets[$#offsets + 1] = hex $1;
+    }
+}
+
+# dump out any more offsets that may have been found
+update_funcs() if ($text_found);
+
+# If we did not find any mcount callers, we are done (do nothing).
+if (!$opened) {
+    exit(0);
+}
+
+close(FILE);
+
+#
+# Step 3: Compile the file that holds the list of call sites to mcount.
+#
+`$cc -o $mcount_o -c $mcount_s`;
+
+my @converts = keys %convert;
+
+#
+# Step 4: Do we have sections that started with local functions?
+#
+if ($#converts >= 0) {
+    my $globallist = "";
+    my $locallist = "";
+
+    foreach my $con (@converts) {
+	$globallist .= " --globalize-symbol $con";
+	$locallist .= " --localize-symbol $con";
+    }
+
+    my $globalobj = $dirname . "/.tmp_gl_" . $filename;
+    my $globalmix = $dirname . "/.tmp_mx_" . $filename;
+
+    #
+    # Step 5: set up each local function as a global
+    #
+    `$objcopy $globallist $inputfile $globalobj`;
+
+    #
+    # Step 6: Link the global version to our list.
+    #
+    `$ld -r $globalobj $mcount_o -o $globalmix`;
+
+    #
+    # Step 7: Convert the local functions back into local symbols
+    #
+    `$objcopy $locallist $globalmix $inputfile`;
+
+    # Remove the temp files
+    `$rm $globalobj $globalmix`;
+
+} else {
+
+    my $mix = $dirname . "/.tmp_mx_" . $filename;
+
+    #
+    # Step 8: Link the object with our list of call sites object.
+    #
+    `$ld -r $inputfile $mcount_o -o $mix`;
+
+    #
+    # Step 9: Move the result back to the original object.
+    #
+    `$mv $mix $inputfile`;
+}
+
+# Clean up the temp files
+`$rm $mcount_o $mcount_s`;
+
+exit(0);
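
The header comment of recordmcount.pl explains that the collected __mcount_loc entries are read at boot and each recorded call site is turned into a nop. As a rough sketch of that consumer side (not the actual ftrace code), assuming the usual __start_/__stop_ linker-script bounds around the collected section and a hypothetical patch helper:

#include <linux/init.h>
#include <linux/kernel.h>

/* Bounds of the collected __mcount_loc section, following the usual
 * linker-script convention; each entry is the address of a call to mcount. */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

/* Hypothetical stand-in for the arch code that rewrites one call site. */
static void __init patch_call_to_nop(unsigned long ip)
{
	pr_debug("would nop out mcount call at %lx\n", ip);
}

/* Walk the list once at boot and disarm every recorded call site. */
static void __init convert_mcount_calls(void)
{
	unsigned long *p;

	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
		patch_call_to_nop(*p);
}
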
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 46f2397..5ba7870 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -1,5 +1,5 @@
 /*
- * dev_cgroup.c - device cgroup subsystem
+ * device_cgroup.c - device cgroup subsystem
  *
  * Copyright 2007 IBM Corp
  */
@@ -10,6 +10,7 @@
 #include <linux/list.h>
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
+#include <linux/rcupdate.h>
 
 #define ACC_MKNOD 1
 #define ACC_READ  2
@@ -22,18 +23,8 @@
 
 /*
  * whitelist locking rules:
- * cgroup_lock() cannot be taken under dev_cgroup->lock.
- * dev_cgroup->lock can be taken with or without cgroup_lock().
- *
- * modifications always require cgroup_lock
- * modifications to a list which is visible require the
- *   dev_cgroup->lock *and* cgroup_lock()
- * walking the list requires dev_cgroup->lock or cgroup_lock().
- *
- * reasoning: dev_whitelist_copy() needs to kmalloc, so needs
- *   a mutex, which the cgroup_lock() is.  Since modifying
- *   a visible list requires both locks, either lock can be
- *   taken for walking the list.
+ * hold cgroup_lock() for update/read.
+ * hold rcu_read_lock() for read.
  */
 
 struct dev_whitelist_item {
@@ -47,7 +38,6 @@
 struct dev_cgroup {
 	struct cgroup_subsys_state css;
 	struct list_head whitelist;
-	spinlock_t lock;
 };
 
 static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -84,13 +74,9 @@
 	struct dev_whitelist_item *wh, *tmp, *new;
 
 	list_for_each_entry(wh, orig, list) {
-		new = kmalloc(sizeof(*wh), GFP_KERNEL);
+		new = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
 		if (!new)
 			goto free_and_exit;
-		new->major = wh->major;
-		new->minor = wh->minor;
-		new->type = wh->type;
-		new->access = wh->access;
 		list_add_tail(&new->list, dest);
 	}
 
@@ -107,19 +93,16 @@
 /* Stupid prototype - don't bother combining existing entries */
 /*
  * called under cgroup_lock()
- * since the list is visible to other tasks, we need the spinlock also
  */
 static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
 			struct dev_whitelist_item *wh)
 {
 	struct dev_whitelist_item *whcopy, *walk;
 
-	whcopy = kmalloc(sizeof(*whcopy), GFP_KERNEL);
+	whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
 	if (!whcopy)
 		return -ENOMEM;
 
-	memcpy(whcopy, wh, sizeof(*whcopy));
-	spin_lock(&dev_cgroup->lock);
 	list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
 		if (walk->type != wh->type)
 			continue;
@@ -135,7 +118,6 @@
 
 	if (whcopy != NULL)
 		list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
-	spin_unlock(&dev_cgroup->lock);
 	return 0;
 }
 
@@ -149,14 +131,12 @@
 
 /*
  * called under cgroup_lock()
- * since the list is visible to other tasks, we need the spinlock also
  */
 static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
 			struct dev_whitelist_item *wh)
 {
 	struct dev_whitelist_item *walk, *tmp;
 
-	spin_lock(&dev_cgroup->lock);
 	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
 		if (walk->type == DEV_ALL)
 			goto remove;
@@ -174,7 +154,6 @@
 			call_rcu(&walk->rcu, whitelist_item_free);
 		}
 	}
-	spin_unlock(&dev_cgroup->lock);
 }
 
 /*
@@ -214,7 +193,6 @@
 		}
 	}
 
-	spin_lock_init(&dev_cgroup->lock);
 	return &dev_cgroup->css;
 }
 
@@ -330,15 +308,11 @@
 {
 	struct cgroup *pcg = childcg->css.cgroup->parent;
 	struct dev_cgroup *parent;
-	int ret;
 
 	if (!pcg)
 		return 1;
 	parent = cgroup_to_devcgroup(pcg);
-	spin_lock(&parent->lock);
-	ret = may_access_whitelist(parent, wh);
-	spin_unlock(&parent->lock);
-	return ret;
+	return may_access_whitelist(parent, wh);
 }
 
 /*
@@ -357,17 +331,14 @@
 static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 				   int filetype, const char *buffer)
 {
-	struct dev_cgroup *cur_devcgroup;
 	const char *b;
 	char *endp;
-	int retval = 0, count;
+	int count;
 	struct dev_whitelist_item wh;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	cur_devcgroup = task_devcgroup(current);
-
 	memset(&wh, 0, sizeof(wh));
 	b = buffer;
 
@@ -437,7 +408,6 @@
 	}
 
 handle:
-	retval = 0;
 	switch (filetype) {
 	case DEVCG_ALLOW:
 		if (!parent_has_perm(devcgroup, &wh))
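
The rewritten locking comment in device_cgroup.c reduces the rules to cgroup_lock() for updates and rcu_read_lock() for readers. A condensed sketch of a read-side walk under that rule, for illustration rather than the file's actual checking code; it uses only fields visible in this hunk (type, list) together with list_for_each_entry_rcu():

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Readers need only rcu_read_lock(): writers add/remove items with the
 * list_*_rcu() helpers and free them via call_rcu() under cgroup_lock(). */
static bool whitelist_allows_all(struct dev_cgroup *dev_cgroup)
{
	struct dev_whitelist_item *wh;
	bool all = false;

	rcu_read_lock();
	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
		if (wh->type == DEV_ALL) {
			all = true;
			break;
		}
	}
	rcu_read_unlock();
	return all;
}
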
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 576e511..3e3fde7c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -75,6 +75,7 @@
 #include <linux/string.h>
 #include <linux/selinux.h>
 #include <linux/mutex.h>
+#include <linux/posix-timers.h>
 
 #include "avc.h"
 #include "objsec.h"
@@ -2322,13 +2323,7 @@
 			initrlim = init_task.signal->rlim+i;
 			rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
 		}
-		if (current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-			/*
-			 * This will cause RLIMIT_CPU calculations
-			 * to be refigured.
-			 */
-			current->it_prof_expires = jiffies_to_cputime(1);
-		}
+		update_rlimit_cpu(rlim->rlim_cur);
 	}
 
 	/* Wake up the parent if it is waiting so that it can
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 89b7f54..ea2bf82 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -319,6 +319,7 @@
 /**
  * snd_pcm_format_size - return the byte size of samples on the given format
  * @format: the format to check
+ * @samples: number of samples
  *
  * Returns the byte size of the given samples for the format, or a
  * negative error code if unknown format.
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index e5e749f..73be7e1 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -51,7 +51,7 @@
 	if (err < 0)
 		return err;
 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256, UINT_MAX);
-	if (err) < 0)
+	if (err < 0)
 		return err;
 	return 0;
 }
diff --git a/sound/oss/sh_dac_audio.c b/sound/oss/sh_dac_audio.c
index b493660..e5d4239 100644
--- a/sound/oss/sh_dac_audio.c
+++ b/sound/oss/sh_dac_audio.c
@@ -26,7 +26,7 @@
 #include <asm/cpu/dac.h>
 #include <asm/cpu/timer.h>
 #include <asm/machvec.h>
-#include <asm/hp6xx.h>
+#include <mach/hp6xx.h>
 #include <asm/hd64461.h>
 
 #define MODNAME "sh_dac_audio"
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index a7d8966..88fbf28 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -759,7 +759,6 @@
 			       SPCS_CHANNELNUM_LEFT | SPCS_SOURCENUM_UNSPEC |
 			       SPCS_GENERATIONSTATUS | 0x00001200 |
 			       0x00000000 | SPCS_EMPHASIS_NONE | SPCS_COPYRIGHT );
-	}
 #endif
 
 	return 0;
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 20d0e32..8f9e385 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -666,6 +666,7 @@
 	card->avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_16;
 	card->avs.avs_audio_format = PS3AV_CMD_AUDIO_FORMAT_PCM;
 	card->avs.avs_audio_source = PS3AV_CMD_AUDIO_SOURCE_SERIAL;
+	memcpy(card->avs.avs_cs_info, ps3av_mode_cs_info, 8);
 
 	ret = snd_ps3_change_avsetting(card);
 
@@ -685,6 +686,7 @@
 {
 	struct snd_ps3_card_info *card = snd_pcm_substream_chip(substream);
 	struct snd_ps3_avsetting_info avs;
+	int ret;
 
 	avs = card->avs;
 
@@ -729,19 +731,92 @@
 		return 1;
 	}
 
-	if ((card->avs.avs_audio_width != avs.avs_audio_width) ||
-	    (card->avs.avs_audio_rate != avs.avs_audio_rate)) {
-		card->avs = avs;
-		snd_ps3_change_avsetting(card);
+	memcpy(avs.avs_cs_info, ps3av_mode_cs_info, 8);
 
+	if (memcmp(&card->avs, &avs, sizeof(avs))) {
 		pr_debug("%s: after freq=%d width=%d\n", __func__,
 			 card->avs.avs_audio_rate, card->avs.avs_audio_width);
 
-		return 0;
+		card->avs = avs;
+		snd_ps3_change_avsetting(card);
+		ret = 0;
 	} else
-		return 1;
+		ret = 1;
+
+	/* check CS non-audio bit and mute accordingly */
+	if (avs.avs_cs_info[0] & 0x02)
+		ps3av_audio_mute_analog(1); /* mute if non-audio */
+	else
+		ps3av_audio_mute_analog(0);
+
+	return ret;
 }
 
+/*
+ * SPDIF status bits controls
+ */
+static int snd_ps3_spdif_mask_info(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+	return 0;
+}
+
+/* FIXME: ps3av_set_audio_mode() assumes only consumer mode */
+static int snd_ps3_spdif_cmask_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	memset(ucontrol->value.iec958.status, 0xff, 8);
+	return 0;
+}
+
+static int snd_ps3_spdif_pmask_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	return 0;
+}
+
+static int snd_ps3_spdif_default_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	memcpy(ucontrol->value.iec958.status, ps3av_mode_cs_info, 8);
+	return 0;
+}
+
+static int snd_ps3_spdif_default_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	if (memcmp(ps3av_mode_cs_info, ucontrol->value.iec958.status, 8)) {
+		memcpy(ps3av_mode_cs_info, ucontrol->value.iec958.status, 8);
+		return 1;
+	}
+	return 0;
+}
+
+static struct snd_kcontrol_new spdif_ctls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
+		.info = snd_ps3_spdif_mask_info,
+		.get = snd_ps3_spdif_cmask_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
+		.info = snd_ps3_spdif_mask_info,
+		.get = snd_ps3_spdif_pmask_get,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
+		.info = snd_ps3_spdif_mask_info,
+		.get = snd_ps3_spdif_default_get,
+		.put = snd_ps3_spdif_default_put,
+	},
+};
 
 
 static int snd_ps3_map_mmio(void)
@@ -842,7 +917,7 @@
 
 static int __init snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
 {
-	int ret;
+	int i, ret;
 	u64 lpar_addr, lpar_size;
 
 	BUG_ON(!firmware_has_feature(FW_FEATURE_PS3_LV1));
@@ -903,6 +978,15 @@
 	strcpy(the_card.card->driver, "PS3");
 	strcpy(the_card.card->shortname, "PS3");
 	strcpy(the_card.card->longname, "PS3 sound");
+
+	/* create control elements */
+	for (i = 0; i < ARRAY_SIZE(spdif_ctls); i++) {
+		ret = snd_ctl_add(the_card.card,
+				  snd_ctl_new1(&spdif_ctls[i], &the_card));
+		if (ret < 0)
+			goto clean_card;
+	}
+
 	/* create PCM devices instance */
 	/* NOTE:this driver works assuming pcm:substream = 1:1 */
 	ret = snd_pcm_new(the_card.card,
diff --git a/sound/ppc/snd_ps3.h b/sound/ppc/snd_ps3.h
index 4b7e6fb..326fb29 100644
--- a/sound/ppc/snd_ps3.h
+++ b/sound/ppc/snd_ps3.h
@@ -51,6 +51,7 @@
 	uint32_t avs_audio_width;
 	uint32_t avs_audio_format; /* fixed */
 	uint32_t avs_audio_source; /* fixed */
+	unsigned char avs_cs_info[8];
 };
 /*
  * PS3 audio 'card' instance
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 0a063a9..853b33a 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -43,6 +43,7 @@
 struct omap_mcbsp_data {
 	unsigned int			bus_id;
 	struct omap_mcbsp_reg_cfg	regs;
+	unsigned int			fmt;
 	/*
 	 * Flags indicating is the bus already activated and configured by
 	 * another substream
@@ -200,6 +201,7 @@
 	struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
 	struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
 	int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id;
+	int wlen;
 	unsigned long port;
 
 	if (cpu_class_is_omap1()) {
@@ -244,19 +246,29 @@
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
 		/* Set word lengths */
+		wlen = 16;
 		regs->rcr2	|= RWDLEN2(OMAP_MCBSP_WORD_16);
 		regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_16);
 		regs->xcr2	|= XWDLEN2(OMAP_MCBSP_WORD_16);
 		regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_16);
-		/* Set FS period and length in terms of bit clock periods */
-		regs->srgr2	|= FPER(16 * 2 - 1);
-		regs->srgr1	|= FWID(16 - 1);
 		break;
 	default:
 		/* Unsupported PCM format */
 		return -EINVAL;
 	}
 
+	/* Set FS period and length in terms of bit clock periods */
+	switch (mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		regs->srgr2	|= FPER(wlen * 2 - 1);
+		regs->srgr1	|= FWID(wlen - 1);
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		regs->srgr2	|= FPER(wlen * 2 - 1);
+		regs->srgr1	|= FWID(0);
+		break;
+	}
+
 	omap_mcbsp_config(bus_id, &mcbsp_data->regs);
 	mcbsp_data->configured = 1;
 
@@ -272,10 +284,12 @@
 {
 	struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
 	struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
+	unsigned int temp_fmt = fmt;
 
 	if (mcbsp_data->configured)
 		return 0;
 
+	mcbsp_data->fmt = fmt;
 	memset(regs, 0, sizeof(*regs));
 	/* Generic McBSP register settings */
 	regs->spcr2	|= XINTM(3) | FREE;
@@ -293,6 +307,8 @@
 		/* 0-bit data delay */
 		regs->rcr2      |= RDATDLY(0);
 		regs->xcr2      |= XDATDLY(0);
+		/* Invert bit clock and FS polarity configuration for DSP_A */
+		temp_fmt ^= SND_SOC_DAIFMT_IB_IF;
 		break;
 	default:
 		/* Unsupported data format */
@@ -316,7 +332,7 @@
 	}
 
 	/* Set bit clock (CLKX/CLKR) and FS polarities */
-	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	switch (temp_fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
 		/*
 		 * Normal BCLK + FS.